Mirror of https://github.com/henrygd/beszel.git (synced 2026-02-02 15:36:07 +00:00)

Compare commits: battery-ch...v0.18.3 (196 commits)
Commit SHA1s (196):

0f6142e27e 8c37b93a4b 201d16af05 db007176fd 83fb67132b a04837f4d5 3d8db53e52 5797f8a6ad
79ca31d770 41f3705b6b 20324763d2 70f85f9590 c7f7f51c99 6723ec8ea4 afc19ebd3b c83d00ccaa
425c8d2bdf 42da1e5a52 afcae025ae 1de36625a4 a2b6c7f5e6 799c7b077a cb5f944de6 23c4958145
edb2edc12c 648a979a81 988de6de7b 031abbfcb3 b59fcc26e5 acaa9381fe 8d9e9260e6 0fc4a6daed
af0c1d3af7 9ad3cd0ab9 00def272b0 383913505f ca8cb78c29 8821fb5dd0 3279a6ca53 6a1a98d73f
1f067aad5b 1388711105 618e5b4cc1 42c3ca5db5 534791776b 0c6c53fc7d 0dfd5ce07d 2cd6d46f7c
c333a9fadd ba3d1c66f0 7276e533ce 8b84231042 77da744008 5da7a21119 78d742c712 1c97ea3e2c
3d970defe9 6282794004 475c53a55d 4547ff7b5d e7b4be3dc5 2bd85e04fc f6ab5f2af1 7d943633a3
7fff3c999a a9068a11a9 d3d102516c 32131439f9 d17685c540 e59f8eee36 35329abcbd ee7741c3ab
ab0803b2da 96196a353c 2a8796c38d c8d4f7427d 8d41a797d3 570e1cbf40 4c9b00a066 7d1f8bb180
3a6caeb06e 9e67245e60 b7a95d5d76 fe550c5901 8aac0a571a c506b8b0ad a6e84c207e 249402eaed
394c476f2a 86e8a141ea 53a7e06dcf 11edabd09f 41a3d9359f 5dfc5f247f 9804c8a31a 4d05bfdff0
0388401a9e 162c548010 888b4a57e5 26d367b188 ca4988951f c7a50dd74d 00fbf5c9c3 4bfe9dd5ad
e159a75b79 a69686125e 3eb025ded2 1d0e646094 32c8e047e3 3650482b09 79adfd2c0d 779dcc62aa
abe39c1a0a bd41ad813c 77fe63fb63 f61ba202d8 e1067fa1a3 0a3eb898ae 6c33e9dc93 f8ed6ce705
f64478b75e 854a3697d7 b7915b9d0e 4443b606f6 6c20a98651 b722ccc5bc db0315394b a7ef1235f4
f64a361c60 aaa788bc2f 3eede6bead 02abfbcb54 01d20562f0 cbe6ee6499 9a61ea8356 1af7ff600f
02d594cc82 7d0b5c1c67 d3dc8a7af0 d67fefe7c5 4d364c5e4d 954400ea45 04b6067e64 d77ee5554f
2e034bdead fc0947aa04 1d546a4091 f60b3bbbfb 8e99b9f1ad fa5ed2bc11 21d961ab97 aaa93b84d2
6a562ce03b 3dbc48727e 85ac2e5e9a af6bd4e505 e54c4b3499 078c88f825 85169b6c5e d0ff8ee2c0
e898768997 0f5b504f23 365d291393 3dbab24c0f 1f67fb7c8d 219e09fc78 cd9c2bd9ab 9f969d843c
b22a6472fc d231ace28e 473cb7f437 783ed9f456 9a9a89ee50 5122d0341d 81731689da b3e9857448
2eda9eb0e3 82a5df5048 f11564a7ac 9df4d29236 1452817423 c57e496f5e 6287f7003c 37037b1f4e
7cf123a99e 97394e775f d5c381188b b107d12a62 e646f2c1fc b18528d24a a6e64df399 66ba21dd41
1851e7a111 74b78e96b3 a9657f9c00 1dee63a0eb
.github/CODEOWNERS (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
# Everything needs to be reviewed by Hank
* @henrygd
.github/DISCUSSION_TEMPLATE/ideas.yml (vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
body:
- type: dropdown
id: component
attributes:
label: Component
description: Which part of Beszel is this about?
options:
- Hub
- Agent
- Hub & Agent
default: 0
validations:
required: true
- type: textarea
attributes:
label: Description
description: Please describe in detail what you want to share.
validations:
required: true
.github/DISCUSSION_TEMPLATE/support.yml (vendored, 68 lines changed)
@@ -1,19 +1,54 @@
body:
- type: markdown
- type: checkboxes
id: terms
attributes:
value: |
### Before opening a discussion:
label: Welcome!
description: |
Thank you for reaching out to the Beszel community for support! To help us assist you better, please make sure to review the following points before submitting your request:

- Check the [common issues guide](https://beszel.dev/guide/common-issues).
- Search existing [issues](https://github.com/henrygd/beszel/issues) and [discussions](https://github.com/henrygd/beszel/discussions) (including closed).
Please note:
- For translation-related issues or requests, please use the [Crowdin project](https://crowdin.com/project/beszel).
**- Please do not submit support requests that are specific to ZFS. We plan to add integration with ZFS utilities in the near future.**

options:
- label: I have read the [Documentation](https://beszel.dev/guide/getting-started)
required: true
- label: I have checked the [Common Issues Guide](https://beszel.dev/guide/common-issues) and my problem was not mentioned there.
required: true
- label: I have searched open and closed issues and discussions and my problem was not mentioned before.
required: true
- label: I have verified I am using the latest version available. You can check the latest release [here](https://github.com/henrygd/beszel/releases).
required: true

- type: dropdown
id: component
attributes:
label: Component
description: Which part of Beszel is this about?
options:
- Hub
- Agent
- Hub & Agent
default: 0
validations:
required: true

- type: textarea
id: description
attributes:
label: Description
description: A clear and concise description of the issue or question. If applicable, add screenshots to help explain your problem.
label: Problem Description
description: |
How to write a good bug report?

- Respect the issue template as much as possible.
- The title should be short and descriptive.
- Explain the conditions which led you to report this issue: the context.
- The context should lead to something, a problem that you're facing.
- Remain clear and concise.
- Format your messages to help the reader focus on what matters and understand the structure of your message, use [Markdown syntax](https://help.github.com/articles/github-flavored-markdown)
validations:
required: true

- type: input
id: system
attributes:
@@ -21,13 +56,15 @@ body:
placeholder: linux/amd64 (agent), freebsd/arm64 (hub)
validations:
required: true
- type: input
id: version
attributes:
label: Beszel version
placeholder: 0.9.1
validations:
required: true

# - type: input
# id: version
# attributes:
# label: Beszel version
# placeholder: 0.9.1
# validations:
# required: true

- type: dropdown
id: install-method
attributes:
@@ -41,18 +78,21 @@ body:
- Other (please describe above)
validations:
required: true

- type: textarea
id: config
attributes:
label: Configuration
description: Please provide any relevant service configuration
render: yaml

- type: textarea
id: hub-logs
attributes:
label: Hub Logs
description: Check the logs page in PocketBase (`/_/#/logs`) for relevant errors (copy JSON).
render: json

- type: textarea
id: agent-logs
attributes:
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 103 lines changed)
@@ -1,8 +1,30 @@
name: 🐛 Bug report
description: Report a new bug or issue.
description: Use this template to report a bug or issue.
title: '[Bug]: '
labels: ['bug', "needs confirmation"]
labels: ['bug']
body:
- type: checkboxes
attributes:
label: Welcome!
description: |
The issue tracker is for reporting bugs and feature requests only. For end-user related support questions, please use the **[GitHub Discussions](https://github.com/henrygd/beszel/discussions/new?category=support)** instead

Please note:
- For translation-related issues or requests, please use the [Crowdin project](https://crowdin.com/project/beszel).
- To request a change or feature, use the [feature request form](https://github.com/henrygd/beszel/issues/new?template=feature_request.yml).
- Any issues that can be resolved by consulting the documentation or by reviewing existing open or closed issues will be closed.
**- Please do not submit bugs that are specific to ZFS. We plan to add integration with ZFS utilities in the near future.**

options:
- label: I have read the [Documentation](https://beszel.dev/guide/getting-started)
required: true
- label: I have checked the [Common Issues Guide](https://beszel.dev/guide/common-issues) and my problem was not mentioned there.
required: true
- label: I have searched open and closed issues and my problem was not mentioned before.
required: true
- label: I have verified I am using the latest version available. You can check the latest release [here](https://github.com/henrygd/beszel/releases).
required: true

- type: dropdown
id: component
attributes:
@@ -12,81 +34,53 @@ body:
- Hub
- Agent
- Hub & Agent
default: 0
validations:
required: true
- type: markdown
attributes:
value: |
### Thanks for taking the time to fill out this bug report!

- For more general support, please [start a support thread](https://github.com/henrygd/beszel/discussions/new?category=support).
- To request a change or feature, use the [feature request form](https://github.com/henrygd/beszel/issues/new?template=feature_request.yml).
- Please do not submit bugs that are specific to ZFS. We plan to add integration with ZFS utilities in the near future.

### Before submitting a bug report:

- Check the [common issues guide](https://beszel.dev/guide/common-issues).
- Search existing [issues](https://github.com/henrygd/beszel/issues) and [discussions](https://github.com/henrygd/beszel/discussions) (including closed).
- type: textarea
id: description
attributes:
label: Description
description: Explain the issue you experienced clearly and concisely.
placeholder: I went to the coffee pot and it was empty.
label: Problem Description
description: |
How to write a good bug report?

- Respect the issue template as much as possible.
- The title should be short and descriptive.
- Explain the conditions which led you to report this issue: the context.
- The context should lead to something, a problem that you're facing.
- Remain clear and concise.
- Format your messages to help the reader focus on what matters and understand the structure of your message, use [Markdown syntax](https://help.github.com/articles/github-flavored-markdown)
validations:
required: true

- type: textarea
id: expected-behavior
attributes:
label: Expected Behavior
description: In a perfect world, what should have happened?
description: |
In a perfect world, what should have happened?
**Important:** Be specific. Vague descriptions like "it should work" are not helpful.
placeholder: When I got to the coffee pot, it should have been full.
validations:
required: true

- type: textarea
id: steps-to-reproduce
attributes:
label: Steps to Reproduce
description: Describe how to reproduce the issue in repeatable steps.
description: |
Provide detailed, numbered steps that someone else can follow to reproduce the issue.
**Important:** Vague descriptions like "it doesn't work" or "it's broken" will result in the issue being closed.
Include specific actions, URLs, button clicks, and any relevant data or configuration.
placeholder: |
1. Go to the coffee pot.
2. Make more coffee.
3. Pour it into a cup.
4. Observe that the cup is empty instead of full.
validations:
required: true
- type: dropdown
id: category
attributes:
label: Category
description: Which category does this relate to most?
options:
- Metrics
- Charts & Visualization
- Settings & Configuration
- Notifications & Alerts
- Authentication
- Installation
- Performance
- UI / UX
- Other
validations:
required: true
- type: dropdown
id: metrics
attributes:
label: Affected Metrics
description: If applicable, which specific metric does this relate to most?
options:
- CPU
- Memory
- Storage
- Network
- Containers
- GPU
- Sensors
- Other
validations:
required: true

- type: input
id: system
attributes:
@@ -94,6 +88,7 @@ body:
placeholder: linux/amd64 (agent), freebsd/arm64 (hub)
validations:
required: true

- type: input
id: version
attributes:
@@ -101,6 +96,7 @@ body:
placeholder: 0.9.1
validations:
required: true

- type: dropdown
id: install-method
attributes:
@@ -114,18 +110,21 @@ body:
- Other (please describe above)
validations:
required: true

- type: textarea
id: config
attributes:
label: Configuration
description: Please provide any relevant service configuration
render: yaml

- type: textarea
id: hub-logs
attributes:
label: Hub Logs
description: Check the logs page in PocketBase (`/_/#/logs`) for relevant errors (copy JSON).
render: json

- type: textarea
id: agent-logs
attributes:
.github/ISSUE_TEMPLATE/config.yml (vendored, 3 lines changed)
@@ -1,5 +1,8 @@
blank_issues_enabled: false
contact_links:
- name: 🗣️ Translations
url: https://crowdin.com/project/beszel
about: Please report translation issues and request new translations here.
- name: 💬 Support and questions
url: https://github.com/henrygd/beszel/discussions
about: Ask and answer questions here.
.github/ISSUE_TEMPLATE/feature_request.yml (vendored, 81 lines changed)
@@ -1,8 +1,25 @@
name: 🚀 Feature request
description: Request a new feature or change.
title: "[Feature]: "
labels: ["enhancement", "needs review"]
labels: ["enhancement"]
body:
- type: checkboxes
attributes:
label: Welcome!
description: |
The issue tracker is for reporting bugs and feature requests only. For end-user related support questions, please use the **[GitHub Discussions](https://github.com/henrygd/beszel/discussions)** instead

Please note:
- For **Bug reports**, use the [Bug Form](https://github.com/henrygd/beszel/issues/new?template=bug_report.yml).
- Any requests for new translations should be requested within the [crowdin project](https://crowdin.com/project/beszel).
- Create one issue per feature request. This helps us keep track of requests and prioritize them accordingly.

options:
- label: I have searched open and closed feature requests to make sure this or similar feature request does not already exist.
required: true
- label: This is a feature request, not a bug report or support question.
required: true

- type: dropdown
id: component
attributes:
@@ -12,65 +29,29 @@ body:
- Hub
- Agent
- Hub & Agent
default: 0
validations:
required: true
- type: markdown
attributes:
value: Before submitting, please search existing [issues](https://github.com/henrygd/beszel/issues) and [discussions](https://github.com/henrygd/beszel/discussions) (including closed).

- type: textarea
id: description
attributes:
label: Describe the feature you would like to see
label: Description
description: |
Describe the solution or feature you'd like. Explain what problem this solves or what value it adds.
**Important:** Be specific and detailed. Vague requests like "make it better" will be closed.
placeholder: |
Example:
- What is the feature?
- What problem does it solve?
- How should it work?
validations:
required: true

- type: textarea
id: motivation
attributes:
label: Motivation / Use Case
description: Why do you want this feature? What problem does it solve?
validations:
required: true
- type: textarea
attributes:
label: Describe how you would like to see this feature implemented
validations:
required: true
- type: textarea
id: logs
attributes:
label: Screenshots
description: Please attach any relevant screenshots, such as images from your current solution or similar implementations.
validations:
required: false
- type: dropdown
id: category
attributes:
label: Category
description: Which category does this relate to most?
options:
- Metrics
- Charts & Visualization
- Settings & Configuration
- Notifications & Alerts
- Authentication
- Installation
- Performance
- UI / UX
- Other
validations:
required: true
- type: dropdown
id: metrics
attributes:
label: Affected Metrics
description: If applicable, which specific metric does this relate to most?
options:
- CPU
- Memory
- Storage
- Network
- Containers
- GPU
- Sensors
- Other
validations:
required: true
.github/workflows/docker-images.yml (vendored, 42 lines changed)
@@ -10,6 +10,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
fail-fast: false
max-parallel: 5
matrix:
include:
# henrygd/beszel
@@ -24,19 +25,18 @@ jobs:
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}

# henrygd/beszel-agent

# henrygd/beszel-agent:alpine
- image: henrygd/beszel-agent
dockerfile: ./internal/dockerfile_agent
dockerfile: ./internal/dockerfile_agent_alpine
registry: docker.io
username_secret: DOCKERHUB_USERNAME
password_secret: DOCKERHUB_TOKEN
tags: |
type=raw,value=edge
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}
type=raw,value=alpine
type=semver,pattern={{version}}-alpine
type=semver,pattern={{major}}.{{minor}}-alpine
type=semver,pattern={{major}}-alpine

# henrygd/beszel-agent-nvidia
- image: henrygd/beszel-agent-nvidia
@@ -66,18 +66,6 @@ jobs:
type=semver,pattern={{major}}
type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}

# henrygd/beszel-agent:alpine
- image: henrygd/beszel-agent
dockerfile: ./internal/dockerfile_agent_alpine
registry: docker.io
username_secret: DOCKERHUB_USERNAME
password_secret: DOCKERHUB_TOKEN
tags: |
type=raw,value=alpine
type=semver,pattern={{version}}-alpine
type=semver,pattern={{major}}.{{minor}}-alpine
type=semver,pattern={{major}}-alpine

# ghcr.io/henrygd/beszel
- image: ghcr.io/${{ github.repository }}/beszel
dockerfile: ./internal/dockerfile_hub
@@ -99,6 +87,7 @@ jobs:
password_secret: GITHUB_TOKEN
tags: |
type=raw,value=edge
type=raw,value=latest
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
@@ -144,6 +133,19 @@ jobs:
type=semver,pattern={{major}}.{{minor}}-alpine
type=semver,pattern={{major}}-alpine

# henrygd/beszel-agent (keep at bottom so it gets built after :alpine and gets the latest tag)
- image: henrygd/beszel-agent
dockerfile: ./internal/dockerfile_agent
registry: docker.io
username_secret: DOCKERHUB_USERNAME
password_secret: DOCKERHUB_TOKEN
tags: |
type=raw,value=edge
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}

permissions:
contents: read
packages: write
.github/workflows/inactivity-actions.yml (vendored, 20 lines changed)
@@ -10,12 +10,25 @@ permissions:
pull-requests: write

jobs:
lock-inactive:
name: Lock Inactive Issues
runs-on: ubuntu-24.04
steps:
- uses: klaasnicolaas/action-inactivity-lock@v1.1.3
id: lock
with:
days-inactive-issues: 14
lock-reason-issues: ""
# Action can not skip PRs, set it to 100 years to cover it.
days-inactive-prs: 36524
lock-reason-prs: ""

close-stale:
name: Close Stale Issues
runs-on: ubuntu-24.04
steps:
- name: Close Stale Issues
uses: actions/stale@v9
uses: actions/stale@v10
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}

@@ -32,11 +45,14 @@ jobs:
# Timing
days-before-issue-stale: 14
days-before-issue-close: 7
# Action can not skip PRs, set it to 100 years to cover it.
days-before-pr-stale: 36524

# Labels
stale-issue-label: 'stale'
remove-stale-when-updated: true
only-issue-labels: 'awaiting-requester'
any-of-labels: 'awaiting-requester'
exempt-issue-labels: 'enhancement'

# Exemptions
exempt-assignees: true
.github/workflows/label-from-dropdown.yml (vendored, deleted file, 82 lines)
@@ -1,82 +0,0 @@
name: Label issues from dropdowns

on:
issues:
types: [opened]

jobs:
label_from_dropdown:
runs-on: ubuntu-latest
permissions:
issues: write
steps:
- name: Apply labels based on dropdown choices
uses: actions/github-script@v7
with:
script: |
const issueNumber = context.issue.number;
const owner = context.repo.owner;
const repo = context.repo.repo;

// Get the issue body
const body = context.payload.issue.body;

// Helper to find dropdown value in the body (assuming markdown format)
function extractSectionValue(heading) {
const regex = new RegExp(`### ${heading}\\s+([\\s\\S]*?)(?:\\n###|$)`, 'i');
const match = body.match(regex);
if (match) {
// Get the first non-empty line after the heading
const lines = match[1].split('\n').map(l => l.trim()).filter(Boolean);
return lines[0] || null;
}
return null;
}

// Extract dropdown selections
const category = extractSectionValue('Category');
const metrics = extractSectionValue('Affected Metrics');
const component = extractSectionValue('Component');

// Build labels to add
let labelsToAdd = [];
if (category) labelsToAdd.push(category);
if (metrics) labelsToAdd.push(metrics);
if (component) labelsToAdd.push(component);

// Get existing labels in the repo
const { data: existingLabels } = await github.rest.issues.listLabelsForRepo({
owner,
repo,
per_page: 100
});
const existingLabelNames = existingLabels.map(l => l.name);

// Find labels that need to be created
const labelsToCreate = labelsToAdd.filter(label => !existingLabelNames.includes(label));

// Create missing labels (with a default color)
for (const label of labelsToCreate) {
try {
await github.rest.issues.createLabel({
owner,
repo,
name: label,
color: 'ededed' // light gray, you can pick any hex color
});
} catch (e) {
// Ignore if label already exists (race condition), otherwise rethrow
if (!e || e.status !== 422) throw e;
}
}

// Now apply all labels (they all exist now)
if (labelsToAdd.length > 0) {
await github.rest.issues.addLabels({
owner,
repo,
issue_number: issueNumber,
labels: labelsToAdd
});
}
@@ -5,6 +5,7 @@ project_name: beszel
before:
hooks:
- go mod tidy
- go generate -run fetchsmartctl ./agent

builds:
- id: beszel
@@ -15,10 +16,21 @@ builds:
goos:
- linux
- darwin
- windows
- freebsd
goarch:
- amd64
- arm64
- arm
ignore:
- goos: windows
goarch: arm64
- goos: windows
goarch: arm
- goos: freebsd
goarch: arm64
- goos: freebsd
goarch: arm

- id: beszel-agent
binary: beszel-agent
@@ -64,6 +76,18 @@ builds:
- goos: windows
goarch: riscv64

- id: beszel-agent-linux-amd64-glibc
binary: beszel-agent
main: internal/cmd/agent/agent.go
env:
- CGO_ENABLED=0
flags:
- -tags=glibc
goos:
- linux
goarch:
- amd64

archives:
- id: beszel-agent
formats: [tar.gz]
@@ -77,6 +101,15 @@ archives:
- goos: windows
formats: [zip]

- id: beszel-agent-linux-amd64-glibc
formats: [tar.gz]
ids:
- beszel-agent-linux-amd64-glibc
name_template: >-
{{ .Binary }}_
{{- .Os }}_
{{- .Arch }}_glibc

- id: beszel
formats: [tar.gz]
ids:
@@ -85,6 +118,9 @@ archives:
{{ .Binary }}_
{{- .Os }}_
{{- .Arch }}
format_overrides:
- goos: windows
formats: [zip]

nfpms:
- id: beszel-agent
@@ -122,9 +158,7 @@ nfpms:
- debconf
scripts:
templates: ./supplemental/debian/templates
# Currently broken due to a bug in goreleaser
# https://github.com/goreleaser/goreleaser/issues/5487
#config: ./supplemental/debian/config.sh
config: ./supplemental/debian/config.sh

scoops:
- ids: [beszel-agent]
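The beszel-agent-linux-amd64-glibc build above differs from the default agent build only by the `-tags=glibc` flag. A minimal sketch of how such a build tag typically gates a source file; the file name and constant are illustrative, not taken from the repository:

```go
//go:build glibc

// Compiled only when the build runs with -tags=glibc, as in the
// beszel-agent-linux-amd64-glibc build above. A counterpart file would
// carry the constraint //go:build !glibc.
package agent

// usesGlibcBuild can be referenced by code paths that differ per variant.
const usesGlibcBuild = true
```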
Makefile (10 lines changed)
@@ -7,7 +7,7 @@ SKIP_WEB ?= false
# Set executable extension based on target OS
EXE_EXT := $(if $(filter windows,$(OS)),.exe,)

.PHONY: tidy build-agent build-hub build-hub-dev build clean lint dev-server dev-agent dev-hub dev generate-locales
.PHONY: tidy build-agent build-hub build-hub-dev build clean lint dev-server dev-agent dev-hub dev generate-locales fetch-smartctl-conditional
.DEFAULT_GOAL := build

clean:

@@ -46,8 +46,14 @@ build-dotnet-conditional:
fi; \
fi

# Download smartctl.exe at build time for Windows (skips if already present)
fetch-smartctl-conditional:
	@if [ "$(OS)" = "windows" ]; then \
		go generate -run fetchsmartctl ./agent; \
	fi

# Update build-agent to include conditional .NET build
build-agent: tidy build-dotnet-conditional
build-agent: tidy build-dotnet-conditional fetch-smartctl-conditional
	GOOS=$(OS) GOARCH=$(ARCH) go build -o ./build/beszel-agent_$(OS)_$(ARCH)$(EXE_EXT) -ldflags "-w -s" ./internal/cmd/agent

build-hub: tidy $(if $(filter false,$(SKIP_WEB)),build-web-ui)
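Both the GoReleaser hook and the new `fetch-smartctl-conditional` target run `go generate -run fetchsmartctl ./agent`, which executes only the generate directives whose command text matches the regexp "fetchsmartctl" and leaves any other `//go:generate` lines in the package alone. A hedged sketch of what such a directive could look like; the tool path and flags are hypothetical, not the repository's actual wiring:

```go
// Hypothetical file under ./agent. Only this directive matches the
// "fetchsmartctl" pattern passed to `go generate -run`.
//go:generate go run ./tools/fetchsmartctl -dest ./smartctl/smartctl.exe
package agent
```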
@@ -12,10 +12,12 @@ import (
"path/filepath"
"strings"
"sync"
"time"

"github.com/gliderlabs/ssh"
"github.com/henrygd/beszel"
"github.com/henrygd/beszel/agent/deltatracker"
"github.com/henrygd/beszel/internal/common"
"github.com/henrygd/beszel/internal/entities/system"
"github.com/shirou/gopsutil/v4/host"
gossh "golang.org/x/crypto/ssh"
@@ -29,12 +31,15 @@ type Agent struct {
fsNames []string // List of filesystem device names being monitored
fsStats map[string]*system.FsStats // Keeps track of disk stats for each filesystem
diskPrev map[uint16]map[string]prevDisk // Previous disk I/O counters per cache interval
diskUsageCacheDuration time.Duration // How long to cache disk usage (to avoid waking sleeping disks)
lastDiskUsageUpdate time.Time // Last time disk usage was collected
netInterfaces map[string]struct{} // Stores all valid network interfaces
netIoStats map[uint16]system.NetIoStats // Keeps track of bandwidth usage per cache interval
netInterfaceDeltaTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64] // Per-cache-time NIC delta trackers
dockerManager *dockerManager // Manages Docker API requests
sensorConfig *SensorConfig // Sensors config
systemInfo system.Info // Host system info
systemInfo system.Info // Host system info (dynamic)
systemDetails system.Details // Host system details (static, once-per-connection)
gpuManager *GPUManager // Manages GPU data
cache *systemDataCache // Cache for system stats based on cache time
connectionManager *ConnectionManager // Channel to signal connection events
@@ -43,6 +48,7 @@ type Agent struct {
dataDir string // Directory for persisting data
keys []gossh.PublicKey // SSH public keys
smartManager *SmartManager // Manages SMART data
systemdManager *systemdManager // Manages systemd services
}

// NewAgent creates a new agent with the given data directory for persisting data.
@@ -68,6 +74,17 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {

agent.memCalc, _ = GetEnv("MEM_CALC")
agent.sensorConfig = agent.newSensorConfig()

// Parse disk usage cache duration (e.g., "15m", "1h") to avoid waking sleeping disks
if diskUsageCache, exists := GetEnv("DISK_USAGE_CACHE"); exists {
if duration, err := time.ParseDuration(diskUsageCache); err == nil {
agent.diskUsageCacheDuration = duration
slog.Info("DISK_USAGE_CACHE", "duration", duration)
} else {
slog.Warn("Invalid DISK_USAGE_CACHE", "err", err)
}
}

// Set up slog with a log level determined by the LOG_LEVEL env var
if logLevelStr, exists := GetEnv("LOG_LEVEL"); exists {
switch strings.ToLower(logLevelStr) {
@@ -83,8 +100,21 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {

slog.Debug(beszel.Version)

// initialize docker manager
agent.dockerManager = newDockerManager()

// initialize system info
agent.initializeSystemInfo()
agent.refreshSystemDetails()

// SMART_INTERVAL env var to update smart data at this interval
if smartIntervalEnv, exists := GetEnv("SMART_INTERVAL"); exists {
if duration, err := time.ParseDuration(smartIntervalEnv); err == nil && duration > 0 {
agent.systemDetails.SmartInterval = duration
slog.Info("SMART_INTERVAL", "duration", duration)
} else {
slog.Warn("Invalid SMART_INTERVAL", "err", err)
}
}

// initialize connection manager
agent.connectionManager = newConnectionManager(agent)
@@ -98,8 +128,10 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
// initialize net io stats
agent.initializeNetIoStats()

// initialize docker manager
agent.dockerManager = newDockerManager(agent)
agent.systemdManager, err = newSystemdManager()
if err != nil {
slog.Debug("Systemd", "err", err)
}

agent.smartManager, err = NewSmartManager()
if err != nil {
@@ -114,7 +146,7 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {

// if debugging, print stats
if agent.debug {
slog.Debug("Stats", "data", agent.gatherStats(0))
slog.Debug("Stats", "data", agent.gatherStats(common.DataRequestOptions{CacheTimeMs: 60_000, IncludeDetails: true}))
}

return agent, nil
@@ -129,10 +161,11 @@ func GetEnv(key string) (value string, exists bool) {
return os.LookupEnv(key)
}

func (a *Agent) gatherStats(cacheTimeMs uint16) *system.CombinedData {
func (a *Agent) gatherStats(options common.DataRequestOptions) *system.CombinedData {
a.Lock()
defer a.Unlock()

cacheTimeMs := options.CacheTimeMs
data, isCached := a.cache.Get(cacheTimeMs)
if isCached {
slog.Debug("Cached data", "cacheTimeMs", cacheTimeMs)
@@ -143,6 +176,12 @@ func (a *Agent) gatherStats(cacheTimeMs uint16) *system.CombinedData {
Stats: a.getSystemStats(cacheTimeMs),
Info: a.systemInfo,
}

// Include static system details only when requested
if options.IncludeDetails {
data.Details = &a.systemDetails
}

// slog.Info("System data", "data", data, "cacheTimeMs", cacheTimeMs)

if a.dockerManager != nil {
@@ -154,7 +193,20 @@ func (a *Agent) gatherStats(cacheTimeMs uint16) *system.CombinedData {
}
}

// skip updating systemd services if cache time is not the default 60sec interval
if a.systemdManager != nil && cacheTimeMs == 60_000 {
totalCount := uint16(a.systemdManager.getServiceStatsCount())
if totalCount > 0 {
numFailed := a.systemdManager.getFailedServiceCount()
data.Info.Services = []uint16{totalCount, numFailed}
}
if a.systemdManager.hasFreshStats {
data.SystemdServices = a.systemdManager.getServiceStats(nil, false)
}
}

data.Stats.ExtraFs = make(map[string]*system.FsStats)
data.Info.ExtraFsPct = make(map[string]float64)
for name, stats := range a.fsStats {
if !stats.Root && stats.DiskTotal > 0 {
// Use custom name if available, otherwise use device name
@@ -163,6 +215,11 @@ func (a *Agent) gatherStats(cacheTimeMs uint16) *system.CombinedData {
key = stats.Name
}
data.Stats.ExtraFs[key] = stats
// Add percentages to Info struct for dashboard
if stats.DiskTotal > 0 {
pct := twoDecimals((stats.DiskUsed / stats.DiskTotal) * 100)
data.Info.ExtraFsPct[key] = pct
}
}
}
slog.Debug("Extra FS", "data", data.Stats.ExtraFs)
@@ -187,8 +244,9 @@ func (a *Agent) getFingerprint() string {

// if no fingerprint is found, generate one
fingerprint, err := host.HostID()
if err != nil || fingerprint == "" {
fingerprint = a.systemInfo.Hostname + a.systemInfo.CpuModel
// we ignore a commonly known "product_uuid" known not to be unique
if err != nil || fingerprint == "" || fingerprint == "03000200-0400-0500-0006-000700080009" {
fingerprint = a.systemDetails.Hostname + a.systemDetails.CpuModel
}

// hash fingerprint
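The DISK_USAGE_CACHE and SMART_INTERVAL values introduced above are both parsed with time.ParseDuration, so any Go duration string is accepted. A standalone illustration of the accepted formats (not repository code):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Valid values like "90s", "15m", "1h" parse cleanly; anything else is
	// rejected, which is when the agent logs the "Invalid ..." warning.
	for _, v := range []string{"90s", "15m", "1h", "24h", "bogus"} {
		d, err := time.ParseDuration(v)
		fmt.Printf("%-6s -> %v (err: %v)\n", v, d, err)
	}
}
```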
@@ -22,7 +22,7 @@ func createTestCacheData() *system.CombinedData {
DiskTotal: 100000,
},
Info: system.Info{
Hostname: "test-host",
AgentVersion: "0.12.0",
},
Containers: []*container.Stats{
{
@@ -128,7 +128,7 @@ func TestCacheMultipleIntervals(t *testing.T) {
Mem: 16384,
},
Info: system.Info{
Hostname: "test-host-2",
AgentVersion: "0.12.0",
},
Containers: []*container.Stats{},
}
@@ -171,7 +171,7 @@ func TestCacheOverwrite(t *testing.T) {
Mem: 32768,
},
Info: system.Info{
Hostname: "updated-host",
AgentVersion: "0.12.0",
},
Containers: []*container.Stats{},
}
@@ -5,15 +5,16 @@ package battery

import (
"errors"
"fmt"
"os"
"log/slog"
"math"

"github.com/distatus/battery"
)

var systemHasBattery = false
var haveCheckedBattery = false
var (
systemHasBattery = false
haveCheckedBattery = false
)

// HasReadableBattery checks if the system has a battery and returns true if it does.
func HasReadableBattery() bool {
@@ -21,22 +22,20 @@ func HasReadableBattery() bool {
return systemHasBattery
}
haveCheckedBattery = true
batteries,err := battery.GetAll()
if err != nil {
// even if there's errors getting some batteries, the system
// definitely has a battery if the list is not empty.
// This list will include everything `battery` can find,
// including things like bluetooth devices.
fmt.Fprintln(os.Stderr, err)
batteries, err := battery.GetAll()
for _, bat := range batteries {
if bat != nil && (bat.Full > 0 || bat.Design > 0) {
systemHasBattery = true
break
}
}
systemHasBattery = len(batteries) > 0
if !systemHasBattery {
slog.Debug("No battery found", "err", err)
}
return systemHasBattery
}

// GetBatteryStats returns the current battery percent and charge state
// GetBatteryStats returns the current battery percent and charge state
// percent = (current charge of all batteries) / (sum of designed/full capacity of all batteries)
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
if !HasReadableBattery() {
@@ -53,21 +52,26 @@ func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
totalCharge := float64(0)
errs, partialErrs := err.(battery.Errors)

batteryState = math.MaxUint8

for i, bat := range batteries {
if partialErrs && errs[i] != nil {
// if there were some errors, like missing data, skip it
continue
}
if bat.Full == 0 {
if bat == nil || bat.Full == 0 {
// skip batteries with no capacity. Charge is unlikely to ever be zero, but
// we can't guarantee that, so don't skip based on charge.
continue
}
totalCapacity += bat.Full
totalCharge += bat.Current
totalCharge += min(bat.Current, bat.Full)
if bat.State.Raw >= 0 {
batteryState = uint8(bat.State.Raw)
}
}

if totalCapacity == 0 {
if totalCapacity == 0 || batteryState == math.MaxUint8 {
// for macs there's sometimes a ghost battery with 0 capacity
// https://github.com/distatus/battery/issues/34
// Instead of skipping over those batteries, we'll check for total 0 capacity
@@ -76,6 +80,5 @@ func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
}

batteryPercent = uint8(totalCharge / totalCapacity * 100)
batteryState = uint8(batteries[0].State.Raw)
return batteryPercent, batteryState, nil
}
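The reworked GetBatteryStats above aggregates every usable battery: percent = sum of min(Current, Full) divided by the sum of Full, with zero-capacity "ghost" batteries skipped. A standalone worked example with made-up capacities (units as reported by the distatus/battery library, typically mWh or mAh):

```go
package main

import "fmt"

func main() {
	full := []float64{50000, 30000}    // bat.Full for two batteries
	current := []float64{52000, 12000} // bat.Current; the first one over-reports

	totalCapacity, totalCharge := 0.0, 0.0
	for i := range full {
		if full[i] == 0 {
			continue // skip zero-capacity ghost batteries, as above
		}
		totalCapacity += full[i]
		totalCharge += min(current[i], full[i]) // clamp, mirroring min(bat.Current, bat.Full)
	}
	// (50000 + 12000) / (50000 + 30000) * 100 = 77.5 -> truncated to 77
	fmt.Printf("battery: %d%%\n", int(totalCharge/totalCapacity*100))
}
```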
@@ -15,8 +15,6 @@ import (

"github.com/henrygd/beszel"
"github.com/henrygd/beszel/internal/common"
"github.com/henrygd/beszel/internal/entities/smart"
"github.com/henrygd/beszel/internal/entities/system"

"github.com/fxamacker/cbor/v2"
"github.com/lxzan/gws"
@@ -200,7 +198,7 @@ func (client *WebSocketClient) handleAuthChallenge(msg *common.HubRequest[cbor.R

if authRequest.NeedSysInfo {
response.Name, _ = GetEnv("SYSTEM_NAME")
response.Hostname = client.agent.systemInfo.Hostname
response.Hostname = client.agent.systemDetails.Hostname
serverAddr := client.agent.connectionManager.serverOptions.Addr
_, response.Port, _ = net.SplitHostPort(serverAddr)
}
@@ -258,38 +256,16 @@ func (client *WebSocketClient) sendMessage(data any) error {
return err
}

// sendResponse sends a response with optional request ID for the new protocol
// sendResponse sends a response with optional request ID.
// For ID-based requests, we must populate legacy typed fields for backward
// compatibility with older hubs (<= 0.17) that don't read the generic Data field.
func (client *WebSocketClient) sendResponse(data any, requestID *uint32) error {
if requestID != nil {
// New format with ID - use typed fields
response := common.AgentResponse{
Id: requestID,
}

// Set the appropriate typed field based on data type
switch v := data.(type) {
case *system.CombinedData:
response.SystemData = v
case *common.FingerprintResponse:
response.Fingerprint = v
case string:
response.String = &v
case map[string]smart.SmartData:
response.SmartData = v
// case []byte:
// response.RawBytes = v
// case string:
// response.RawBytes = []byte(v)
default:
// For any other type, convert to error
response.Error = fmt.Sprintf("unsupported response type: %T", data)
}

response := newAgentResponse(data, requestID)
return client.sendMessage(response)
} else {
// Legacy format - send data directly
return client.sendMessage(data)
}
// Legacy format - send data directly
return client.sendMessage(data)
}

// getUserAgent returns one of two User-Agent strings based on current time.
agent/cpu.go (92 lines changed)
@@ -4,10 +4,12 @@ import (
"math"
"runtime"

"github.com/henrygd/beszel/internal/entities/system"
"github.com/shirou/gopsutil/v4/cpu"
)

var lastCpuTimes = make(map[uint16]cpu.TimesStat)
var lastPerCoreCpuTimes = make(map[uint16][]cpu.TimesStat)

// init initializes the CPU monitoring by storing the initial CPU times
// for the default 60-second cache interval.
@@ -15,23 +17,92 @@ func init() {
if times, err := cpu.Times(false); err == nil {
lastCpuTimes[60000] = times[0]
}
if perCoreTimes, err := cpu.Times(true); err == nil {
lastPerCoreCpuTimes[60000] = perCoreTimes
}
}

// getCpuPercent calculates the CPU usage percentage using cached previous measurements.
// It uses the specified cache time interval to determine the time window for calculation.
// Returns the CPU usage percentage (0-100) and any error encountered.
func getCpuPercent(cacheTimeMs uint16) (float64, error) {
// CpuMetrics contains detailed CPU usage breakdown
type CpuMetrics struct {
Total float64
User float64
System float64
Iowait float64
Steal float64
Idle float64
}

// getCpuMetrics calculates detailed CPU usage metrics using cached previous measurements.
// It returns percentages for total, user, system, iowait, and steal time.
func getCpuMetrics(cacheTimeMs uint16) (CpuMetrics, error) {
times, err := cpu.Times(false)
if err != nil || len(times) == 0 {
return 0, err
return CpuMetrics{}, err
}
// if cacheTimeMs is not in lastCpuTimes, use 60000 as fallback lastCpuTime
if _, ok := lastCpuTimes[cacheTimeMs]; !ok {
lastCpuTimes[cacheTimeMs] = lastCpuTimes[60000]
}
delta := calculateBusy(lastCpuTimes[cacheTimeMs], times[0])

t1 := lastCpuTimes[cacheTimeMs]
t2 := times[0]

t1All, _ := getAllBusy(t1)
t2All, _ := getAllBusy(t2)

totalDelta := t2All - t1All
if totalDelta <= 0 {
return CpuMetrics{}, nil
}

metrics := CpuMetrics{
Total: calculateBusy(t1, t2),
User: clampPercent((t2.User - t1.User) / totalDelta * 100),
System: clampPercent((t2.System - t1.System) / totalDelta * 100),
Iowait: clampPercent((t2.Iowait - t1.Iowait) / totalDelta * 100),
Steal: clampPercent((t2.Steal - t1.Steal) / totalDelta * 100),
Idle: clampPercent((t2.Idle - t1.Idle) / totalDelta * 100),
}

lastCpuTimes[cacheTimeMs] = times[0]
return delta, nil
return metrics, nil
}

// clampPercent ensures the percentage is between 0 and 100
func clampPercent(value float64) float64 {
return math.Min(100, math.Max(0, value))
}

// getPerCoreCpuUsage calculates per-core CPU busy usage as integer percentages (0-100).
// It uses cached previous measurements for the provided cache interval.
func getPerCoreCpuUsage(cacheTimeMs uint16) (system.Uint8Slice, error) {
perCoreTimes, err := cpu.Times(true)
if err != nil || len(perCoreTimes) == 0 {
return nil, err
}

// Initialize cache if needed
if _, ok := lastPerCoreCpuTimes[cacheTimeMs]; !ok {
lastPerCoreCpuTimes[cacheTimeMs] = lastPerCoreCpuTimes[60000]
}

lastTimes := lastPerCoreCpuTimes[cacheTimeMs]

// Limit to the number of cores available in both samples
length := len(perCoreTimes)
if len(lastTimes) < length {
length = len(lastTimes)
}

usage := make([]uint8, length)
for i := 0; i < length; i++ {
t1 := lastTimes[i]
t2 := perCoreTimes[i]
usage[i] = uint8(math.Round(calculateBusy(t1, t2)))
}

lastPerCoreCpuTimes[cacheTimeMs] = perCoreTimes
return usage, nil
}

// calculateBusy calculates the CPU busy percentage between two time points.
@@ -41,13 +112,10 @@ func calculateBusy(t1, t2 cpu.TimesStat) float64 {
t1All, t1Busy := getAllBusy(t1)
t2All, t2Busy := getAllBusy(t2)

if t2Busy <= t1Busy {
if t2All <= t1All || t2Busy <= t1Busy {
return 0
}
if t2All <= t1All {
return 100
}
return math.Min(100, math.Max(0, (t2Busy-t1Busy)/(t2All-t1All)*100))
return clampPercent((t2Busy - t1Busy) / (t2All - t1All) * 100)
}

// getAllBusy calculates the total CPU time and busy CPU time from CPU times statistics.
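calculateBusy and getCpuMetrics above derive usage from the difference between two cumulative cpu.TimesStat samples, clamped to the 0-100 range. The body of getAllBusy is not part of this capture, so the busy-time composition in the sketch below assumes gopsutil's usual definition (everything except idle and iowait); the sample values are made up:

```go
package main

import (
	"fmt"

	"github.com/shirou/gopsutil/v4/cpu"
)

// busyPercent mirrors the delta calculation above: (busy2-busy1)/(all2-all1)*100, clamped.
func busyPercent(t1, t2 cpu.TimesStat) float64 {
	busy1 := t1.User + t1.System + t1.Nice + t1.Irq + t1.Softirq + t1.Steal
	busy2 := t2.User + t2.System + t2.Nice + t2.Irq + t2.Softirq + t2.Steal
	all1 := busy1 + t1.Idle + t1.Iowait
	all2 := busy2 + t2.Idle + t2.Iowait
	if all2 <= all1 || busy2 <= busy1 {
		return 0
	}
	return min(max((busy2-busy1)/(all2-all1)*100, 0), 100)
}

func main() {
	// Two hand-made samples standing in for successive cpu.Times(false) reads.
	t1 := cpu.TimesStat{User: 100, System: 50, Idle: 850}
	t2 := cpu.TimesStat{User: 130, System: 60, Idle: 910}
	fmt.Printf("busy: %.1f%%\n", busyPercent(t1, t2)) // 40 busy / 100 total = 40.0%
}
```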
@@ -31,6 +31,7 @@ func (a *Agent) initializeDiskInfo() {
filesystem, _ := GetEnv("FILESYSTEM")
efPath := "/extra-filesystems"
hasRoot := false
isWindows := runtime.GOOS == "windows"

partitions, err := disk.Partitions(false)
if err != nil {
@@ -38,6 +39,13 @@ func (a *Agent) initializeDiskInfo() {
}
slog.Debug("Disk", "partitions", partitions)

// trim trailing backslash for Windows devices (#1361)
if isWindows {
for i, p := range partitions {
partitions[i].Device = strings.TrimSuffix(p.Device, "\\")
}
}

// ioContext := context.WithValue(a.sensorsContext,
// common.EnvKey, common.EnvMap{common.HostProcEnvKey: "/tmp/testproc"},
// )
@@ -52,7 +60,7 @@ func (a *Agent) initializeDiskInfo() {
// Helper function to add a filesystem to fsStats if it doesn't exist
addFsStat := func(device, mountpoint string, root bool, customName ...string) {
var key string
if runtime.GOOS == "windows" {
if isWindows {
key = device
} else {
key = filepath.Base(device)
@@ -87,6 +95,9 @@ func (a *Agent) initializeDiskInfo() {
}
}

// Get the appropriate root mount point for this system
rootMountPoint := a.getRootMountPoint()

// Use FILESYSTEM env var to find root filesystem
if filesystem != "" {
for _, p := range partitions {
@@ -130,7 +141,7 @@ func (a *Agent) initializeDiskInfo() {
for _, p := range partitions {
// fmt.Println(p.Device, p.Mountpoint)
// Binary root fallback or docker root fallback
if !hasRoot && (p.Mountpoint == "/" || (p.Mountpoint == "/etc/hosts" && strings.HasPrefix(p.Device, "/dev"))) {
if !hasRoot && (p.Mountpoint == rootMountPoint || (p.Mountpoint == "/etc/hosts" && strings.HasPrefix(p.Device, "/dev"))) {
fs, match := findIoDevice(filepath.Base(p.Device), diskIoCounters, a.fsStats)
if match {
addFsStat(fs, p.Mountpoint, true)
@@ -166,8 +177,8 @@ func (a *Agent) initializeDiskInfo() {
// If no root filesystem set, use fallback
if !hasRoot {
rootDevice, _ := findIoDevice(filepath.Base(filesystem), diskIoCounters, a.fsStats)
slog.Info("Root disk", "mountpoint", "/", "io", rootDevice)
a.fsStats[rootDevice] = &system.FsStats{Root: true, Mountpoint: "/"}
slog.Info("Root disk", "mountpoint", rootMountPoint, "io", rootDevice)
a.fsStats[rootDevice] = &system.FsStats{Root: true, Mountpoint: rootMountPoint}
}

a.initializeDiskIoStats(diskIoCounters)
@@ -214,8 +225,19 @@ func (a *Agent) initializeDiskIoStats(diskIoCounters map[string]disk.IOCountersS

// Updates disk usage statistics for all monitored filesystems
func (a *Agent) updateDiskUsage(systemStats *system.Stats) {
// Check if we should skip extra filesystem collection to avoid waking sleeping disks.
// Root filesystem is always updated since it can't be sleeping while the agent runs.
// Always collect on first call (lastDiskUsageUpdate is zero) or if caching is disabled.
cacheExtraFs := a.diskUsageCacheDuration > 0 &&
!a.lastDiskUsageUpdate.IsZero() &&
time.Since(a.lastDiskUsageUpdate) < a.diskUsageCacheDuration

// disk usage
for _, stats := range a.fsStats {
// Skip non-root filesystems if caching is active
if cacheExtraFs && !stats.Root {
continue
}
if d, err := disk.Usage(stats.Mountpoint); err == nil {
stats.DiskTotal = bytesToGigabytes(d.Total)
stats.DiskUsed = bytesToGigabytes(d.Used)
@@ -233,6 +255,11 @@ func (a *Agent) updateDiskUsage(systemStats *system.Stats) {
stats.TotalWrite = 0
}
}

// Update the last disk usage update time when we've collected extra filesystems
if !cacheExtraFs {
a.lastDiskUsageUpdate = time.Now()
}
}

// Updates disk I/O statistics for all monitored filesystems
@@ -304,3 +331,32 @@ func (a *Agent) updateDiskIo(cacheTimeMs uint16, systemStats *system.Stats) {
}
}
}

// getRootMountPoint returns the appropriate root mount point for the system
// For immutable systems like Fedora Silverblue, it returns /sysroot instead of /
func (a *Agent) getRootMountPoint() string {
// 1. Check if /etc/os-release contains indicators of an immutable system
if osReleaseContent, err := os.ReadFile("/etc/os-release"); err == nil {
content := string(osReleaseContent)
if strings.Contains(content, "fedora") && strings.Contains(content, "silverblue") ||
strings.Contains(content, "coreos") ||
strings.Contains(content, "flatcar") ||
strings.Contains(content, "rhel-atomic") ||
strings.Contains(content, "centos-atomic") {
// Verify that /sysroot exists before returning it
if _, err := os.Stat("/sysroot"); err == nil {
return "/sysroot"
}
}
}

// 2. Check if /run/ostree is present (ostree-based systems like Silverblue)
if _, err := os.Stat("/run/ostree"); err == nil {
// Verify that /sysroot exists before returning it
if _, err := os.Stat("/sysroot"); err == nil {
return "/sysroot"
}
}

return "/"
}
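updateDiskUsage above reads each monitored mountpoint through gopsutil's disk.Usage, skipping non-root filesystems while the DISK_USAGE_CACHE window is still fresh. A minimal standalone call for reference; the mountpoint is just an example:

```go
package main

import (
	"fmt"

	"github.com/shirou/gopsutil/v4/disk"
)

func main() {
	// disk.Usage stats a single mountpoint; this is the call that can wake a
	// sleeping disk, which is what the caching above avoids.
	u, err := disk.Usage("/")
	if err != nil {
		panic(err)
	}
	fmt.Printf("total=%.1f GiB used=%.1f GiB (%.1f%%)\n",
		float64(u.Total)/(1<<30), float64(u.Used)/(1<<30), u.UsedPercent)
}
```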
@@ -7,6 +7,7 @@ import (
"os"
"strings"
"testing"
"time"

"github.com/henrygd/beszel/internal/entities/system"
"github.com/shirou/gopsutil/v4/disk"
@@ -233,3 +234,86 @@ func TestExtraFsKeyGeneration(t *testing.T) {
})
}
}

func TestDiskUsageCaching(t *testing.T) {
t.Run("caching disabled updates all filesystems", func(t *testing.T) {
agent := &Agent{
fsStats: map[string]*system.FsStats{
"sda": {Root: true, Mountpoint: "/"},
"sdb": {Root: false, Mountpoint: "/mnt/storage"},
},
diskUsageCacheDuration: 0, // caching disabled
}

var stats system.Stats
agent.updateDiskUsage(&stats)

// Both should be updated (non-zero values from disk.Usage)
// Root stats should be populated in systemStats
assert.True(t, agent.lastDiskUsageUpdate.IsZero() || !agent.lastDiskUsageUpdate.IsZero(),
"lastDiskUsageUpdate should be set when caching is disabled")
})

t.Run("caching enabled always updates root filesystem", func(t *testing.T) {
agent := &Agent{
fsStats: map[string]*system.FsStats{
"sda": {Root: true, Mountpoint: "/", DiskTotal: 100, DiskUsed: 50},
"sdb": {Root: false, Mountpoint: "/mnt/storage", DiskTotal: 200, DiskUsed: 100},
},
diskUsageCacheDuration: 1 * time.Hour,
lastDiskUsageUpdate: time.Now(), // cache is fresh
}

// Store original extra fs values
originalExtraTotal := agent.fsStats["sdb"].DiskTotal
originalExtraUsed := agent.fsStats["sdb"].DiskUsed

var stats system.Stats
agent.updateDiskUsage(&stats)

// Root should be updated (systemStats populated from disk.Usage call)
// We can't easily check if disk.Usage was called, but we verify the flow works

// Extra filesystem should retain cached values (not reset)
assert.Equal(t, originalExtraTotal, agent.fsStats["sdb"].DiskTotal,
"extra filesystem DiskTotal should be unchanged when cached")
assert.Equal(t, originalExtraUsed, agent.fsStats["sdb"].DiskUsed,
"extra filesystem DiskUsed should be unchanged when cached")
})

t.Run("first call always updates all filesystems", func(t *testing.T) {
agent := &Agent{
fsStats: map[string]*system.FsStats{
"sda": {Root: true, Mountpoint: "/"},
"sdb": {Root: false, Mountpoint: "/mnt/storage"},
},
diskUsageCacheDuration: 1 * time.Hour,
// lastDiskUsageUpdate is zero (first call)
}

var stats system.Stats
agent.updateDiskUsage(&stats)

// After first call, lastDiskUsageUpdate should be set
assert.False(t, agent.lastDiskUsageUpdate.IsZero(),
"lastDiskUsageUpdate should be set after first call")
})

t.Run("expired cache updates extra filesystems", func(t *testing.T) {
agent := &Agent{
fsStats: map[string]*system.FsStats{
"sda": {Root: true, Mountpoint: "/"},
"sdb": {Root: false, Mountpoint: "/mnt/storage"},
},
diskUsageCacheDuration: 1 * time.Millisecond,
lastDiskUsageUpdate: time.Now().Add(-1 * time.Second), // cache expired
}

var stats system.Stats
agent.updateDiskUsage(&stats)

// lastDiskUsageUpdate should be refreshed since cache expired
assert.True(t, time.Since(agent.lastDiskUsageUpdate) < time.Second,
"lastDiskUsageUpdate should be refreshed when cache expires")
})
}
119
agent/docker.go
@@ -13,6 +13,8 @@ import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -23,6 +25,10 @@ import (
|
||||
"github.com/blang/semver"
|
||||
)
|
||||
|
||||
// ansiEscapePattern matches ANSI escape sequences (colors, cursor movement, etc.)
// This includes CSI sequences like \x1b[...m and simple escapes like \x1b[K
var ansiEscapePattern = regexp.MustCompile(`\x1b\[[0-9;]*[a-zA-Z]|\x1b\][^\x07]*\x07|\x1b[@-Z\\-_]`)
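As a quick illustration of the pattern in use (the input line is made up, not taken from the repo's tests):

// Sketch: stripping ANSI color codes from a log line (hypothetical input).
line := "\x1b[31mERROR\x1b[0m something failed"
clean := ansiEscapePattern.ReplaceAllString(line, "")
// clean == "ERROR something failed"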
|
||||
|
||||
const (
|
||||
// Docker API timeout in milliseconds
|
||||
dockerTimeoutMs = 2100
|
||||
@@ -32,6 +38,12 @@ const (
|
||||
maxMemoryUsage uint64 = 100 * 1024 * 1024 * 1024 * 1024
|
||||
// Number of log lines to request when fetching container logs
|
||||
dockerLogsTail = 200
|
||||
// Maximum size of a single log frame (1MB) to prevent memory exhaustion
|
||||
// A single log line larger than 1MB is likely an error or misconfiguration
|
||||
maxLogFrameSize = 1024 * 1024
|
||||
// Maximum total log content size (5MB) to prevent memory exhaustion
|
||||
// This provides a reasonable limit for network transfer and browser rendering
|
||||
maxTotalLogSize = 5 * 1024 * 1024
|
||||
)
|
||||
|
||||
type dockerManager struct {
|
||||
@@ -47,6 +59,8 @@ type dockerManager struct {
|
||||
buf *bytes.Buffer // Buffer to store and read response bodies
|
||||
decoder *json.Decoder // Reusable JSON decoder that reads from buf
|
||||
apiStats *container.ApiStats // Reusable API stats object
|
||||
excludeContainers []string // Patterns to exclude containers by name
|
||||
usingPodman bool // Whether the Docker Engine API is running on Podman
|
||||
|
||||
// Cache-time-aware tracking for CPU stats (similar to cpu.go)
|
||||
// Maps cache time intervals to container-specific CPU usage tracking
|
||||
@@ -88,6 +102,19 @@ func (d *dockerManager) dequeue() {
|
||||
}
|
||||
}
|
||||
|
||||
// shouldExcludeContainer checks if a container name matches any exclusion pattern
func (dm *dockerManager) shouldExcludeContainer(name string) bool {
    if len(dm.excludeContainers) == 0 {
        return false
    }
    for _, pattern := range dm.excludeContainers {
        if match, _ := path.Match(pattern, name); match {
            return true
        }
    }
    return false
}
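A small sketch of the glob semantics this relies on; path.Match is the standard library function used above, and the patterns and names below are illustrative only.

// Sketch: path.Match glob semantics used by shouldExcludeContainer.
m1, _ := path.Match("test-*", "test-web")      // m1 == true, container would be excluded
m2, _ := path.Match("*-staging", "myapp-prod") // m2 == false, container is kept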
|
||||
|
||||
// Returns stats for all running containers with cache-time-aware delta tracking
|
||||
func (dm *dockerManager) getDockerStats(cacheTimeMs uint16) ([]*container.Stats, error) {
|
||||
resp, err := dm.client.Get("http://localhost/containers/json")
|
||||
@@ -115,6 +142,13 @@ func (dm *dockerManager) getDockerStats(cacheTimeMs uint16) ([]*container.Stats,
|
||||
|
||||
for _, ctr := range dm.apiContainerList {
|
||||
ctr.IdShort = ctr.Id[:12]
|
||||
|
||||
// Skip this container if it matches the exclusion pattern
|
||||
if dm.shouldExcludeContainer(ctr.Names[0][1:]) {
|
||||
slog.Debug("Excluding container", "name", ctr.Names[0][1:])
|
||||
continue
|
||||
}
|
||||
|
||||
dm.validIds[ctr.IdShort] = struct{}{}
|
||||
// check if container is less than 1 minute old (possible restart)
|
||||
// note: can't use Created field because it's not updated on restart
|
||||
@@ -301,6 +335,8 @@ func validateCpuPercentage(cpuPct float64, containerName string) error {
|
||||
func updateContainerStatsValues(stats *container.Stats, cpuPct float64, usedMemory uint64, sent_delta, recv_delta uint64, readTime time.Time) {
|
||||
stats.Cpu = twoDecimals(cpuPct)
|
||||
stats.Mem = bytesToMegabytes(float64(usedMemory))
|
||||
stats.Bandwidth = [2]uint64{sent_delta, recv_delta}
|
||||
// TODO(0.19+): stop populating NetworkSent/NetworkRecv (deprecated in 0.18.3)
|
||||
stats.NetworkSent = bytesToMegabytes(float64(sent_delta))
|
||||
stats.NetworkRecv = bytesToMegabytes(float64(recv_delta))
|
||||
stats.PrevReadTime = readTime
|
||||
@@ -369,6 +405,8 @@ func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeM
|
||||
// reset current stats
|
||||
stats.Cpu = 0
|
||||
stats.Mem = 0
|
||||
stats.Bandwidth = [2]uint64{0, 0}
|
||||
// TODO(0.19+): stop populating NetworkSent/NetworkRecv (deprecated in 0.18.3)
|
||||
stats.NetworkSent = 0
|
||||
stats.NetworkRecv = 0
|
||||
|
||||
@@ -445,7 +483,7 @@ func (dm *dockerManager) deleteContainerStatsSync(id string) {
|
||||
}
|
||||
|
||||
// Creates a new http client for Docker or Podman API
|
||||
func newDockerManager(a *Agent) *dockerManager {
|
||||
func newDockerManager() *dockerManager {
|
||||
dockerHost, exists := GetEnv("DOCKER_HOST")
|
||||
if exists {
|
||||
// return nil if set to empty string
|
||||
@@ -497,6 +535,19 @@ func newDockerManager(a *Agent) *dockerManager {
|
||||
userAgent: "Docker-Client/",
|
||||
}
|
||||
|
||||
// Read container exclusion patterns from environment variable
var excludeContainers []string
if excludeStr, set := GetEnv("EXCLUDE_CONTAINERS"); set && excludeStr != "" {
    parts := strings.SplitSeq(excludeStr, ",")
    for part := range parts {
        trimmed := strings.TrimSpace(part)
        if trimmed != "" {
            excludeContainers = append(excludeContainers, trimmed)
        }
    }
    slog.Info("EXCLUDE_CONTAINERS", "patterns", excludeContainers)
}
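For reference, a hedged example of supplying the exclusion list in a test; the BESZEL_AGENT_ prefix is an assumption based on the prefixed variables used elsewhere in the test suite, and plain EXCLUDE_CONTAINERS may work as well.

// Sketch: setting exclusion patterns in a test (env var prefix is assumed, see note above).
t.Setenv("BESZEL_AGENT_EXCLUDE_CONTAINERS", "test-*, *-staging, temp-container")
// After parsing, excludeContainers == []string{"test-*", "*-staging", "temp-container"}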
|
||||
|
||||
manager := &dockerManager{
|
||||
client: &http.Client{
|
||||
Timeout: timeout,
|
||||
@@ -506,6 +557,7 @@ func newDockerManager(a *Agent) *dockerManager {
|
||||
sem: make(chan struct{}, 5),
|
||||
apiContainerList: []*container.ApiInfo{},
|
||||
apiStats: &container.ApiStats{},
|
||||
excludeContainers: excludeContainers,
|
||||
|
||||
// Initialize cache-time-aware tracking structures
|
||||
lastCpuContainer: make(map[uint16]map[string]uint64),
|
||||
@@ -517,7 +569,7 @@ func newDockerManager(a *Agent) *dockerManager {
|
||||
|
||||
// If using podman, return client
|
||||
if strings.Contains(dockerHost, "podman") {
|
||||
a.systemInfo.Podman = true
|
||||
manager.usingPodman = true
|
||||
manager.goodDockerVersion = true
|
||||
return manager
|
||||
}
|
||||
@@ -646,17 +698,27 @@ func (dm *dockerManager) getLogs(ctx context.Context, containerID string) (strin
|
||||
}
|
||||
|
||||
var builder strings.Builder
|
||||
if err := decodeDockerLogStream(resp.Body, &builder); err != nil {
|
||||
multiplexed := resp.Header.Get("Content-Type") == "application/vnd.docker.multiplexed-stream"
|
||||
if err := decodeDockerLogStream(resp.Body, &builder, multiplexed); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return builder.String(), nil
|
||||
// Strip ANSI escape sequences from logs for clean display in web UI
|
||||
logs := builder.String()
|
||||
if strings.Contains(logs, "\x1b") {
|
||||
logs = ansiEscapePattern.ReplaceAllString(logs, "")
|
||||
}
|
||||
return logs, nil
|
||||
}
|
||||
|
||||
func decodeDockerLogStream(reader io.Reader, builder *strings.Builder) error {
|
||||
func decodeDockerLogStream(reader io.Reader, builder *strings.Builder, multiplexed bool) error {
|
||||
if !multiplexed {
|
||||
_, err := io.Copy(builder, io.LimitReader(reader, maxTotalLogSize))
|
||||
return err
|
||||
}
|
||||
const headerSize = 8
|
||||
var header [headerSize]byte
|
||||
buf := make([]byte, 0, dockerLogsTail*200)
|
||||
totalBytesRead := 0
|
||||
|
||||
for {
|
||||
if _, err := io.ReadFull(reader, header[:]); err != nil {
|
||||
@@ -671,30 +733,45 @@ func decodeDockerLogStream(reader io.Reader, builder *strings.Builder) error {
|
||||
continue
|
||||
}
|
||||
|
||||
buf = allocateBuffer(buf, int(frameLen))
|
||||
if _, err := io.ReadFull(reader, buf[:frameLen]); err != nil {
|
||||
// Prevent memory exhaustion from excessively large frames
|
||||
if frameLen > maxLogFrameSize {
|
||||
return fmt.Errorf("log frame size (%d) exceeds maximum (%d)", frameLen, maxLogFrameSize)
|
||||
}
|
||||
|
||||
// Check if reading this frame would exceed total log size limit
|
||||
if totalBytesRead+int(frameLen) > maxTotalLogSize {
|
||||
// Read and discard remaining data to avoid blocking
|
||||
_, _ = io.CopyN(io.Discard, reader, int64(frameLen))
|
||||
slog.Debug("Truncating logs: limit reached", "read", totalBytesRead, "limit", maxTotalLogSize)
|
||||
return nil
|
||||
}
|
||||
|
||||
n, err := io.CopyN(builder, reader, int64(frameLen))
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
|
||||
if len(buf) > 0 {
|
||||
builder.Write(buf[:min(int(frameLen), len(buf))])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
builder.Write(buf[:frameLen])
|
||||
totalBytesRead += int(n)
|
||||
}
|
||||
}
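For reference, the 8-byte headers consumed above follow Docker's multiplexed attach/logs framing: byte 0 is the stream id and bytes 4-7 hold the big-endian payload length. A minimal sketch of decoding one header, separate from the function above and assuming an encoding/binary import; readFrameHeader is a hypothetical helper, not part of the agent.

// Sketch: reading one Docker multiplexed-stream frame header.
// Byte 0 is the stream id (1 = stdout, 2 = stderr); bytes 4-7 are the big-endian payload length.
func readFrameHeader(r io.Reader) (streamID byte, frameLen uint32, err error) {
    var header [8]byte
    if _, err = io.ReadFull(r, header[:]); err != nil {
        return 0, 0, err
    }
    return header[0], binary.BigEndian.Uint32(header[4:8]), nil
}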
|
||||
|
||||
func allocateBuffer(current []byte, needed int) []byte {
|
||||
if cap(current) >= needed {
|
||||
return current[:needed]
|
||||
// GetHostInfo fetches the system info from Docker
|
||||
func (dm *dockerManager) GetHostInfo() (info container.HostInfo, err error) {
|
||||
resp, err := dm.client.Get("http://localhost/info")
|
||||
if err != nil {
|
||||
return info, err
|
||||
}
|
||||
return make([]byte, needed)
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
|
||||
return info, err
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
func (dm *dockerManager) IsPodman() bool {
|
||||
return dm.usingPodman
|
||||
}
|
||||
|
||||
@@ -4,8 +4,10 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -182,11 +184,12 @@ func TestUpdateContainerStatsValues(t *testing.T) {
|
||||
// Check memory (should be converted to MB: 1048576 bytes = 1 MB)
|
||||
assert.Equal(t, 1.0, stats.Mem)
|
||||
|
||||
// Check network sent (should be converted to MB: 524288 bytes = 0.5 MB)
|
||||
assert.Equal(t, 0.5, stats.NetworkSent)
|
||||
// Check bandwidth (raw bytes)
|
||||
assert.Equal(t, [2]uint64{524288, 262144}, stats.Bandwidth)
|
||||
|
||||
// Check network recv (should be converted to MB: 262144 bytes = 0.25 MB)
|
||||
assert.Equal(t, 0.25, stats.NetworkRecv)
|
||||
// Deprecated fields still populated for backward compatibility with older hubs
|
||||
assert.Equal(t, 0.5, stats.NetworkSent) // 524288 bytes = 0.5 MB
|
||||
assert.Equal(t, 0.25, stats.NetworkRecv) // 262144 bytes = 0.25 MB
|
||||
|
||||
// Check read time
|
||||
assert.Equal(t, testTime, stats.PrevReadTime)
|
||||
@@ -525,8 +528,10 @@ func TestContainerStatsInitialization(t *testing.T) {
|
||||
|
||||
assert.Equal(t, 45.67, stats.Cpu)
|
||||
assert.Equal(t, 2.0, stats.Mem)
|
||||
assert.Equal(t, 1.0, stats.NetworkSent)
|
||||
assert.Equal(t, 0.5, stats.NetworkRecv)
|
||||
assert.Equal(t, [2]uint64{1048576, 524288}, stats.Bandwidth)
|
||||
// Deprecated fields still populated for backward compatibility with older hubs
|
||||
assert.Equal(t, 1.0, stats.NetworkSent) // 1048576 bytes = 1 MB
|
||||
assert.Equal(t, 0.5, stats.NetworkRecv) // 524288 bytes = 0.5 MB
|
||||
assert.Equal(t, testTime, stats.PrevReadTime)
|
||||
}
|
||||
|
||||
@@ -687,6 +692,8 @@ func TestContainerStatsEndToEndWithRealData(t *testing.T) {
|
||||
|
||||
assert.Equal(t, cpuPct, testStats.Cpu)
|
||||
assert.Equal(t, bytesToMegabytes(float64(usedMemory)), testStats.Mem)
|
||||
assert.Equal(t, [2]uint64{1000000, 500000}, testStats.Bandwidth)
|
||||
// Deprecated fields still populated for backward compatibility with older hubs
|
||||
assert.Equal(t, bytesToMegabytes(1000000), testStats.NetworkSent)
|
||||
assert.Equal(t, bytesToMegabytes(500000), testStats.NetworkRecv)
|
||||
assert.Equal(t, testTime, testStats.PrevReadTime)
|
||||
@@ -800,6 +807,24 @@ func TestNetworkRateCalculationFormula(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetHostInfo(t *testing.T) {
|
||||
data, err := os.ReadFile("test-data/system_info.json")
|
||||
require.NoError(t, err)
|
||||
|
||||
var info container.HostInfo
|
||||
err = json.Unmarshal(data, &info)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "6.8.0-31-generic", info.KernelVersion)
|
||||
assert.Equal(t, "Ubuntu 24.04 LTS", info.OperatingSystem)
|
||||
// assert.Equal(t, "24.04", info.OSVersion)
|
||||
// assert.Equal(t, "linux", info.OSType)
|
||||
// assert.Equal(t, "x86_64", info.Architecture)
|
||||
assert.EqualValues(t, 4, info.NCPU)
|
||||
assert.EqualValues(t, 2095882240, info.MemTotal)
|
||||
// assert.Equal(t, "27.0.1", info.ServerVersion)
|
||||
}
|
||||
|
||||
func TestDeltaTrackerCacheTimeIsolation(t *testing.T) {
|
||||
// Test that different cache times have separate DeltaTracker instances
|
||||
dm := &dockerManager{
|
||||
@@ -911,6 +936,8 @@ func TestConstantsAndUtilityFunctions(t *testing.T) {
|
||||
assert.Equal(t, uint16(60000), defaultCacheTimeMs)
|
||||
assert.Equal(t, uint64(5e9), maxNetworkSpeedBps)
|
||||
assert.Equal(t, 2100, dockerTimeoutMs)
|
||||
assert.Equal(t, uint32(1024*1024), uint32(maxLogFrameSize)) // 1MB
|
||||
assert.Equal(t, 5*1024*1024, maxTotalLogSize) // 5MB
|
||||
|
||||
// Test utility functions
|
||||
assert.Equal(t, 1.5, twoDecimals(1.499))
|
||||
@@ -921,3 +948,301 @@ func TestConstantsAndUtilityFunctions(t *testing.T) {
|
||||
assert.Equal(t, 0.5, bytesToMegabytes(524288)) // 512 KB
|
||||
assert.Equal(t, 0.0, bytesToMegabytes(0))
|
||||
}
|
||||
|
||||
func TestDecodeDockerLogStream(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input []byte
|
||||
expected string
|
||||
expectError bool
|
||||
multiplexed bool
|
||||
}{
|
||||
{
|
||||
name: "simple log entry",
|
||||
input: []byte{
|
||||
// Frame 1: stdout, 11 bytes
|
||||
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0B,
|
||||
'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd',
|
||||
},
|
||||
expected: "Hello World",
|
||||
expectError: false,
|
||||
multiplexed: true,
|
||||
},
|
||||
{
|
||||
name: "multiple frames",
|
||||
input: []byte{
|
||||
// Frame 1: stdout, 5 bytes
|
||||
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
|
||||
'H', 'e', 'l', 'l', 'o',
|
||||
// Frame 2: stdout, 5 bytes
|
||||
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
|
||||
'W', 'o', 'r', 'l', 'd',
|
||||
},
|
||||
expected: "HelloWorld",
|
||||
expectError: false,
|
||||
multiplexed: true,
|
||||
},
|
||||
{
|
||||
name: "zero length frame",
|
||||
input: []byte{
|
||||
// Frame 1: stdout, 0 bytes
|
||||
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
// Frame 2: stdout, 5 bytes
|
||||
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
|
||||
'H', 'e', 'l', 'l', 'o',
|
||||
},
|
||||
expected: "Hello",
|
||||
expectError: false,
|
||||
multiplexed: true,
|
||||
},
|
||||
{
|
||||
name: "empty input",
|
||||
input: []byte{},
|
||||
expected: "",
|
||||
expectError: false,
|
||||
multiplexed: true,
|
||||
},
|
||||
{
|
||||
name: "raw stream (not multiplexed)",
|
||||
input: []byte("raw log content"),
|
||||
expected: "raw log content",
|
||||
multiplexed: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
reader := bytes.NewReader(tt.input)
|
||||
var builder strings.Builder
|
||||
err := decodeDockerLogStream(reader, &builder, tt.multiplexed)
|
||||
|
||||
if tt.expectError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expected, builder.String())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeDockerLogStreamMemoryProtection(t *testing.T) {
|
||||
t.Run("excessively large frame should error", func(t *testing.T) {
|
||||
// Create a frame with size exceeding maxLogFrameSize
|
||||
excessiveSize := uint32(maxLogFrameSize + 1)
|
||||
input := []byte{
|
||||
// Frame header with excessive size
|
||||
0x01, 0x00, 0x00, 0x00,
|
||||
byte(excessiveSize >> 24), byte(excessiveSize >> 16), byte(excessiveSize >> 8), byte(excessiveSize),
|
||||
}
|
||||
|
||||
reader := bytes.NewReader(input)
|
||||
var builder strings.Builder
|
||||
err := decodeDockerLogStream(reader, &builder, true)
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "log frame size")
|
||||
assert.Contains(t, err.Error(), "exceeds maximum")
|
||||
})
|
||||
|
||||
t.Run("total size limit should truncate", func(t *testing.T) {
|
||||
// Create frames that exceed maxTotalLogSize (5MB)
|
||||
// Use frames within maxLogFrameSize (1MB) to avoid single-frame rejection
|
||||
frameSize := uint32(800 * 1024) // 800KB per frame
|
||||
var input []byte
|
||||
|
||||
// Frames 1-6: 800KB each (total 4.8MB - within 5MB limit)
|
||||
for i := 0; i < 6; i++ {
|
||||
char := byte('A' + i)
|
||||
frameHeader := []byte{
|
||||
0x01, 0x00, 0x00, 0x00,
|
||||
byte(frameSize >> 24), byte(frameSize >> 16), byte(frameSize >> 8), byte(frameSize),
|
||||
}
|
||||
input = append(input, frameHeader...)
|
||||
input = append(input, bytes.Repeat([]byte{char}, int(frameSize))...)
|
||||
}
|
||||
|
||||
// Frame 7: 800KB (would bring total to 5.6MB, exceeding 5MB limit - should be truncated)
|
||||
frame7Header := []byte{
|
||||
0x01, 0x00, 0x00, 0x00,
|
||||
byte(frameSize >> 24), byte(frameSize >> 16), byte(frameSize >> 8), byte(frameSize),
|
||||
}
|
||||
input = append(input, frame7Header...)
|
||||
input = append(input, bytes.Repeat([]byte{'Z'}, int(frameSize))...)
|
||||
|
||||
reader := bytes.NewReader(input)
|
||||
var builder strings.Builder
|
||||
err := decodeDockerLogStream(reader, &builder, true)
|
||||
|
||||
// Should complete without error (graceful truncation)
|
||||
assert.NoError(t, err)
|
||||
// Should have read 6 frames (4.8MB total), stopping before the 7th frame, which would exceed the 5MB limit
|
||||
expectedSize := int(frameSize) * 6
|
||||
assert.Equal(t, expectedSize, builder.Len())
|
||||
// Should contain A-F but not Z
|
||||
result := builder.String()
|
||||
assert.Contains(t, result, "A")
|
||||
assert.Contains(t, result, "F")
|
||||
assert.NotContains(t, result, "Z")
|
||||
})
|
||||
}
|
||||
|
||||
func TestShouldExcludeContainer(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
containerName string
|
||||
patterns []string
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "empty patterns excludes nothing",
|
||||
containerName: "any-container",
|
||||
patterns: []string{},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "exact match - excluded",
|
||||
containerName: "test-web",
|
||||
patterns: []string{"test-web", "test-api"},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "exact match - not excluded",
|
||||
containerName: "prod-web",
|
||||
patterns: []string{"test-web", "test-api"},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "wildcard prefix match - excluded",
|
||||
containerName: "test-web",
|
||||
patterns: []string{"test-*"},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "wildcard prefix match - not excluded",
|
||||
containerName: "prod-web",
|
||||
patterns: []string{"test-*"},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "wildcard suffix match - excluded",
|
||||
containerName: "myapp-staging",
|
||||
patterns: []string{"*-staging"},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "wildcard suffix match - not excluded",
|
||||
containerName: "myapp-prod",
|
||||
patterns: []string{"*-staging"},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "wildcard both sides match - excluded",
|
||||
containerName: "test-myapp-staging",
|
||||
patterns: []string{"*-myapp-*"},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "wildcard both sides match - not excluded",
|
||||
containerName: "prod-yourapp-live",
|
||||
patterns: []string{"*-myapp-*"},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "multiple patterns - matches first",
|
||||
containerName: "test-container",
|
||||
patterns: []string{"test-*", "*-staging"},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "multiple patterns - matches second",
|
||||
containerName: "myapp-staging",
|
||||
patterns: []string{"test-*", "*-staging"},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "multiple patterns - no match",
|
||||
containerName: "prod-web",
|
||||
patterns: []string{"test-*", "*-staging"},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "mixed exact and wildcard - exact match",
|
||||
containerName: "temp-container",
|
||||
patterns: []string{"temp-container", "test-*"},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "mixed exact and wildcard - wildcard match",
|
||||
containerName: "test-web",
|
||||
patterns: []string{"temp-container", "test-*"},
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dm := &dockerManager{
|
||||
excludeContainers: tt.patterns,
|
||||
}
|
||||
result := dm.shouldExcludeContainer(tt.containerName)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAnsiEscapePattern(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "no ANSI codes",
|
||||
input: "Hello, World!",
|
||||
expected: "Hello, World!",
|
||||
},
|
||||
{
|
||||
name: "simple color code",
|
||||
input: "\x1b[34mINFO\x1b[0m client mode",
|
||||
expected: "INFO client mode",
|
||||
},
|
||||
{
|
||||
name: "multiple color codes",
|
||||
input: "\x1b[31mERROR\x1b[0m: \x1b[33mWarning\x1b[0m message",
|
||||
expected: "ERROR: Warning message",
|
||||
},
|
||||
{
|
||||
name: "bold and color",
|
||||
input: "\x1b[1;32mSUCCESS\x1b[0m",
|
||||
expected: "SUCCESS",
|
||||
},
|
||||
{
|
||||
name: "cursor movement codes",
|
||||
input: "Line 1\x1b[KLine 2",
|
||||
expected: "Line 1Line 2",
|
||||
},
|
||||
{
|
||||
name: "256 color code",
|
||||
input: "\x1b[38;5;196mRed text\x1b[0m",
|
||||
expected: "Red text",
|
||||
},
|
||||
{
|
||||
name: "RGB/truecolor code",
|
||||
input: "\x1b[38;2;255;0;0mRed text\x1b[0m",
|
||||
expected: "Red text",
|
||||
},
|
||||
{
|
||||
name: "mixed content with newlines",
|
||||
input: "\x1b[34m2024-01-01 12:00:00\x1b[0m INFO Starting\n\x1b[31m2024-01-01 12:00:01\x1b[0m ERROR Failed",
|
||||
expected: "2024-01-01 12:00:00 INFO Starting\n2024-01-01 12:00:01 ERROR Failed",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := ansiEscapePattern.ReplaceAllString(tt.input, "")
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
70
agent/gpu.go
@@ -5,6 +5,7 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"maps"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
@@ -14,14 +15,13 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
|
||||
"golang.org/x/exp/slog"
|
||||
)
|
||||
|
||||
const (
|
||||
// Commands
|
||||
nvidiaSmiCmd string = "nvidia-smi"
|
||||
rocmSmiCmd string = "rocm-smi"
|
||||
amdgpuCmd string = "amdgpu" // internal cmd for sysfs collection
|
||||
tegraStatsCmd string = "tegrastats"
|
||||
|
||||
// Polling intervals
|
||||
@@ -42,8 +42,10 @@ type GPUManager struct {
|
||||
sync.Mutex
|
||||
nvidiaSmi bool
|
||||
rocmSmi bool
|
||||
amdgpu bool
|
||||
tegrastats bool
|
||||
intelGpuStats bool
|
||||
nvml bool
|
||||
GpuDataMap map[string]*system.GPUData
|
||||
// lastAvgData stores the last calculated averages for each GPU
|
||||
// Used when a collection happens before new data arrives (Count == 0)
|
||||
@@ -136,10 +138,10 @@ func (gm *GPUManager) getJetsonParser() func(output []byte) bool {
|
||||
// use closure to avoid recompiling the regex
|
||||
ramPattern := regexp.MustCompile(`RAM (\d+)/(\d+)MB`)
|
||||
gr3dPattern := regexp.MustCompile(`GR3D_FREQ (\d+)%`)
|
||||
tempPattern := regexp.MustCompile(`tj@(\d+\.?\d*)C`)
|
||||
tempPattern := regexp.MustCompile(`(?:tj|GPU)@(\d+\.?\d*)C`)
|
||||
// Orin Nano / NX do not have GPU specific power monitor
|
||||
// TODO: Maybe use VDD_IN for Nano / NX and add a total system power chart
|
||||
powerPattern := regexp.MustCompile(`(GPU_SOC|CPU_GPU_CV) (\d+)mW`)
|
||||
powerPattern := regexp.MustCompile(`(GPU_SOC|CPU_GPU_CV)\s+(\d+)mW|VDD_SYS_GPU\s+(\d+)/\d+`)
|
||||
|
||||
// jetson devices have only one gpu so we'll just initialize here
|
||||
gpuData := &system.GPUData{Name: "GPU"}
|
||||
@@ -168,7 +170,13 @@ func (gm *GPUManager) getJetsonParser() func(output []byte) bool {
|
||||
// Parse power usage
|
||||
powerMatches := powerPattern.FindSubmatch(output)
|
||||
if powerMatches != nil {
|
||||
power, _ := strconv.ParseFloat(string(powerMatches[2]), 64)
|
||||
// powerMatches[2] is the "(GPU_SOC|CPU_GPU_CV) <N>mW" capture
|
||||
// powerMatches[3] is the "VDD_SYS_GPU <N>/<N>" capture
|
||||
powerStr := string(powerMatches[2])
|
||||
if powerStr == "" {
|
||||
powerStr = string(powerMatches[3])
|
||||
}
|
||||
power, _ := strconv.ParseFloat(powerStr, 64)
|
||||
gpuData.Power += power / milliwattsInAWatt
|
||||
}
|
||||
gpuData.Count++
|
||||
@@ -231,10 +239,11 @@ func (gm *GPUManager) parseAmdData(output []byte) bool {
|
||||
totalMemory, _ := strconv.ParseFloat(v.MemoryTotal, 64)
|
||||
usage, _ := strconv.ParseFloat(v.Usage, 64)
|
||||
|
||||
if _, ok := gm.GpuDataMap[v.ID]; !ok {
|
||||
gm.GpuDataMap[v.ID] = &system.GPUData{Name: v.Name}
|
||||
id := v.ID
|
||||
if _, ok := gm.GpuDataMap[id]; !ok {
|
||||
gm.GpuDataMap[id] = &system.GPUData{Name: v.Name}
|
||||
}
|
||||
gpu := gm.GpuDataMap[v.ID]
|
||||
gpu := gm.GpuDataMap[id]
|
||||
gpu.Temperature, _ = strconv.ParseFloat(v.Temperature, 64)
|
||||
gpu.MemoryUsed = bytesToMegabytes(memoryUsage)
|
||||
gpu.MemoryTotal = bytesToMegabytes(totalMemory)
|
||||
@@ -297,8 +306,13 @@ func (gm *GPUManager) calculateGPUAverage(id string, gpu *system.GPUData, cacheK
|
||||
currentCount := uint32(gpu.Count)
|
||||
deltaCount := gm.calculateDeltaCount(currentCount, lastSnapshot)
|
||||
|
||||
// If no new data arrived, use last known average
|
||||
// If no new data arrived
|
||||
if deltaCount == 0 {
|
||||
// If GPU appears suspended (instantaneous values are 0), return zero values
|
||||
// Otherwise return last known average for temporary collection gaps
|
||||
if gpu.Temperature == 0 && gpu.MemoryUsed == 0 {
|
||||
return system.GPUData{Name: gpu.Name}
|
||||
}
|
||||
return gm.lastAvgData[id] // zero value if not found
|
||||
}
|
||||
|
||||
@@ -387,7 +401,13 @@ func (gm *GPUManager) detectGPUs() error {
|
||||
gm.nvidiaSmi = true
|
||||
}
|
||||
if _, err := exec.LookPath(rocmSmiCmd); err == nil {
|
||||
gm.rocmSmi = true
|
||||
if val, _ := GetEnv("AMD_SYSFS"); val == "true" {
|
||||
gm.amdgpu = true
|
||||
} else {
|
||||
gm.rocmSmi = true
|
||||
}
|
||||
} else if gm.hasAmdSysfs() {
|
||||
gm.amdgpu = true
|
||||
}
|
||||
if _, err := exec.LookPath(tegraStatsCmd); err == nil {
|
||||
gm.tegrastats = true
|
||||
@@ -396,10 +416,10 @@ func (gm *GPUManager) detectGPUs() error {
|
||||
if _, err := exec.LookPath(intelGpuStatsCmd); err == nil {
|
||||
gm.intelGpuStats = true
|
||||
}
|
||||
if gm.nvidiaSmi || gm.rocmSmi || gm.tegrastats || gm.intelGpuStats {
|
||||
if gm.nvidiaSmi || gm.rocmSmi || gm.amdgpu || gm.tegrastats || gm.intelGpuStats || gm.nvml {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("no GPU found - install nvidia-smi, rocm-smi, tegrastats, or intel_gpu_top")
|
||||
return fmt.Errorf("no GPU found - install nvidia-smi, rocm-smi, or intel_gpu_top")
|
||||
}
|
||||
|
||||
// startCollector starts the appropriate GPU data collector based on the command
|
||||
@@ -436,6 +456,12 @@ func (gm *GPUManager) startCollector(command string) {
|
||||
collector.cmdArgs = []string{"--interval", tegraStatsInterval}
|
||||
collector.parse = gm.getJetsonParser()
|
||||
go collector.start()
|
||||
case amdgpuCmd:
|
||||
go func() {
|
||||
if err := gm.collectAmdStats(); err != nil {
|
||||
slog.Warn("Error collecting AMD GPU data via sysfs", "err", err)
|
||||
}
|
||||
}()
|
||||
case rocmSmiCmd:
|
||||
collector.cmdArgs = []string{"--showid", "--showtemp", "--showuse", "--showpower", "--showproductname", "--showmeminfo", "vram", "--json"}
|
||||
collector.parse = gm.parseAmdData
|
||||
@@ -447,7 +473,7 @@ func (gm *GPUManager) startCollector(command string) {
|
||||
if failures > maxFailureRetries {
|
||||
break
|
||||
}
|
||||
slog.Warn("Error collecting AMD GPU data", "err", err)
|
||||
slog.Warn("Error collecting AMD GPU data via rocm-smi", "err", err)
|
||||
}
|
||||
time.Sleep(rocmSmiInterval)
|
||||
}
|
||||
@@ -467,11 +493,27 @@ func NewGPUManager() (*GPUManager, error) {
|
||||
gm.GpuDataMap = make(map[string]*system.GPUData)
|
||||
|
||||
if gm.nvidiaSmi {
|
||||
gm.startCollector(nvidiaSmiCmd)
|
||||
if nvml, _ := GetEnv("NVML"); nvml == "true" {
|
||||
gm.nvml = true
|
||||
gm.nvidiaSmi = false
|
||||
collector := &nvmlCollector{gm: &gm}
|
||||
if err := collector.init(); err == nil {
|
||||
go collector.start()
|
||||
} else {
|
||||
slog.Warn("Failed to initialize NVML, falling back to nvidia-smi", "err", err)
|
||||
gm.nvidiaSmi = true
|
||||
gm.startCollector(nvidiaSmiCmd)
|
||||
}
|
||||
} else {
|
||||
gm.startCollector(nvidiaSmiCmd)
|
||||
}
|
||||
}
|
||||
if gm.rocmSmi {
|
||||
gm.startCollector(rocmSmiCmd)
|
||||
}
|
||||
if gm.amdgpu {
|
||||
gm.startCollector(amdgpuCmd)
|
||||
}
|
||||
if gm.tegrastats {
|
||||
gm.startCollector(tegraStatsCmd)
|
||||
}
|
||||
|
||||
184
agent/gpu_amd_linux.go
Normal file
@@ -0,0 +1,184 @@
|
||||
//go:build linux
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
)
|
||||
|
||||
// hasAmdSysfs returns true if any AMD GPU sysfs nodes are found
|
||||
func (gm *GPUManager) hasAmdSysfs() bool {
|
||||
cards, err := filepath.Glob("/sys/class/drm/card*/device/vendor")
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
for _, vendorPath := range cards {
|
||||
vendor, err := os.ReadFile(vendorPath)
|
||||
if err == nil && strings.TrimSpace(string(vendor)) == "0x1002" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// collectAmdStats collects AMD GPU metrics directly from sysfs to avoid the overhead of rocm-smi
|
||||
func (gm *GPUManager) collectAmdStats() error {
|
||||
cards, err := filepath.Glob("/sys/class/drm/card*")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var amdGpuPaths []string
|
||||
for _, card := range cards {
|
||||
// Ignore symbolic links and non-main card directories
|
||||
if strings.Contains(filepath.Base(card), "-") || !isAmdGpu(card) {
|
||||
continue
|
||||
}
|
||||
amdGpuPaths = append(amdGpuPaths, card)
|
||||
}
|
||||
|
||||
if len(amdGpuPaths) == 0 {
|
||||
return errNoValidData
|
||||
}
|
||||
|
||||
slog.Debug("Using sysfs for AMD GPU data collection")
|
||||
|
||||
failures := 0
|
||||
for {
|
||||
hasData := false
|
||||
for _, cardPath := range amdGpuPaths {
|
||||
if gm.updateAmdGpuData(cardPath) {
|
||||
hasData = true
|
||||
}
|
||||
}
|
||||
if !hasData {
|
||||
failures++
|
||||
if failures > maxFailureRetries {
|
||||
return errNoValidData
|
||||
}
|
||||
slog.Warn("No AMD GPU data from sysfs", "failures", failures)
|
||||
time.Sleep(retryWaitTime)
|
||||
continue
|
||||
}
|
||||
failures = 0
|
||||
time.Sleep(rocmSmiInterval)
|
||||
}
|
||||
}
|
||||
|
||||
func isAmdGpu(cardPath string) bool {
|
||||
vendorPath := filepath.Join(cardPath, "device/vendor")
|
||||
vendor, err := os.ReadFile(vendorPath)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return strings.TrimSpace(string(vendor)) == "0x1002"
|
||||
}
|
||||
|
||||
// updateAmdGpuData reads GPU metrics from sysfs and updates the GPU data map.
|
||||
// Returns true if at least some data was successfully read.
|
||||
func (gm *GPUManager) updateAmdGpuData(cardPath string) bool {
|
||||
devicePath := filepath.Join(cardPath, "device")
|
||||
id := filepath.Base(cardPath)
|
||||
|
||||
// Read all sysfs values first (no lock needed - these can be slow)
|
||||
usage, usageErr := readSysfsFloat(filepath.Join(devicePath, "gpu_busy_percent"))
|
||||
memUsed, memUsedErr := readSysfsFloat(filepath.Join(devicePath, "mem_info_vram_used"))
|
||||
memTotal, _ := readSysfsFloat(filepath.Join(devicePath, "mem_info_vram_total"))
|
||||
|
||||
var temp, power float64
|
||||
hwmons, _ := filepath.Glob(filepath.Join(devicePath, "hwmon/hwmon*"))
|
||||
for _, hwmonDir := range hwmons {
|
||||
if t, err := readSysfsFloat(filepath.Join(hwmonDir, "temp1_input")); err == nil {
|
||||
temp = t / 1000.0
|
||||
}
|
||||
if p, err := readSysfsFloat(filepath.Join(hwmonDir, "power1_average")); err == nil {
|
||||
power += p / 1000000.0
|
||||
} else if p, err := readSysfsFloat(filepath.Join(hwmonDir, "power1_input")); err == nil {
|
||||
power += p / 1000000.0
|
||||
}
|
||||
}
|
||||
|
||||
// Check if we got any meaningful data
|
||||
if usageErr != nil && memUsedErr != nil && temp == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Single lock to update all values atomically
|
||||
gm.Lock()
|
||||
defer gm.Unlock()
|
||||
|
||||
gpu, ok := gm.GpuDataMap[id]
|
||||
if !ok {
|
||||
gpu = &system.GPUData{Name: getAmdGpuName(devicePath)}
|
||||
gm.GpuDataMap[id] = gpu
|
||||
}
|
||||
|
||||
if usageErr == nil {
|
||||
gpu.Usage += usage
|
||||
}
|
||||
gpu.MemoryUsed = bytesToMegabytes(memUsed)
|
||||
gpu.MemoryTotal = bytesToMegabytes(memTotal)
|
||||
gpu.Temperature = temp
|
||||
gpu.Power += power
|
||||
gpu.Count++
|
||||
return true
|
||||
}
|
||||
|
||||
func readSysfsFloat(path string) (float64, error) {
    val, err := os.ReadFile(path)
    if err != nil {
        return 0, err
    }
    return strconv.ParseFloat(strings.TrimSpace(string(val)), 64)
}
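A worked example of the unit conversions applied in updateAmdGpuData above: the hwmon sysfs interface reports temperature in millidegrees Celsius and power in microwatts. The raw values below are hypothetical.

// Sketch: converting raw hwmon readings (hypothetical values).
tempMilliC := 65000.0            // temp1_input
powerMicroW := 42000000.0        // power1_average or power1_input
tempC := tempMilliC / 1000.0     // 65.0 degrees C
watts := powerMicroW / 1000000.0 // 42.0 W
_, _ = tempC, watts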
|
||||
|
||||
// getAmdGpuName attempts to get a descriptive GPU name.
|
||||
// First tries product_name (rarely available), then looks up the PCI device ID.
|
||||
// Falls back to showing the raw device ID if not found in the lookup table.
|
||||
func getAmdGpuName(devicePath string) string {
|
||||
// Try product_name first (works for some enterprise GPUs)
|
||||
if prod, err := os.ReadFile(filepath.Join(devicePath, "product_name")); err == nil {
|
||||
return strings.TrimSpace(string(prod))
|
||||
}
|
||||
|
||||
// Read PCI device ID and look it up
|
||||
if deviceID, err := os.ReadFile(filepath.Join(devicePath, "device")); err == nil {
|
||||
id := strings.TrimPrefix(strings.ToLower(strings.TrimSpace(string(deviceID))), "0x")
|
||||
if name, ok := getRadeonNames()[id]; ok {
|
||||
return fmt.Sprintf("Radeon %s", name)
|
||||
}
|
||||
return fmt.Sprintf("AMD GPU (%s)", id)
|
||||
}
|
||||
|
||||
return "AMD GPU"
|
||||
}
|
||||
|
||||
// getRadeonNames returns the AMD GPU name lookup table
|
||||
// Device IDs from https://pci-ids.ucw.cz/read/PC/1002
|
||||
var getRadeonNames = sync.OnceValue(func() map[string]string {
|
||||
return map[string]string{
|
||||
"7550": "RX 9070",
|
||||
"7590": "RX 9060 XT",
|
||||
"7551": "AI PRO R9700",
|
||||
|
||||
"744c": "RX 7900",
|
||||
|
||||
"1681": "680M",
|
||||
|
||||
"7448": "PRO W7900",
|
||||
"745e": "PRO W7800",
|
||||
"7470": "PRO W7700",
|
||||
"73e3": "PRO W6600",
|
||||
"7422": "PRO W6400",
|
||||
"7341": "PRO W5500",
|
||||
}
|
||||
})
|
||||
15
agent/gpu_amd_unsupported.go
Normal file
@@ -0,0 +1,15 @@
|
||||
//go:build !linux
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
func (gm *GPUManager) hasAmdSysfs() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (gm *GPUManager) collectAmdStats() error {
|
||||
return errors.ErrUnsupported
|
||||
}
|
||||
@@ -27,10 +27,11 @@ func (gm *GPUManager) updateIntelFromStats(sample *intelGpuStats) bool {
|
||||
defer gm.Unlock()
|
||||
|
||||
// only one gpu for now - cmd doesn't provide all by default
|
||||
gpuData, ok := gm.GpuDataMap["0"]
|
||||
id := "i0" // prefix with i to avoid conflicts with nvidia card ids
|
||||
gpuData, ok := gm.GpuDataMap[id]
|
||||
if !ok {
|
||||
gpuData = &system.GPUData{Name: "GPU", Engines: make(map[string]float64)}
|
||||
gm.GpuDataMap["0"] = gpuData
|
||||
gm.GpuDataMap[id] = gpuData
|
||||
}
|
||||
|
||||
gpuData.Power += sample.PowerGPU
|
||||
@@ -49,7 +50,12 @@ func (gm *GPUManager) updateIntelFromStats(sample *intelGpuStats) bool {
|
||||
|
||||
// collectIntelStats executes intel_gpu_top in text mode (-l) and parses the output
|
||||
func (gm *GPUManager) collectIntelStats() (err error) {
|
||||
cmd := exec.Command(intelGpuStatsCmd, "-s", intelGpuStatsInterval, "-l")
|
||||
// Build command arguments, optionally selecting a device via -d
|
||||
args := []string{"-s", intelGpuStatsInterval, "-l"}
|
||||
if dev, ok := GetEnv("INTEL_GPU_DEVICE"); ok && dev != "" {
|
||||
args = append(args, "-d", dev)
|
||||
}
|
||||
cmd := exec.Command(intelGpuStatsCmd, args...)
|
||||
// Avoid blocking if intel_gpu_top writes to stderr
|
||||
cmd.Stderr = io.Discard
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
@@ -129,7 +135,9 @@ func (gm *GPUManager) parseIntelHeaders(header1 string, header2 string) (engineN
|
||||
powerIndex = -1 // Initialize to -1, will be set to actual index if found
|
||||
// Collect engine names from header1
|
||||
for _, col := range h1 {
|
||||
key := strings.TrimRightFunc(col, func(r rune) bool { return r >= '0' && r <= '9' })
|
||||
key := strings.TrimRightFunc(col, func(r rune) bool {
|
||||
return (r >= '0' && r <= '9') || r == '/'
|
||||
})
|
||||
var friendly string
|
||||
switch key {
|
||||
case "RCS":
|
||||
|
||||
224
agent/gpu_nvml.go
Normal file
@@ -0,0 +1,224 @@
|
||||
//go:build amd64 && (windows || (linux && glibc))
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"strings"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/ebitengine/purego"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
)
|
||||
|
||||
// NVML constants and types
|
||||
const (
|
||||
nvmlSuccess int = 0
|
||||
)
|
||||
|
||||
type nvmlDevice uintptr
|
||||
|
||||
type nvmlReturn int
|
||||
|
||||
type nvmlMemoryV1 struct {
|
||||
Total uint64
|
||||
Free uint64
|
||||
Used uint64
|
||||
}
|
||||
|
||||
type nvmlMemoryV2 struct {
    Version  uint32
    Total    uint64
    Reserved uint64
    Free     uint64
    Used     uint64
}
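The v2 struct is versioned: the 0x02000028 constant assigned later in collect() appears to follow NVML's struct-versioning convention of API version in the top byte and struct size in the low bytes (2<<24 | 40, since the struct above is 40 bytes on amd64). A minimal sketch of deriving that value rather than hard-coding it; this is an illustration, not a change to the collector.

// Sketch: deriving the memory_v2 version value from the struct layout above.
// unsafe is already imported in this file; unsafe.Sizeof is a compile-time constant here.
const nvmlMemoryV2Version = uint32(2<<24) | uint32(unsafe.Sizeof(nvmlMemoryV2{})) // == 0x02000028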
|
||||
|
||||
type nvmlUtilization struct {
|
||||
Gpu uint32
|
||||
Memory uint32
|
||||
}
|
||||
|
||||
type nvmlPciInfo struct {
|
||||
BusId [16]byte
|
||||
Domain uint32
|
||||
Bus uint32
|
||||
Device uint32
|
||||
PciDeviceId uint32
|
||||
PciSubSystemId uint32
|
||||
}
|
||||
|
||||
// NVML function signatures
|
||||
var (
|
||||
nvmlInit func() nvmlReturn
|
||||
nvmlShutdown func() nvmlReturn
|
||||
nvmlDeviceGetCount func(count *uint32) nvmlReturn
|
||||
nvmlDeviceGetHandleByIndex func(index uint32, device *nvmlDevice) nvmlReturn
|
||||
nvmlDeviceGetName func(device nvmlDevice, name *byte, length uint32) nvmlReturn
|
||||
nvmlDeviceGetMemoryInfo func(device nvmlDevice, memory uintptr) nvmlReturn
|
||||
nvmlDeviceGetUtilizationRates func(device nvmlDevice, utilization *nvmlUtilization) nvmlReturn
|
||||
nvmlDeviceGetTemperature func(device nvmlDevice, sensorType int, temp *uint32) nvmlReturn
|
||||
nvmlDeviceGetPowerUsage func(device nvmlDevice, power *uint32) nvmlReturn
|
||||
nvmlDeviceGetPciInfo func(device nvmlDevice, pci *nvmlPciInfo) nvmlReturn
|
||||
nvmlErrorString func(result nvmlReturn) string
|
||||
)
|
||||
|
||||
type nvmlCollector struct {
|
||||
gm *GPUManager
|
||||
lib uintptr
|
||||
devices []nvmlDevice
|
||||
bdfs []string
|
||||
isV2 bool
|
||||
}
|
||||
|
||||
func (c *nvmlCollector) init() error {
|
||||
slog.Debug("NVML: Initializing")
|
||||
libPath := getNVMLPath()
|
||||
|
||||
lib, err := openLibrary(libPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load %s: %w", libPath, err)
|
||||
}
|
||||
c.lib = lib
|
||||
|
||||
purego.RegisterLibFunc(&nvmlInit, lib, "nvmlInit")
|
||||
purego.RegisterLibFunc(&nvmlShutdown, lib, "nvmlShutdown")
|
||||
purego.RegisterLibFunc(&nvmlDeviceGetCount, lib, "nvmlDeviceGetCount")
|
||||
purego.RegisterLibFunc(&nvmlDeviceGetHandleByIndex, lib, "nvmlDeviceGetHandleByIndex")
|
||||
purego.RegisterLibFunc(&nvmlDeviceGetName, lib, "nvmlDeviceGetName")
|
||||
// Try to get v2 memory info, fallback to v1 if not available
|
||||
if hasSymbol(lib, "nvmlDeviceGetMemoryInfo_v2") {
|
||||
c.isV2 = true
|
||||
purego.RegisterLibFunc(&nvmlDeviceGetMemoryInfo, lib, "nvmlDeviceGetMemoryInfo_v2")
|
||||
} else {
|
||||
purego.RegisterLibFunc(&nvmlDeviceGetMemoryInfo, lib, "nvmlDeviceGetMemoryInfo")
|
||||
}
|
||||
purego.RegisterLibFunc(&nvmlDeviceGetUtilizationRates, lib, "nvmlDeviceGetUtilizationRates")
|
||||
purego.RegisterLibFunc(&nvmlDeviceGetTemperature, lib, "nvmlDeviceGetTemperature")
|
||||
purego.RegisterLibFunc(&nvmlDeviceGetPowerUsage, lib, "nvmlDeviceGetPowerUsage")
|
||||
purego.RegisterLibFunc(&nvmlDeviceGetPciInfo, lib, "nvmlDeviceGetPciInfo")
|
||||
purego.RegisterLibFunc(&nvmlErrorString, lib, "nvmlErrorString")
|
||||
|
||||
if ret := nvmlInit(); ret != nvmlReturn(nvmlSuccess) {
|
||||
return fmt.Errorf("nvmlInit failed: %v", ret)
|
||||
}
|
||||
|
||||
var count uint32
|
||||
if ret := nvmlDeviceGetCount(&count); ret != nvmlReturn(nvmlSuccess) {
|
||||
return fmt.Errorf("nvmlDeviceGetCount failed: %v", ret)
|
||||
}
|
||||
|
||||
for i := uint32(0); i < count; i++ {
|
||||
var device nvmlDevice
|
||||
if ret := nvmlDeviceGetHandleByIndex(i, &device); ret == nvmlReturn(nvmlSuccess) {
|
||||
c.devices = append(c.devices, device)
|
||||
// Get BDF for power state check
|
||||
var pci nvmlPciInfo
|
||||
if ret := nvmlDeviceGetPciInfo(device, &pci); ret == nvmlReturn(nvmlSuccess) {
|
||||
busID := string(pci.BusId[:])
|
||||
if idx := strings.Index(busID, "\x00"); idx != -1 {
|
||||
busID = busID[:idx]
|
||||
}
|
||||
c.bdfs = append(c.bdfs, strings.ToLower(busID))
|
||||
} else {
|
||||
c.bdfs = append(c.bdfs, "")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *nvmlCollector) start() {
|
||||
defer nvmlShutdown()
|
||||
ticker := time.Tick(3 * time.Second)
|
||||
|
||||
for range ticker {
|
||||
c.collect()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *nvmlCollector) collect() {
|
||||
c.gm.Lock()
|
||||
defer c.gm.Unlock()
|
||||
|
||||
for i, device := range c.devices {
|
||||
id := fmt.Sprintf("%d", i)
|
||||
bdf := c.bdfs[i]
|
||||
|
||||
// Update GPUDataMap
|
||||
if _, ok := c.gm.GpuDataMap[id]; !ok {
|
||||
var nameBuf [64]byte
|
||||
if ret := nvmlDeviceGetName(device, &nameBuf[0], 64); ret != nvmlReturn(nvmlSuccess) {
|
||||
continue
|
||||
}
|
||||
name := string(nameBuf[:strings.Index(string(nameBuf[:]), "\x00")])
|
||||
name = strings.TrimPrefix(name, "NVIDIA ")
|
||||
c.gm.GpuDataMap[id] = &system.GPUData{Name: strings.TrimSuffix(name, " Laptop GPU")}
|
||||
}
|
||||
gpu := c.gm.GpuDataMap[id]
|
||||
|
||||
if bdf != "" && !c.isGPUActive(bdf) {
|
||||
slog.Debug("NVML: GPU is suspended, skipping", "bdf", bdf)
|
||||
gpu.Temperature = 0
|
||||
gpu.MemoryUsed = 0
|
||||
continue
|
||||
}
|
||||
|
||||
// Utilization
|
||||
var utilization nvmlUtilization
|
||||
if ret := nvmlDeviceGetUtilizationRates(device, &utilization); ret != nvmlReturn(nvmlSuccess) {
|
||||
slog.Debug("NVML: Utilization failed (GPU likely suspended)", "bdf", bdf, "ret", ret)
|
||||
gpu.Temperature = 0
|
||||
gpu.MemoryUsed = 0
|
||||
continue
|
||||
}
|
||||
|
||||
slog.Debug("NVML: Collecting data for GPU", "bdf", bdf)
|
||||
|
||||
// Temperature
|
||||
var temp uint32
|
||||
nvmlDeviceGetTemperature(device, 0, &temp) // 0 is NVML_TEMPERATURE_GPU
|
||||
|
||||
// Memory: only poll if GPU is active to avoid leaving D3cold state (#1522)
|
||||
if utilization.Gpu > 0 {
|
||||
var usedMem, totalMem uint64
|
||||
if c.isV2 {
|
||||
var memory nvmlMemoryV2
|
||||
memory.Version = 0x02000028 // (2 << 24) | 40 bytes
|
||||
if ret := nvmlDeviceGetMemoryInfo(device, uintptr(unsafe.Pointer(&memory))); ret != nvmlReturn(nvmlSuccess) {
|
||||
slog.Debug("NVML: MemoryInfo_v2 failed", "bdf", bdf, "ret", ret)
|
||||
} else {
|
||||
usedMem = memory.Used
|
||||
totalMem = memory.Total
|
||||
}
|
||||
} else {
|
||||
var memory nvmlMemoryV1
|
||||
if ret := nvmlDeviceGetMemoryInfo(device, uintptr(unsafe.Pointer(&memory))); ret != nvmlReturn(nvmlSuccess) {
|
||||
slog.Debug("NVML: MemoryInfo failed", "bdf", bdf, "ret", ret)
|
||||
} else {
|
||||
usedMem = memory.Used
|
||||
totalMem = memory.Total
|
||||
}
|
||||
}
|
||||
if totalMem > 0 {
|
||||
gpu.MemoryUsed = float64(usedMem) / 1024 / 1024 / mebibytesInAMegabyte
|
||||
gpu.MemoryTotal = float64(totalMem) / 1024 / 1024 / mebibytesInAMegabyte
|
||||
}
|
||||
} else {
|
||||
slog.Debug("NVML: Skipping memory info (utilization=0)", "bdf", bdf)
|
||||
}
|
||||
|
||||
// Power
|
||||
var power uint32
|
||||
nvmlDeviceGetPowerUsage(device, &power)
|
||||
|
||||
gpu.Temperature = float64(temp)
|
||||
gpu.Usage += float64(utilization.Gpu)
|
||||
gpu.Power += float64(power) / 1000.0
|
||||
gpu.Count++
|
||||
slog.Debug("NVML: Collected data", "gpu", gpu)
|
||||
}
|
||||
}
|
||||
57
agent/gpu_nvml_linux.go
Normal file
@@ -0,0 +1,57 @@
|
||||
//go:build glibc && linux && amd64
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/ebitengine/purego"
|
||||
)
|
||||
|
||||
func openLibrary(name string) (uintptr, error) {
|
||||
return purego.Dlopen(name, purego.RTLD_NOW|purego.RTLD_GLOBAL)
|
||||
}
|
||||
|
||||
func getNVMLPath() string {
|
||||
return "libnvidia-ml.so.1"
|
||||
}
|
||||
|
||||
func hasSymbol(lib uintptr, symbol string) bool {
|
||||
_, err := purego.Dlsym(lib, symbol)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func (c *nvmlCollector) isGPUActive(bdf string) bool {
|
||||
// runtime_status
|
||||
statusPath := filepath.Join("/sys/bus/pci/devices", bdf, "power/runtime_status")
|
||||
status, err := os.ReadFile(statusPath)
|
||||
if err != nil {
|
||||
slog.Debug("NVML: Can't read runtime_status", "bdf", bdf, "err", err)
|
||||
return true // Assume active if we can't read status
|
||||
}
|
||||
statusStr := strings.TrimSpace(string(status))
|
||||
if statusStr != "active" && statusStr != "resuming" {
|
||||
slog.Debug("NVML: GPU not active", "bdf", bdf, "status", statusStr)
|
||||
return false
|
||||
}
|
||||
|
||||
// power_state (D0 check)
|
||||
// Find any drm card device power_state
|
||||
pstatePathPattern := filepath.Join("/sys/bus/pci/devices", bdf, "drm/card*/device/power_state")
|
||||
matches, _ := filepath.Glob(pstatePathPattern)
|
||||
if len(matches) > 0 {
|
||||
pstate, err := os.ReadFile(matches[0])
|
||||
if err == nil {
|
||||
pstateStr := strings.TrimSpace(string(pstate))
|
||||
if pstateStr != "D0" {
|
||||
slog.Debug("NVML: GPU not in D0 state", "bdf", bdf, "pstate", pstateStr)
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
33
agent/gpu_nvml_unsupported.go
Normal file
@@ -0,0 +1,33 @@
|
||||
//go:build (!linux && !windows) || !amd64 || (linux && !glibc)
|
||||
|
||||
package agent
|
||||
|
||||
import "fmt"
|
||||
|
||||
type nvmlCollector struct {
|
||||
gm *GPUManager
|
||||
}
|
||||
|
||||
func (c *nvmlCollector) init() error {
|
||||
return fmt.Errorf("nvml not supported on this platform")
|
||||
}
|
||||
|
||||
func (c *nvmlCollector) start() {}
|
||||
|
||||
func (c *nvmlCollector) collect() {}
|
||||
|
||||
func openLibrary(name string) (uintptr, error) {
|
||||
return 0, fmt.Errorf("nvml not supported on this platform")
|
||||
}
|
||||
|
||||
func getNVMLPath() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func hasSymbol(lib uintptr, symbol string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *nvmlCollector) isGPUActive(bdf string) bool {
|
||||
return true
|
||||
}
|
||||
25
agent/gpu_nvml_windows.go
Normal file
@@ -0,0 +1,25 @@
|
||||
//go:build windows && amd64
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
func openLibrary(name string) (uintptr, error) {
|
||||
handle, err := windows.LoadLibrary(name)
|
||||
return uintptr(handle), err
|
||||
}
|
||||
|
||||
func getNVMLPath() string {
|
||||
return "nvml.dll"
|
||||
}
|
||||
|
||||
func hasSymbol(lib uintptr, symbol string) bool {
|
||||
_, err := windows.GetProcAddress(windows.Handle(lib), symbol)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func (c *nvmlCollector) isGPUActive(bdf string) bool {
|
||||
return true
|
||||
}
|
||||
@@ -4,8 +4,10 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -305,6 +307,19 @@ func TestParseJetsonData(t *testing.T) {
|
||||
Count: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "orin-style output with GPU@ temp and VDD_SYS_GPU power",
|
||||
input: "RAM 3276/7859MB (lfb 5x4MB) SWAP 1626/12122MB (cached 181MB) CPU [44%@1421,49%@2031,67%@2034,17%@1420,25%@1419,8%@1420] EMC_FREQ 1%@1866 GR3D_FREQ 0%@114 APE 150 MTS fg 1% bg 1% PLL@42.5C MCPU@42.5C PMIC@50C Tboard@38C GPU@39.5C BCPU@42.5C thermal@41.3C Tdiode@39.25C VDD_SYS_GPU 182/182 VDD_SYS_SOC 730/730 VDD_4V0_WIFI 0/0 VDD_IN 5297/5297 VDD_SYS_CPU 1917/1917 VDD_SYS_DDR 1241/1241",
|
||||
wantMetrics: &system.GPUData{
|
||||
Name: "GPU",
|
||||
MemoryUsed: 3276.0,
|
||||
MemoryTotal: 7859.0,
|
||||
Usage: 0.0,
|
||||
Power: 0.182, // 182mW -> 0.182W
|
||||
Temperature: 39.5,
|
||||
Count: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
@@ -823,7 +838,7 @@ func TestInitializeSnapshots(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCalculateGPUAverage(t *testing.T) {
|
||||
t.Run("returns zero value when deltaCount is zero", func(t *testing.T) {
|
||||
t.Run("returns cached average when deltaCount is zero", func(t *testing.T) {
|
||||
gm := &GPUManager{
|
||||
lastSnapshots: map[uint16]map[string]*gpuSnapshot{
|
||||
5000: {
|
||||
@@ -836,9 +851,10 @@ func TestCalculateGPUAverage(t *testing.T) {
|
||||
}
|
||||
|
||||
gpu := &system.GPUData{
|
||||
Count: 10.0, // Same as snapshot, so delta = 0
|
||||
Usage: 100.0,
|
||||
Power: 200.0,
|
||||
Count: 10.0, // Same as snapshot, so delta = 0
|
||||
Usage: 100.0,
|
||||
Power: 200.0,
|
||||
Temperature: 50.0, // Non-zero to avoid "suspended" check
|
||||
}
|
||||
|
||||
result := gm.calculateGPUAverage("0", gpu, 5000)
|
||||
@@ -847,6 +863,31 @@ func TestCalculateGPUAverage(t *testing.T) {
|
||||
assert.Equal(t, 100.0, result.Power, "Should return cached average")
|
||||
})
|
||||
|
||||
t.Run("returns zero value when GPU is suspended", func(t *testing.T) {
|
||||
gm := &GPUManager{
|
||||
lastSnapshots: map[uint16]map[string]*gpuSnapshot{
|
||||
5000: {
|
||||
"0": {count: 10, usage: 100, power: 200},
|
||||
},
|
||||
},
|
||||
lastAvgData: map[string]system.GPUData{
|
||||
"0": {Usage: 50.0, Power: 100.0},
|
||||
},
|
||||
}
|
||||
|
||||
gpu := &system.GPUData{
|
||||
Name: "Test GPU",
|
||||
Count: 10.0,
|
||||
Temperature: 0,
|
||||
MemoryUsed: 0,
|
||||
}
|
||||
|
||||
result := gm.calculateGPUAverage("0", gpu, 5000)
|
||||
|
||||
assert.Equal(t, 0.0, result.Usage, "Should return zero usage")
|
||||
assert.Equal(t, 0.0, result.Power, "Should return zero power")
|
||||
})
|
||||
|
||||
t.Run("calculates average for standard GPU", func(t *testing.T) {
|
||||
gm := &GPUManager{
|
||||
lastSnapshots: map[uint16]map[string]*gpuSnapshot{
|
||||
@@ -1344,7 +1385,7 @@ func TestIntelUpdateFromStats(t *testing.T) {
|
||||
ok := gm.updateIntelFromStats(&sample1)
|
||||
assert.True(t, ok)
|
||||
|
||||
gpu := gm.GpuDataMap["0"]
|
||||
gpu := gm.GpuDataMap["i0"]
|
||||
require.NotNil(t, gpu)
|
||||
assert.Equal(t, "GPU", gpu.Name)
|
||||
assert.EqualValues(t, 10.5, gpu.Power)
|
||||
@@ -1366,7 +1407,7 @@ func TestIntelUpdateFromStats(t *testing.T) {
|
||||
ok = gm.updateIntelFromStats(&sample2)
|
||||
assert.True(t, ok)
|
||||
|
||||
gpu = gm.GpuDataMap["0"]
|
||||
gpu = gm.GpuDataMap["i0"]
|
||||
require.NotNil(t, gpu)
|
||||
assert.EqualValues(t, 10.5, gpu.Power)
|
||||
assert.EqualValues(t, 30.0, gpu.Engines["Render/3D"]) // 20 + 10
|
||||
@@ -1405,7 +1446,7 @@ echo "298 295 278 51 2.20 3.12 1675 942 5.75 1 2 9.50
|
||||
t.Fatalf("collectIntelStats error: %v", err)
|
||||
}
|
||||
|
||||
gpu := gm.GpuDataMap["0"]
|
||||
gpu := gm.GpuDataMap["i0"]
|
||||
require.NotNil(t, gpu)
|
||||
// Power should be sum of samples 2-4 (first is skipped): 2.0 + 1.8 + 2.2 = 6.0
|
||||
assert.EqualValues(t, 6.0, gpu.Power)
|
||||
@@ -1437,6 +1478,15 @@ func TestParseIntelHeaders(t *testing.T) {
|
||||
wantPowerIndex: 4, // "gpu" is at index 4
|
||||
wantPreEngineCols: 8, // 17 total cols - 3*3 = 8
|
||||
},
|
||||
{
|
||||
name: "basic headers with RCS BCS VCS using index in name",
|
||||
header1: "Freq MHz IRQ RC6 Power W IMC MiB/s RCS/0 BCS/1 VCS/2",
|
||||
header2: " req act /s % gpu pkg rd wr % se wa % se wa % se wa",
|
||||
wantEngineNames: []string{"RCS", "BCS", "VCS"},
|
||||
wantFriendlyNames: []string{"Render/3D", "Blitter", "Video"},
|
||||
wantPowerIndex: 4, // "gpu" is at index 4
|
||||
wantPreEngineCols: 8, // 17 total cols - 3*3 = 8
|
||||
},
|
||||
{
|
||||
name: "headers with only RCS",
|
||||
header1: "Freq MHz IRQ RC6 Power W IMC MiB/s RCS",
|
||||
@@ -1624,3 +1674,42 @@ func TestParseIntelData(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntelCollectorDeviceEnv(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
t.Setenv("PATH", dir)
|
||||
|
||||
// Prepare a file to capture args
|
||||
argsFile := filepath.Join(dir, "args.txt")
|
||||
|
||||
// Create a fake intel_gpu_top that records its arguments and prints minimal valid output
|
||||
scriptPath := filepath.Join(dir, "intel_gpu_top")
|
||||
script := fmt.Sprintf(`#!/bin/sh
|
||||
echo "$@" > %s
|
||||
echo "Freq MHz IRQ RC6 Power W IMC MiB/s RCS VCS"
|
||||
echo " req act /s %% gpu pkg rd wr %% se wa %% se wa"
|
||||
echo "226 223 338 58 2.00 2.69 1820 965 0.00 0 0 0.00 0 0"
|
||||
echo "189 187 412 67 1.80 2.45 1950 823 8.50 2 1 15.00 1 0"
|
||||
`, argsFile)
|
||||
if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Set device selector via prefixed env var
|
||||
t.Setenv("BESZEL_AGENT_INTEL_GPU_DEVICE", "sriov")
|
||||
|
||||
gm := &GPUManager{GpuDataMap: make(map[string]*system.GPUData)}
|
||||
if err := gm.collectIntelStats(); err != nil {
|
||||
t.Fatalf("collectIntelStats error: %v", err)
|
||||
}
|
||||
|
||||
// Verify that -d sriov was passed
|
||||
data, err := os.ReadFile(argsFile)
|
||||
if err != nil {
|
||||
t.Fatalf("failed reading args file: %v", err)
|
||||
}
|
||||
argsStr := strings.TrimSpace(string(data))
|
||||
require.Contains(t, argsStr, "-d sriov")
|
||||
require.Contains(t, argsStr, "-s ")
|
||||
require.Contains(t, argsStr, "-l")
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
"github.com/henrygd/beszel/internal/common"
|
||||
"github.com/henrygd/beszel/internal/entities/smart"
|
||||
|
||||
"golang.org/x/exp/slog"
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
// HandlerContext provides context for request handlers
|
||||
@@ -50,6 +50,7 @@ func NewHandlerRegistry() *HandlerRegistry {
|
||||
registry.Register(common.GetContainerLogs, &GetContainerLogsHandler{})
|
||||
registry.Register(common.GetContainerInfo, &GetContainerInfoHandler{})
|
||||
registry.Register(common.GetSmartData, &GetSmartDataHandler{})
|
||||
registry.Register(common.GetSystemdInfo, &GetSystemdInfoHandler{})
|
||||
|
||||
return registry
|
||||
}
|
||||
@@ -93,7 +94,7 @@ func (h *GetDataHandler) Handle(hctx *HandlerContext) error {
|
||||
var options common.DataRequestOptions
|
||||
_ = cbor.Unmarshal(hctx.Request.Data, &options)
|
||||
|
||||
-sysStats := hctx.Agent.gatherStats(options.CacheTimeMs)
|
||||
+sysStats := hctx.Agent.gatherStats(options)
|
||||
return hctx.SendResponse(sysStats, hctx.RequestID)
|
||||
}
|
||||
|
||||
@@ -168,9 +169,37 @@ func (h *GetSmartDataHandler) Handle(hctx *HandlerContext) error {
|
||||
// return empty map to indicate no data
|
||||
return hctx.SendResponse(map[string]smart.SmartData{}, hctx.RequestID)
|
||||
}
|
||||
-if err := hctx.Agent.smartManager.Refresh(); err != nil {
|
||||
+if err := hctx.Agent.smartManager.Refresh(false); err != nil {
|
||||
slog.Debug("smart refresh failed", "err", err)
|
||||
}
|
||||
data := hctx.Agent.smartManager.GetCurrentData()
|
||||
return hctx.SendResponse(data, hctx.RequestID)
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// GetSystemdInfoHandler handles detailed systemd service info requests
|
||||
type GetSystemdInfoHandler struct{}
|
||||
|
||||
func (h *GetSystemdInfoHandler) Handle(hctx *HandlerContext) error {
|
||||
if hctx.Agent.systemdManager == nil {
|
||||
return errors.ErrUnsupported
|
||||
}
|
||||
|
||||
var req common.SystemdInfoRequest
|
||||
if err := cbor.Unmarshal(hctx.Request.Data, &req); err != nil {
|
||||
return err
|
||||
}
|
||||
if req.ServiceName == "" {
|
||||
return errors.New("service name is required")
|
||||
}
|
||||
|
||||
details, err := hctx.Agent.systemdManager.getServiceDetails(req.ServiceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return hctx.SendResponse(details, hctx.RequestID)
|
||||
}
|
||||
|
||||
@@ -9,11 +9,31 @@ import (
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
// healthFile is the path to the health file
|
||||
-var healthFile = filepath.Join(os.TempDir(), "beszel_health")
|
||||
+var healthFile = getHealthFilePath()
|
||||
|
||||
func getHealthFilePath() string {
|
||||
filename := "beszel_health"
|
||||
if runtime.GOOS == "linux" {
|
||||
fullPath := filepath.Join("/dev/shm", filename)
|
||||
if err := updateHealthFile(fullPath); err == nil {
|
||||
return fullPath
|
||||
}
|
||||
}
|
||||
return filepath.Join(os.TempDir(), filename)
|
||||
}
|
||||
|
||||
func updateHealthFile(path string) error {
|
||||
file, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return file.Close()
|
||||
}
|
||||
|
||||
// Check checks if the agent is connected by checking the modification time of the health file
|
||||
func Check() error {
|
||||
@@ -30,11 +50,7 @@ func Check() error {
|
||||
|
||||
// Update updates the modification time of the health file
|
||||
func Update() error {
|
||||
-file, err := os.Create(healthFile)
|
||||
-if err != nil {
|
||||
-return err
|
||||
-}
|
||||
-return file.Close()
|
||||
+return updateHealthFile(healthFile)
|
||||
}
|
||||
|
||||
// CleanUp removes the health file
|
||||
|
||||
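For context (not part of this commit): a minimal Go sketch of how an external healthcheck could consume the health file above by checking its modification time. The temp-dir path and the 90-second staleness threshold are illustrative assumptions; the diff prefers /dev/shm/beszel_health on Linux and falls back to the temp dir elsewhere.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

func main() {
	// Assumed fallback location: os.TempDir()/beszel_health.
	path := filepath.Join(os.TempDir(), "beszel_health")
	info, err := os.Stat(path)
	if err != nil {
		fmt.Println("unhealthy: missing health file:", err)
		os.Exit(1)
	}
	// Illustrative threshold: treat the agent as stale if the file was not
	// touched within the last 90 seconds.
	if time.Since(info.ModTime()) > 90*time.Second {
		fmt.Println("unhealthy: health file is stale")
		os.Exit(1)
	}
	fmt.Println("ok")
}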
@@ -52,7 +52,12 @@ class Program
|
||||
foreach (var sensor in hardware.Sensors)
|
||||
{
|
||||
var validTemp = sensor.SensorType == SensorType.Temperature && sensor.Value.HasValue;
|
||||
-if (!validTemp || sensor.Name.Contains("Distance"))
|
||||
+if (!validTemp ||
|
||||
+sensor.Name.IndexOf("Distance", StringComparison.OrdinalIgnoreCase) >= 0 ||
|
||||
+sensor.Name.IndexOf("Limit", StringComparison.OrdinalIgnoreCase) >= 0 ||
|
||||
+sensor.Name.IndexOf("Critical", StringComparison.OrdinalIgnoreCase) >= 0 ||
|
||||
+sensor.Name.IndexOf("Warning", StringComparison.OrdinalIgnoreCase) >= 0 ||
|
||||
+sensor.Name.IndexOf("Resolution", StringComparison.OrdinalIgnoreCase) >= 0)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -3,9 +3,11 @@
|
||||
<OutputType>Exe</OutputType>
|
||||
<TargetFramework>net48</TargetFramework>
|
||||
<Platforms>x64</Platforms>
|
||||
<RuntimeIdentifier>win-x64</RuntimeIdentifier>
|
||||
<AppendRuntimeIdentifierToOutputPath>false</AppendRuntimeIdentifierToOutputPath>
|
||||
</PropertyGroup>
|
||||
|
||||
<ItemGroup>
|
||||
<PackageReference Include="LibreHardwareMonitorLib" Version="0.9.4" />
|
||||
<PackageReference Include="LibreHardwareMonitorLib" Version="0.9.5" />
|
||||
</ItemGroup>
|
||||
</Project>
|
||||
|
||||
31
agent/response.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"github.com/henrygd/beszel/internal/common"
|
||||
"github.com/henrygd/beszel/internal/entities/smart"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||
)
|
||||
|
||||
// newAgentResponse creates an AgentResponse using legacy typed fields.
|
||||
// This maintains backward compatibility with <= 0.17 hubs that expect specific fields.
|
||||
func newAgentResponse(data any, requestID *uint32) common.AgentResponse {
|
||||
response := common.AgentResponse{Id: requestID}
|
||||
switch v := data.(type) {
|
||||
case *system.CombinedData:
|
||||
response.SystemData = v
|
||||
case *common.FingerprintResponse:
|
||||
response.Fingerprint = v
|
||||
case string:
|
||||
response.String = &v
|
||||
case map[string]smart.SmartData:
|
||||
response.SmartData = v
|
||||
case systemd.ServiceDetails:
|
||||
response.ServiceInfo = v
|
||||
default:
|
||||
// For unknown types, use the generic Data field
|
||||
response.Data, _ = cbor.Marshal(data)
|
||||
}
|
||||
return response
|
||||
}
|
||||
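For context (not part of this commit): a hedged usage sketch of newAgentResponse from the new agent/response.go above. encodePong is a hypothetical helper, assumed to live in the agent package; only newAgentResponse and the CBOR encoding come from the diff.

package agent

import "github.com/fxamacker/cbor/v2"

// encodePong (hypothetical) shows the type-based dispatch: a plain string value
// lands in the legacy String field, so hubs <= 0.17 can still decode the reply.
func encodePong(requestID *uint32) ([]byte, error) {
	resp := newAgentResponse("pong", requestID)
	return cbor.Marshal(resp)
}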
@@ -163,16 +163,9 @@ func (a *Agent) handleSSHRequest(w io.Writer, req *common.HubRequest[cbor.RawMes
|
||||
}
|
||||
|
||||
// responder that writes AgentResponse to stdout
|
||||
// Uses legacy typed fields for backward compatibility with <= 0.17
|
||||
sshResponder := func(data any, requestID *uint32) error {
|
||||
-response := common.AgentResponse{Id: requestID}
|
||||
-switch v := data.(type) {
|
||||
-case *system.CombinedData:
|
||||
-response.SystemData = v
|
||||
-case string:
|
||||
-response.String = &v
|
||||
-default:
|
||||
-response.Error = fmt.Sprintf("unsupported response type: %T", data)
|
||||
-}
|
||||
+response := newAgentResponse(data, requestID)
|
||||
return cbor.NewEncoder(w).Encode(response)
|
||||
}
|
||||
|
||||
@@ -196,7 +189,7 @@ func (a *Agent) handleSSHRequest(w io.Writer, req *common.HubRequest[cbor.RawMes
|
||||
|
||||
// handleLegacyStats serves the legacy one-shot stats payload for older hubs
|
||||
func (a *Agent) handleLegacyStats(w io.Writer, hubVersion semver.Version) error {
|
||||
-stats := a.gatherStats(60_000)
|
||||
+stats := a.gatherStats(common.DataRequestOptions{CacheTimeMs: 60_000})
|
||||
return a.writeToSession(w, stats, hubVersion)
|
||||
}
|
||||
|
||||
|
||||
@@ -513,7 +513,7 @@ func TestWriteToSessionEncoding(t *testing.T) {
|
||||
err = json.Unmarshal([]byte(encodedData), &decodedJson)
|
||||
assert.Error(t, err, "Should not be valid JSON data")
|
||||
|
||||
assert.Equal(t, testData.Info.Hostname, decodedCbor.Info.Hostname)
|
||||
assert.Equal(t, testData.Details.Hostname, decodedCbor.Details.Hostname)
|
||||
assert.Equal(t, testData.Stats.Cpu, decodedCbor.Stats.Cpu)
|
||||
} else {
|
||||
// Should be JSON - try to decode as JSON
|
||||
@@ -526,7 +526,7 @@ func TestWriteToSessionEncoding(t *testing.T) {
|
||||
assert.Error(t, err, "Should not be valid CBOR data")
|
||||
|
||||
// Verify the decoded JSON data matches our test data
|
||||
assert.Equal(t, testData.Info.Hostname, decodedJson.Info.Hostname)
|
||||
assert.Equal(t, testData.Details.Hostname, decodedJson.Details.Hostname)
|
||||
assert.Equal(t, testData.Stats.Cpu, decodedJson.Stats.Cpu)
|
||||
|
||||
// Verify it looks like JSON (starts with '{' and contains readable field names)
|
||||
@@ -550,13 +550,12 @@ func createTestCombinedData() *system.CombinedData {
|
||||
DiskUsed: 549755813888, // 512GB
|
||||
DiskPct: 50.0,
|
||||
},
|
||||
Details: &system.Details{
|
||||
Hostname: "test-host",
|
||||
},
|
||||
Info: system.Info{
|
||||
Hostname: "test-host",
|
||||
Cores: 8,
|
||||
CpuModel: "Test CPU Model",
|
||||
Uptime: 3600,
|
||||
AgentVersion: "0.12.0",
|
||||
Os: system.Linux,
|
||||
},
|
||||
Containers: []*container.Stats{
|
||||
{
|
||||
|
||||
854
agent/smart.go
File diff suppressed because it is too large
9
agent/smart_nonwindows.go
Normal file
@@ -0,0 +1,9 @@
|
||||
//go:build !windows
|
||||
|
||||
package agent
|
||||
|
||||
import "errors"
|
||||
|
||||
func ensureEmbeddedSmartctl() (string, error) {
|
||||
return "", errors.ErrUnsupported
|
||||
}
|
||||
954
agent/smart_test.go
Normal file
@@ -0,0 +1,954 @@
|
||||
//go:build testing
|
||||
// +build testing
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/smart"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestParseSmartForScsi(t *testing.T) {
|
||||
fixturePath := filepath.Join("test-data", "smart", "scsi.json")
|
||||
data, err := os.ReadFile(fixturePath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed reading fixture: %v", err)
|
||||
}
|
||||
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: make(map[string]*smart.SmartData),
|
||||
}
|
||||
|
||||
hasData, exitStatus := sm.parseSmartForScsi(data)
|
||||
if !hasData {
|
||||
t.Fatalf("expected SCSI data to parse successfully")
|
||||
}
|
||||
if exitStatus != 0 {
|
||||
t.Fatalf("expected exit status 0, got %d", exitStatus)
|
||||
}
|
||||
|
||||
deviceData, ok := sm.SmartDataMap["9YHSDH9B"]
|
||||
if !ok {
|
||||
t.Fatalf("expected smart data entry for serial 9YHSDH9B")
|
||||
}
|
||||
|
||||
assert.Equal(t, deviceData.ModelName, "YADRO WUH721414AL4204")
|
||||
assert.Equal(t, deviceData.SerialNumber, "9YHSDH9B")
|
||||
assert.Equal(t, deviceData.FirmwareVersion, "C240")
|
||||
assert.Equal(t, deviceData.DiskName, "/dev/sde")
|
||||
assert.Equal(t, deviceData.DiskType, "scsi")
|
||||
assert.EqualValues(t, deviceData.Temperature, 34)
|
||||
assert.Equal(t, deviceData.SmartStatus, "PASSED")
|
||||
assert.EqualValues(t, deviceData.Capacity, 14000519643136)
|
||||
|
||||
if len(deviceData.Attributes) == 0 {
|
||||
t.Fatalf("expected attributes to be populated")
|
||||
}
|
||||
|
||||
assertAttrValue(t, deviceData.Attributes, "PowerOnHours", 458)
|
||||
assertAttrValue(t, deviceData.Attributes, "PowerOnMinutes", 25)
|
||||
assertAttrValue(t, deviceData.Attributes, "GrownDefectList", 0)
|
||||
assertAttrValue(t, deviceData.Attributes, "StartStopCycles", 2)
|
||||
assertAttrValue(t, deviceData.Attributes, "LoadUnloadCycles", 418)
|
||||
assertAttrValue(t, deviceData.Attributes, "ReadGigabytesProcessed", 3641)
|
||||
assertAttrValue(t, deviceData.Attributes, "WriteGigabytesProcessed", 2124590)
|
||||
assertAttrValue(t, deviceData.Attributes, "VerifyGigabytesProcessed", 0)
|
||||
}
|
||||
|
||||
func TestParseSmartForSata(t *testing.T) {
|
||||
fixturePath := filepath.Join("test-data", "smart", "sda.json")
|
||||
data, err := os.ReadFile(fixturePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: make(map[string]*smart.SmartData),
|
||||
}
|
||||
|
||||
hasData, exitStatus := sm.parseSmartForSata(data)
|
||||
require.True(t, hasData)
|
||||
assert.Equal(t, 64, exitStatus)
|
||||
|
||||
deviceData, ok := sm.SmartDataMap["9C40918040082"]
|
||||
require.True(t, ok, "expected smart data entry for serial 9C40918040082")
|
||||
|
||||
assert.Equal(t, "P3-2TB", deviceData.ModelName)
|
||||
assert.Equal(t, "X0104A0", deviceData.FirmwareVersion)
|
||||
assert.Equal(t, "/dev/sda", deviceData.DiskName)
|
||||
assert.Equal(t, "sat", deviceData.DiskType)
|
||||
assert.Equal(t, uint8(31), deviceData.Temperature)
|
||||
assert.Equal(t, "PASSED", deviceData.SmartStatus)
|
||||
assert.Equal(t, uint64(2048408248320), deviceData.Capacity)
|
||||
if assert.NotEmpty(t, deviceData.Attributes) {
|
||||
assertAttrValue(t, deviceData.Attributes, "Temperature_Celsius", 31)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseSmartForSataDeviceStatisticsTemperature(t *testing.T) {
|
||||
jsonPayload := []byte(`{
|
||||
"smartctl": {"exit_status": 0},
|
||||
"device": {"name": "/dev/sdb", "type": "sat"},
|
||||
"model_name": "SanDisk SSD U110 16GB",
|
||||
"serial_number": "DEVSTAT123",
|
||||
"firmware_version": "U21B001",
|
||||
"user_capacity": {"bytes": 16013942784},
|
||||
"smart_status": {"passed": true},
|
||||
"ata_smart_attributes": {"table": []},
|
||||
"ata_device_statistics": {
|
||||
"pages": [
|
||||
{
|
||||
"number": 5,
|
||||
"name": "Temperature Statistics",
|
||||
"table": [
|
||||
{"name": "Current Temperature", "value": 22, "flags": {"valid": true}}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}`)
|
||||
|
||||
sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
|
||||
hasData, exitStatus := sm.parseSmartForSata(jsonPayload)
|
||||
require.True(t, hasData)
|
||||
assert.Equal(t, 0, exitStatus)
|
||||
|
||||
deviceData, ok := sm.SmartDataMap["DEVSTAT123"]
|
||||
require.True(t, ok, "expected smart data entry for serial DEVSTAT123")
|
||||
assert.Equal(t, uint8(22), deviceData.Temperature)
|
||||
}
|
||||
|
||||
func TestParseSmartForSataParentheticalRawValue(t *testing.T) {
|
||||
jsonPayload := []byte(`{
|
||||
"smartctl": {"exit_status": 0},
|
||||
"device": {"name": "/dev/sdz", "type": "sat"},
|
||||
"model_name": "Example",
|
||||
"serial_number": "PARENTHESES123",
|
||||
"firmware_version": "1.0",
|
||||
"user_capacity": {"bytes": 1024},
|
||||
"smart_status": {"passed": true},
|
||||
"temperature": {"current": 25},
|
||||
"ata_smart_attributes": {
|
||||
"table": [
|
||||
{
|
||||
"id": 9,
|
||||
"name": "Power_On_Hours",
|
||||
"value": 93,
|
||||
"worst": 55,
|
||||
"thresh": 0,
|
||||
"when_failed": "",
|
||||
"raw": {
|
||||
"value": 57891864217128,
|
||||
"string": "39925 (212 206 0)"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}`)
|
||||
|
||||
sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
|
||||
|
||||
hasData, exitStatus := sm.parseSmartForSata(jsonPayload)
|
||||
require.True(t, hasData)
|
||||
assert.Equal(t, 0, exitStatus)
|
||||
|
||||
data, ok := sm.SmartDataMap["PARENTHESES123"]
|
||||
require.True(t, ok)
|
||||
require.Len(t, data.Attributes, 1)
|
||||
|
||||
attr := data.Attributes[0]
|
||||
assert.Equal(t, uint64(39925), attr.RawValue)
|
||||
assert.Equal(t, "39925 (212 206 0)", attr.RawString)
|
||||
}
|
||||
|
||||
func TestParseSmartForNvme(t *testing.T) {
|
||||
fixturePath := filepath.Join("test-data", "smart", "nvme0.json")
|
||||
data, err := os.ReadFile(fixturePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: make(map[string]*smart.SmartData),
|
||||
}
|
||||
|
||||
hasData, exitStatus := sm.parseSmartForNvme(data)
|
||||
require.True(t, hasData)
|
||||
assert.Equal(t, 0, exitStatus)
|
||||
|
||||
deviceData, ok := sm.SmartDataMap["2024031600129"]
|
||||
require.True(t, ok, "expected smart data entry for serial 2024031600129")
|
||||
|
||||
assert.Equal(t, "PELADN 512GB", deviceData.ModelName)
|
||||
assert.Equal(t, "VC2S038E", deviceData.FirmwareVersion)
|
||||
assert.Equal(t, "/dev/nvme0", deviceData.DiskName)
|
||||
assert.Equal(t, "nvme", deviceData.DiskType)
|
||||
assert.Equal(t, uint8(61), deviceData.Temperature)
|
||||
assert.Equal(t, "PASSED", deviceData.SmartStatus)
|
||||
assert.Equal(t, uint64(512110190592), deviceData.Capacity)
|
||||
if assert.NotEmpty(t, deviceData.Attributes) {
|
||||
assertAttrValue(t, deviceData.Attributes, "PercentageUsed", 0)
|
||||
assertAttrValue(t, deviceData.Attributes, "DataUnitsWritten", 16040567)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasDataForDevice(t *testing.T) {
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: map[string]*smart.SmartData{
|
||||
"serial-1": {DiskName: "/dev/sda"},
|
||||
"serial-2": nil,
|
||||
},
|
||||
}
|
||||
|
||||
assert.True(t, sm.hasDataForDevice("/dev/sda"))
|
||||
assert.False(t, sm.hasDataForDevice("/dev/sdb"))
|
||||
}
|
||||
|
||||
func TestDevicesSnapshotReturnsCopy(t *testing.T) {
|
||||
originalDevice := &DeviceInfo{Name: "/dev/sda"}
|
||||
sm := &SmartManager{
|
||||
SmartDevices: []*DeviceInfo{
|
||||
originalDevice,
|
||||
{Name: "/dev/sdb"},
|
||||
},
|
||||
}
|
||||
|
||||
snapshot := sm.devicesSnapshot()
|
||||
require.Len(t, snapshot, 2)
|
||||
|
||||
sm.SmartDevices[0] = &DeviceInfo{Name: "/dev/sdz"}
|
||||
assert.Equal(t, "/dev/sda", snapshot[0].Name)
|
||||
|
||||
snapshot[1] = &DeviceInfo{Name: "/dev/nvme0"}
|
||||
assert.Equal(t, "/dev/sdb", sm.SmartDevices[1].Name)
|
||||
|
||||
sm.SmartDevices = append(sm.SmartDevices, &DeviceInfo{Name: "/dev/nvme1"})
|
||||
assert.Len(t, snapshot, 2)
|
||||
}
|
||||
|
||||
func TestScanDevicesWithEnvOverrideAndSeparator(t *testing.T) {
|
||||
t.Setenv("SMART_DEVICES_SEPARATOR", "|")
|
||||
t.Setenv("SMART_DEVICES", "/dev/sda:jmb39x-q,0|/dev/nvme0:nvme")
|
||||
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: make(map[string]*smart.SmartData),
|
||||
}
|
||||
|
||||
err := sm.ScanDevices(true)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, sm.SmartDevices, 2)
|
||||
assert.Equal(t, "/dev/sda", sm.SmartDevices[0].Name)
|
||||
assert.Equal(t, "jmb39x-q,0", sm.SmartDevices[0].Type)
|
||||
assert.Equal(t, "/dev/nvme0", sm.SmartDevices[1].Name)
|
||||
assert.Equal(t, "nvme", sm.SmartDevices[1].Type)
|
||||
}
|
||||
|
||||
func TestScanDevicesWithEnvOverride(t *testing.T) {
|
||||
t.Setenv("SMART_DEVICES", "/dev/sda:sat, /dev/nvme0:nvme")
|
||||
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: make(map[string]*smart.SmartData),
|
||||
}
|
||||
|
||||
err := sm.ScanDevices(true)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, sm.SmartDevices, 2)
|
||||
assert.Equal(t, "/dev/sda", sm.SmartDevices[0].Name)
|
||||
assert.Equal(t, "sat", sm.SmartDevices[0].Type)
|
||||
assert.Equal(t, "/dev/nvme0", sm.SmartDevices[1].Name)
|
||||
assert.Equal(t, "nvme", sm.SmartDevices[1].Type)
|
||||
}
|
||||
|
||||
func TestScanDevicesWithEnvOverrideInvalid(t *testing.T) {
|
||||
t.Setenv("SMART_DEVICES", ":sat")
|
||||
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: make(map[string]*smart.SmartData),
|
||||
}
|
||||
|
||||
err := sm.ScanDevices(true)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestScanDevicesWithEnvOverrideEmpty(t *testing.T) {
|
||||
t.Setenv("SMART_DEVICES", " ")
|
||||
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: make(map[string]*smart.SmartData),
|
||||
}
|
||||
|
||||
err := sm.ScanDevices(true)
|
||||
assert.ErrorIs(t, err, errNoValidSmartData)
|
||||
assert.Empty(t, sm.SmartDevices)
|
||||
}
|
||||
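For context (not part of this commit): the tests above pin down the SMART_DEVICES override format — entries split on SMART_DEVICES_SEPARATOR (default ","), each entry "name:type". A rough sketch of that parsing, not the agent's actual implementation:

// Illustrative only; requires "strings" in the import block ("errors" is already imported).
func parseSmartDevicesSketch(value, separator string) ([]*DeviceInfo, error) {
	if separator == "" {
		separator = ","
	}
	var devices []*DeviceInfo
	for _, entry := range strings.Split(value, separator) {
		entry = strings.TrimSpace(entry)
		if entry == "" {
			continue
		}
		name, devType, _ := strings.Cut(entry, ":")
		if name == "" {
			return nil, errors.New("invalid SMART_DEVICES entry: " + entry)
		}
		devices = append(devices, &DeviceInfo{Name: name, Type: devType})
	}
	return devices, nil
}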
|
||||
func TestSmartctlArgsWithoutType(t *testing.T) {
|
||||
device := &DeviceInfo{Name: "/dev/sda"}
|
||||
|
||||
sm := &SmartManager{}
|
||||
|
||||
args := sm.smartctlArgs(device, true)
|
||||
assert.Equal(t, []string{"-a", "--json=c", "-n", "standby", "/dev/sda"}, args)
|
||||
}
|
||||
|
||||
func TestSmartctlArgs(t *testing.T) {
|
||||
sm := &SmartManager{}
|
||||
|
||||
sataDevice := &DeviceInfo{Name: "/dev/sda", Type: "sat"}
|
||||
assert.Equal(t,
|
||||
[]string{"-d", "sat", "-a", "--json=c", "-l", "devstat", "-n", "standby", "/dev/sda"},
|
||||
sm.smartctlArgs(sataDevice, true),
|
||||
)
|
||||
|
||||
assert.Equal(t,
|
||||
[]string{"-d", "sat", "-a", "--json=c", "-l", "devstat", "/dev/sda"},
|
||||
sm.smartctlArgs(sataDevice, false),
|
||||
)
|
||||
|
||||
nvmeDevice := &DeviceInfo{Name: "/dev/nvme0", Type: "nvme"}
|
||||
assert.Equal(t,
|
||||
[]string{"-d", "nvme", "-a", "--json=c", "-n", "standby", "/dev/nvme0"},
|
||||
sm.smartctlArgs(nvmeDevice, true),
|
||||
)
|
||||
|
||||
assert.Equal(t,
|
||||
[]string{"-a", "--json=c", "-n", "standby"},
|
||||
sm.smartctlArgs(nil, true),
|
||||
)
|
||||
}
|
||||
|
||||
func TestResolveRefreshError(t *testing.T) {
|
||||
scanErr := errors.New("scan failed")
|
||||
collectErr := errors.New("collect failed")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
devices []*DeviceInfo
|
||||
data map[string]*smart.SmartData
|
||||
scanErr error
|
||||
collectErr error
|
||||
expectedErr error
|
||||
expectNoErr bool
|
||||
}{
|
||||
{
|
||||
name: "no devices returns scan error",
|
||||
devices: nil,
|
||||
data: make(map[string]*smart.SmartData),
|
||||
scanErr: scanErr,
|
||||
expectedErr: scanErr,
|
||||
},
|
||||
{
|
||||
name: "has data ignores errors",
|
||||
devices: []*DeviceInfo{{Name: "/dev/sda"}},
|
||||
data: map[string]*smart.SmartData{"serial": {}},
|
||||
scanErr: scanErr,
|
||||
collectErr: collectErr,
|
||||
expectNoErr: true,
|
||||
},
|
||||
{
|
||||
name: "collect error preferred",
|
||||
devices: []*DeviceInfo{{Name: "/dev/sda"}},
|
||||
data: make(map[string]*smart.SmartData),
|
||||
collectErr: collectErr,
|
||||
expectedErr: collectErr,
|
||||
},
|
||||
{
|
||||
name: "scan error returned when no data",
|
||||
devices: []*DeviceInfo{{Name: "/dev/sda"}},
|
||||
data: make(map[string]*smart.SmartData),
|
||||
scanErr: scanErr,
|
||||
expectedErr: scanErr,
|
||||
},
|
||||
{
|
||||
name: "no errors returns sentinel",
|
||||
devices: []*DeviceInfo{{Name: "/dev/sda"}},
|
||||
data: make(map[string]*smart.SmartData),
|
||||
expectedErr: errNoValidSmartData,
|
||||
},
|
||||
{
|
||||
name: "no devices collect error",
|
||||
devices: nil,
|
||||
data: make(map[string]*smart.SmartData),
|
||||
collectErr: collectErr,
|
||||
expectedErr: collectErr,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
sm := &SmartManager{
|
||||
SmartDevices: tt.devices,
|
||||
SmartDataMap: tt.data,
|
||||
}
|
||||
|
||||
err := sm.resolveRefreshError(tt.scanErr, tt.collectErr)
|
||||
if tt.expectNoErr {
|
||||
assert.NoError(t, err)
|
||||
return
|
||||
}
|
||||
|
||||
if tt.expectedErr == nil {
|
||||
assert.NoError(t, err)
|
||||
} else {
|
||||
assert.Equal(t, tt.expectedErr, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseScan(t *testing.T) {
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: map[string]*smart.SmartData{
|
||||
"serial-active": {DiskName: "/dev/sda"},
|
||||
"serial-stale": {DiskName: "/dev/sdb"},
|
||||
},
|
||||
}
|
||||
|
||||
scanJSON := []byte(`{
|
||||
"devices": [
|
||||
{"name": "/dev/sda", "type": "sat", "info_name": "/dev/sda [SAT]", "protocol": "ATA"},
|
||||
{"name": "/dev/nvme0", "type": "nvme", "info_name": "/dev/nvme0", "protocol": "NVMe"}
|
||||
]
|
||||
}`)
|
||||
|
||||
devices, hasData := sm.parseScan(scanJSON)
|
||||
assert.True(t, hasData)
|
||||
|
||||
sm.updateSmartDevices(devices)
|
||||
|
||||
require.Len(t, sm.SmartDevices, 2)
|
||||
assert.Equal(t, "/dev/sda", sm.SmartDevices[0].Name)
|
||||
assert.Equal(t, "sat", sm.SmartDevices[0].Type)
|
||||
assert.Equal(t, "/dev/nvme0", sm.SmartDevices[1].Name)
|
||||
assert.Equal(t, "nvme", sm.SmartDevices[1].Type)
|
||||
|
||||
_, activeExists := sm.SmartDataMap["serial-active"]
|
||||
assert.True(t, activeExists, "active smart data should be preserved when device path remains")
|
||||
|
||||
_, staleExists := sm.SmartDataMap["serial-stale"]
|
||||
assert.False(t, staleExists, "stale smart data entry should be removed when device path disappears")
|
||||
}
|
||||
|
||||
func TestMergeDeviceListsPrefersConfigured(t *testing.T) {
|
||||
scanned := []*DeviceInfo{
|
||||
{Name: "/dev/sda", Type: "sat", InfoName: "scan-info", Protocol: "ATA"},
|
||||
{Name: "/dev/nvme0", Type: "nvme"},
|
||||
}
|
||||
|
||||
configured := []*DeviceInfo{
|
||||
{Name: "/dev/sda", Type: "sat-override"},
|
||||
{Name: "/dev/sdb", Type: "sat"},
|
||||
}
|
||||
|
||||
merged := mergeDeviceLists(nil, scanned, configured)
|
||||
require.Len(t, merged, 3)
|
||||
|
||||
byName := make(map[string]*DeviceInfo, len(merged))
|
||||
for _, dev := range merged {
|
||||
byName[dev.Name] = dev
|
||||
}
|
||||
|
||||
require.Contains(t, byName, "/dev/sda")
|
||||
assert.Equal(t, "sat-override", byName["/dev/sda"].Type, "configured type should override scanned type")
|
||||
assert.Equal(t, "scan-info", byName["/dev/sda"].InfoName, "scan metadata should be preserved when config does not provide it")
|
||||
|
||||
require.Contains(t, byName, "/dev/nvme0")
|
||||
assert.Equal(t, "nvme", byName["/dev/nvme0"].Type)
|
||||
|
||||
require.Contains(t, byName, "/dev/sdb")
|
||||
assert.Equal(t, "sat", byName["/dev/sdb"].Type)
|
||||
}
|
||||
|
||||
func TestMergeDeviceListsPreservesVerification(t *testing.T) {
|
||||
existing := []*DeviceInfo{
|
||||
{Name: "/dev/sda", Type: "sat+megaraid", parserType: "sat", typeVerified: true},
|
||||
}
|
||||
|
||||
scanned := []*DeviceInfo{
|
||||
{Name: "/dev/sda", Type: "nvme"},
|
||||
}
|
||||
|
||||
merged := mergeDeviceLists(existing, scanned, nil)
|
||||
require.Len(t, merged, 1)
|
||||
|
||||
device := merged[0]
|
||||
assert.True(t, device.typeVerified)
|
||||
assert.Equal(t, "sat", device.parserType)
|
||||
assert.Equal(t, "sat+megaraid", device.Type)
|
||||
}
|
||||
|
||||
func TestMergeDeviceListsUpdatesTypeWhenUnverified(t *testing.T) {
|
||||
existing := []*DeviceInfo{
|
||||
{Name: "/dev/sda", Type: "sat", parserType: "sat", typeVerified: false},
|
||||
}
|
||||
|
||||
scanned := []*DeviceInfo{
|
||||
{Name: "/dev/sda", Type: "nvme"},
|
||||
}
|
||||
|
||||
merged := mergeDeviceLists(existing, scanned, nil)
|
||||
require.Len(t, merged, 1)
|
||||
|
||||
device := merged[0]
|
||||
assert.False(t, device.typeVerified)
|
||||
assert.Equal(t, "nvme", device.Type)
|
||||
assert.Equal(t, "", device.parserType)
|
||||
}
|
||||
|
||||
func TestMergeDeviceListsHandlesDevicesWithSameNameAndDifferentTypes(t *testing.T) {
|
||||
// There are use cases where the same device name is re-used,
|
||||
// for example, a RAID controller with multiple drives.
|
||||
scanned := []*DeviceInfo{
|
||||
{Name: "/dev/sda", Type: "megaraid,0"},
|
||||
{Name: "/dev/sda", Type: "megaraid,1"},
|
||||
{Name: "/dev/sda", Type: "megaraid,2"},
|
||||
}
|
||||
|
||||
merged := mergeDeviceLists(nil, scanned, nil)
|
||||
require.Len(t, merged, 3, "should have 3 separate devices for RAID controller")
|
||||
|
||||
byKey := make(map[string]*DeviceInfo, len(merged))
|
||||
for _, dev := range merged {
|
||||
key := dev.Name + "|" + dev.Type
|
||||
byKey[key] = dev
|
||||
}
|
||||
|
||||
assert.Contains(t, byKey, "/dev/sda|megaraid,0")
|
||||
assert.Contains(t, byKey, "/dev/sda|megaraid,1")
|
||||
assert.Contains(t, byKey, "/dev/sda|megaraid,2")
|
||||
}
|
||||
|
||||
func TestMergeDeviceListsHandlesMixedRAIDAndRegular(t *testing.T) {
|
||||
// Test mixing RAID drives with regular devices
|
||||
scanned := []*DeviceInfo{
|
||||
{Name: "/dev/sda", Type: "megaraid,0"},
|
||||
{Name: "/dev/sda", Type: "megaraid,1"},
|
||||
{Name: "/dev/sdb", Type: "sat"},
|
||||
{Name: "/dev/nvme0", Type: "nvme"},
|
||||
}
|
||||
|
||||
merged := mergeDeviceLists(nil, scanned, nil)
|
||||
require.Len(t, merged, 4, "should have 4 separate devices")
|
||||
|
||||
byKey := make(map[string]*DeviceInfo, len(merged))
|
||||
for _, dev := range merged {
|
||||
key := dev.Name + "|" + dev.Type
|
||||
byKey[key] = dev
|
||||
}
|
||||
|
||||
assert.Contains(t, byKey, "/dev/sda|megaraid,0")
|
||||
assert.Contains(t, byKey, "/dev/sda|megaraid,1")
|
||||
assert.Contains(t, byKey, "/dev/sdb|sat")
|
||||
assert.Contains(t, byKey, "/dev/nvme0|nvme")
|
||||
}
|
||||
|
||||
func TestUpdateSmartDevicesPreservesRAIDDrives(t *testing.T) {
|
||||
// Test that updateSmartDevices correctly validates RAID drives using composite keys
|
||||
sm := &SmartManager{
|
||||
SmartDevices: []*DeviceInfo{
|
||||
{Name: "/dev/sda", Type: "megaraid,0"},
|
||||
{Name: "/dev/sda", Type: "megaraid,1"},
|
||||
},
|
||||
SmartDataMap: map[string]*smart.SmartData{
|
||||
"serial-0": {
|
||||
DiskName: "/dev/sda",
|
||||
DiskType: "megaraid,0",
|
||||
SerialNumber: "serial-0",
|
||||
},
|
||||
"serial-1": {
|
||||
DiskName: "/dev/sda",
|
||||
DiskType: "megaraid,1",
|
||||
SerialNumber: "serial-1",
|
||||
},
|
||||
"serial-stale": {
|
||||
DiskName: "/dev/sda",
|
||||
DiskType: "megaraid,2",
|
||||
SerialNumber: "serial-stale",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
sm.updateSmartDevices(sm.SmartDevices)
|
||||
|
||||
// serial-0 and serial-1 should be preserved (matching devices exist)
|
||||
assert.Contains(t, sm.SmartDataMap, "serial-0")
|
||||
assert.Contains(t, sm.SmartDataMap, "serial-1")
|
||||
// serial-stale should be removed (no matching device)
|
||||
assert.NotContains(t, sm.SmartDataMap, "serial-stale")
|
||||
}
|
||||
|
||||
func TestParseSmartOutputMarksVerified(t *testing.T) {
|
||||
fixturePath := filepath.Join("test-data", "smart", "nvme0.json")
|
||||
data, err := os.ReadFile(fixturePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
|
||||
device := &DeviceInfo{Name: "/dev/nvme0"}
|
||||
|
||||
require.True(t, sm.parseSmartOutput(device, data))
|
||||
assert.Equal(t, "nvme", device.Type)
|
||||
assert.Equal(t, "nvme", device.parserType)
|
||||
assert.True(t, device.typeVerified)
|
||||
}
|
||||
|
||||
func TestParseSmartOutputKeepsCustomType(t *testing.T) {
|
||||
fixturePath := filepath.Join("test-data", "smart", "sda.json")
|
||||
data, err := os.ReadFile(fixturePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
|
||||
device := &DeviceInfo{Name: "/dev/sda", Type: "sat+megaraid"}
|
||||
|
||||
require.True(t, sm.parseSmartOutput(device, data))
|
||||
assert.Equal(t, "sat+megaraid", device.Type)
|
||||
assert.Equal(t, "sat", device.parserType)
|
||||
assert.True(t, device.typeVerified)
|
||||
}
|
||||
|
||||
func TestParseSmartOutputResetsVerificationOnFailure(t *testing.T) {
|
||||
sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
|
||||
device := &DeviceInfo{Name: "/dev/sda", Type: "sat", parserType: "sat", typeVerified: true}
|
||||
|
||||
assert.False(t, sm.parseSmartOutput(device, []byte("not json")))
|
||||
assert.False(t, device.typeVerified)
|
||||
assert.Equal(t, "sat", device.parserType)
|
||||
}
|
||||
|
||||
func assertAttrValue(t *testing.T, attributes []*smart.SmartAttribute, name string, expected uint64) {
|
||||
t.Helper()
|
||||
attr := findAttr(attributes, name)
|
||||
if attr == nil {
|
||||
t.Fatalf("expected attribute %s to be present", name)
|
||||
}
|
||||
if attr.RawValue != expected {
|
||||
t.Fatalf("unexpected attribute %s value: got %d, want %d", name, attr.RawValue, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func findAttr(attributes []*smart.SmartAttribute, name string) *smart.SmartAttribute {
|
||||
for _, attr := range attributes {
|
||||
if attr != nil && attr.Name == name {
|
||||
return attr
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestIsVirtualDevice(t *testing.T) {
|
||||
sm := &SmartManager{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
vendor string
|
||||
product string
|
||||
model string
|
||||
expected bool
|
||||
}{
|
||||
{"regular drive", "SEAGATE", "ST1000DM003", "ST1000DM003-1CH162", false},
|
||||
{"qemu virtual", "QEMU", "QEMU HARDDISK", "QEMU HARDDISK", true},
|
||||
{"virtualbox virtual", "VBOX", "HARDDISK", "VBOX HARDDISK", true},
|
||||
{"vmware virtual", "VMWARE", "Virtual disk", "VMWARE Virtual disk", true},
|
||||
{"virtual in model", "ATA", "VIRTUAL", "VIRTUAL DISK", true},
|
||||
{"iet virtual", "IET", "VIRTUAL-DISK", "VIRTUAL-DISK", true},
|
||||
{"hyper-v virtual", "MSFT", "VIRTUAL HD", "VIRTUAL HD", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
data := &smart.SmartInfoForSata{
|
||||
ScsiVendor: tt.vendor,
|
||||
ScsiProduct: tt.product,
|
||||
ModelName: tt.model,
|
||||
}
|
||||
result := sm.isVirtualDevice(data)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsVirtualDeviceNvme(t *testing.T) {
|
||||
sm := &SmartManager{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
model string
|
||||
expected bool
|
||||
}{
|
||||
{"regular nvme", "Samsung SSD 970 EVO Plus 1TB", false},
|
||||
{"qemu virtual", "QEMU NVMe Ctrl", true},
|
||||
{"virtualbox virtual", "VBOX NVMe", true},
|
||||
{"vmware virtual", "VMWARE NVMe", true},
|
||||
{"virtual in model", "Virtual NVMe Device", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
data := &smart.SmartInfoForNvme{
|
||||
ModelName: tt.model,
|
||||
}
|
||||
result := sm.isVirtualDeviceNvme(data)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsVirtualDeviceScsi(t *testing.T) {
|
||||
sm := &SmartManager{}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
vendor string
|
||||
product string
|
||||
model string
|
||||
expected bool
|
||||
}{
|
||||
{"regular scsi", "SEAGATE", "ST1000DM003", "ST1000DM003-1CH162", false},
|
||||
{"qemu virtual", "QEMU", "QEMU HARDDISK", "QEMU HARDDISK", true},
|
||||
{"virtualbox virtual", "VBOX", "HARDDISK", "VBOX HARDDISK", true},
|
||||
{"vmware virtual", "VMWARE", "Virtual disk", "VMWARE Virtual disk", true},
|
||||
{"virtual in model", "ATA", "VIRTUAL", "VIRTUAL DISK", true},
|
||||
{"iet virtual", "IET", "VIRTUAL-DISK", "VIRTUAL-DISK", true},
|
||||
{"hyper-v virtual", "MSFT", "VIRTUAL HD", "VIRTUAL HD", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
data := &smart.SmartInfoForScsi{
|
||||
ScsiVendor: tt.vendor,
|
||||
ScsiProduct: tt.product,
|
||||
ScsiModelName: tt.model,
|
||||
}
|
||||
result := sm.isVirtualDeviceScsi(data)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
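For context (not part of this commit): the three virtual-device tests above all hinge on vendor/product/model keyword matching. A rough sketch of such a check; the keyword list is an assumption, not the agent's exact one:

// Illustrative only; requires "strings" in the import block.
func looksVirtualSketch(fields ...string) bool {
	keywords := []string{"QEMU", "VBOX", "VMWARE", "VIRTUAL"}
	for _, field := range fields {
		upper := strings.ToUpper(field)
		for _, kw := range keywords {
			if strings.Contains(upper, kw) {
				return true
			}
		}
	}
	return false
}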
|
||||
func TestRefreshExcludedDevices(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
envValue string
|
||||
expectedDevs map[string]struct{}
|
||||
}{
|
||||
{
|
||||
name: "empty env",
|
||||
envValue: "",
|
||||
expectedDevs: map[string]struct{}{},
|
||||
},
|
||||
{
|
||||
name: "single device",
|
||||
envValue: "/dev/sda",
|
||||
expectedDevs: map[string]struct{}{
|
||||
"/dev/sda": {},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple devices",
|
||||
envValue: "/dev/sda,/dev/sdb,/dev/nvme0",
|
||||
expectedDevs: map[string]struct{}{
|
||||
"/dev/sda": {},
|
||||
"/dev/sdb": {},
|
||||
"/dev/nvme0": {},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "devices with whitespace",
|
||||
envValue: " /dev/sda , /dev/sdb , /dev/nvme0 ",
|
||||
expectedDevs: map[string]struct{}{
|
||||
"/dev/sda": {},
|
||||
"/dev/sdb": {},
|
||||
"/dev/nvme0": {},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "duplicate devices",
|
||||
envValue: "/dev/sda,/dev/sdb,/dev/sda",
|
||||
expectedDevs: map[string]struct{}{
|
||||
"/dev/sda": {},
|
||||
"/dev/sdb": {},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty entries and whitespace",
|
||||
envValue: "/dev/sda,, /dev/sdb , , ",
|
||||
expectedDevs: map[string]struct{}{
|
||||
"/dev/sda": {},
|
||||
"/dev/sdb": {},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.envValue != "" {
|
||||
t.Setenv("EXCLUDE_SMART", tt.envValue)
|
||||
} else {
|
||||
// Ensure env var is not set for empty test
|
||||
os.Unsetenv("EXCLUDE_SMART")
|
||||
}
|
||||
|
||||
sm := &SmartManager{}
|
||||
sm.refreshExcludedDevices()
|
||||
|
||||
assert.Equal(t, tt.expectedDevs, sm.excludedDevices)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsExcludedDevice(t *testing.T) {
|
||||
sm := &SmartManager{
|
||||
excludedDevices: map[string]struct{}{
|
||||
"/dev/sda": {},
|
||||
"/dev/nvme0": {},
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
deviceName string
|
||||
expectedBool bool
|
||||
}{
|
||||
{"excluded device sda", "/dev/sda", true},
|
||||
{"excluded device nvme0", "/dev/nvme0", true},
|
||||
{"non-excluded device sdb", "/dev/sdb", false},
|
||||
{"non-excluded device nvme1", "/dev/nvme1", false},
|
||||
{"empty device name", "", false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := sm.isExcludedDevice(tt.deviceName)
|
||||
assert.Equal(t, tt.expectedBool, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterExcludedDevices(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
excludedDevs map[string]struct{}
|
||||
inputDevices []*DeviceInfo
|
||||
expectedDevs []*DeviceInfo
|
||||
expectedLength int
|
||||
}{
|
||||
{
|
||||
name: "no exclusions",
|
||||
excludedDevs: map[string]struct{}{},
|
||||
inputDevices: []*DeviceInfo{
|
||||
{Name: "/dev/sda"},
|
||||
{Name: "/dev/sdb"},
|
||||
{Name: "/dev/nvme0"},
|
||||
},
|
||||
expectedDevs: []*DeviceInfo{
|
||||
{Name: "/dev/sda"},
|
||||
{Name: "/dev/sdb"},
|
||||
{Name: "/dev/nvme0"},
|
||||
},
|
||||
expectedLength: 3,
|
||||
},
|
||||
{
|
||||
name: "some devices excluded",
|
||||
excludedDevs: map[string]struct{}{
|
||||
"/dev/sda": {},
|
||||
"/dev/nvme0": {},
|
||||
},
|
||||
inputDevices: []*DeviceInfo{
|
||||
{Name: "/dev/sda"},
|
||||
{Name: "/dev/sdb"},
|
||||
{Name: "/dev/nvme0"},
|
||||
{Name: "/dev/nvme1"},
|
||||
},
|
||||
expectedDevs: []*DeviceInfo{
|
||||
{Name: "/dev/sdb"},
|
||||
{Name: "/dev/nvme1"},
|
||||
},
|
||||
expectedLength: 2,
|
||||
},
|
||||
{
|
||||
name: "all devices excluded",
|
||||
excludedDevs: map[string]struct{}{
|
||||
"/dev/sda": {},
|
||||
"/dev/sdb": {},
|
||||
},
|
||||
inputDevices: []*DeviceInfo{
|
||||
{Name: "/dev/sda"},
|
||||
{Name: "/dev/sdb"},
|
||||
},
|
||||
expectedDevs: []*DeviceInfo{},
|
||||
expectedLength: 0,
|
||||
},
|
||||
{
|
||||
name: "nil devices",
|
||||
excludedDevs: map[string]struct{}{},
|
||||
inputDevices: nil,
|
||||
expectedDevs: []*DeviceInfo{},
|
||||
expectedLength: 0,
|
||||
},
|
||||
{
|
||||
name: "filter nil and empty name devices",
|
||||
excludedDevs: map[string]struct{}{
|
||||
"/dev/sda": {},
|
||||
},
|
||||
inputDevices: []*DeviceInfo{
|
||||
{Name: "/dev/sda"},
|
||||
nil,
|
||||
{Name: ""},
|
||||
{Name: "/dev/sdb"},
|
||||
},
|
||||
expectedDevs: []*DeviceInfo{
|
||||
{Name: "/dev/sdb"},
|
||||
},
|
||||
expectedLength: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
sm := &SmartManager{
|
||||
excludedDevices: tt.excludedDevs,
|
||||
}
|
||||
|
||||
result := sm.filterExcludedDevices(tt.inputDevices)
|
||||
|
||||
assert.Len(t, result, tt.expectedLength)
|
||||
assert.Equal(t, tt.expectedDevs, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsNvmeControllerPath(t *testing.T) {
|
||||
tests := []struct {
|
||||
path string
|
||||
expected bool
|
||||
}{
|
||||
// Controller paths (should return true)
|
||||
{"/dev/nvme0", true},
|
||||
{"/dev/nvme1", true},
|
||||
{"/dev/nvme10", true},
|
||||
{"nvme0", true},
|
||||
|
||||
// Namespace paths (should return false)
|
||||
{"/dev/nvme0n1", false},
|
||||
{"/dev/nvme1n1", false},
|
||||
{"/dev/nvme0n1p1", false},
|
||||
{"nvme0n1", false},
|
||||
|
||||
// Non-NVMe paths (should return false)
|
||||
{"/dev/sda", false},
|
||||
{"/dev/sda1", false},
|
||||
{"/dev/hda", false},
|
||||
{"", false},
|
||||
{"/dev/nvme", false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.path, func(t *testing.T) {
|
||||
result := isNvmeControllerPath(tt.path)
|
||||
assert.Equal(t, tt.expected, result, "path: %s", tt.path)
|
||||
})
|
||||
}
|
||||
}
|
||||
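For context (not part of this commit): the table in TestIsNvmeControllerPath above implies a controller path ends in "nvme" plus digits with no namespace suffix. One way to express that, as a sketch rather than the committed implementation:

// Illustrative only; requires "regexp" in the import block.
var nvmeControllerRe = regexp.MustCompile(`(^|/)nvme[0-9]+$`)

func isNvmeControllerPathSketch(path string) bool {
	return nvmeControllerRe.MatchString(path)
}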
40
agent/smart_windows.go
Normal file
@@ -0,0 +1,40 @@
|
||||
//go:build windows
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
)
|
||||
|
||||
//go:embed smartmontools/smartctl.exe
|
||||
var embeddedSmartctl []byte
|
||||
|
||||
var (
|
||||
smartctlOnce sync.Once
|
||||
smartctlPath string
|
||||
smartctlErr error
|
||||
)
|
||||
|
||||
func ensureEmbeddedSmartctl() (string, error) {
|
||||
smartctlOnce.Do(func() {
|
||||
destDir := filepath.Join(os.TempDir(), "beszel", "smartmontools")
|
||||
if err := os.MkdirAll(destDir, 0o755); err != nil {
|
||||
smartctlErr = fmt.Errorf("failed to create smartctl directory: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
destPath := filepath.Join(destDir, "smartctl.exe")
|
||||
if err := os.WriteFile(destPath, embeddedSmartctl, 0o755); err != nil {
|
||||
smartctlErr = fmt.Errorf("failed to write embedded smartctl: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
smartctlPath = destPath
|
||||
})
|
||||
|
||||
return smartctlPath, smartctlErr
|
||||
}
|
||||
135
agent/system.go
@@ -2,15 +2,18 @@ package agent
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel"
|
||||
"github.com/henrygd/beszel/agent/battery"
|
||||
"github.com/henrygd/beszel/internal/entities/container"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
|
||||
"github.com/shirou/gopsutil/v4/cpu"
|
||||
@@ -27,41 +30,79 @@ type prevDisk struct {
|
||||
}
|
||||
|
||||
// Sets initial / non-changing values about the host system
|
||||
-func (a *Agent) initializeSystemInfo() {
|
||||
+func (a *Agent) refreshSystemDetails() {
|
||||
a.systemInfo.AgentVersion = beszel.Version
|
||||
a.systemInfo.Hostname, _ = os.Hostname()
|
||||
|
||||
// get host info from Docker if available
|
||||
var hostInfo container.HostInfo
|
||||
|
||||
if a.dockerManager != nil {
|
||||
a.systemDetails.Podman = a.dockerManager.IsPodman()
|
||||
hostInfo, _ = a.dockerManager.GetHostInfo()
|
||||
}
|
||||
|
||||
a.systemDetails.Hostname, _ = os.Hostname()
|
||||
if arch, err := host.KernelArch(); err == nil {
|
||||
a.systemDetails.Arch = arch
|
||||
} else {
|
||||
a.systemDetails.Arch = runtime.GOARCH
|
||||
}
|
||||
|
||||
platform, _, version, _ := host.PlatformInformation()
|
||||
|
||||
if platform == "darwin" {
|
||||
a.systemInfo.KernelVersion = version
|
||||
a.systemInfo.Os = system.Darwin
|
||||
a.systemDetails.Os = system.Darwin
|
||||
a.systemDetails.OsName = fmt.Sprintf("macOS %s", version)
|
||||
} else if strings.Contains(platform, "indows") {
|
||||
a.systemInfo.KernelVersion = fmt.Sprintf("%s %s", strings.Replace(platform, "Microsoft ", "", 1), version)
|
||||
a.systemInfo.Os = system.Windows
|
||||
a.systemDetails.Os = system.Windows
|
||||
a.systemDetails.OsName = strings.Replace(platform, "Microsoft ", "", 1)
|
||||
a.systemDetails.Kernel = version
|
||||
} else if platform == "freebsd" {
|
||||
a.systemInfo.Os = system.Freebsd
|
||||
a.systemInfo.KernelVersion = version
|
||||
a.systemDetails.Os = system.Freebsd
|
||||
a.systemDetails.Kernel, _ = host.KernelVersion()
|
||||
if prettyName, err := getOsPrettyName(); err == nil {
|
||||
a.systemDetails.OsName = prettyName
|
||||
} else {
|
||||
a.systemDetails.OsName = "FreeBSD"
|
||||
}
|
||||
} else {
|
||||
a.systemInfo.Os = system.Linux
|
||||
}
|
||||
|
||||
if a.systemInfo.KernelVersion == "" {
|
||||
a.systemInfo.KernelVersion, _ = host.KernelVersion()
|
||||
a.systemDetails.Os = system.Linux
|
||||
a.systemDetails.OsName = hostInfo.OperatingSystem
|
||||
if a.systemDetails.OsName == "" {
|
||||
if prettyName, err := getOsPrettyName(); err == nil {
|
||||
a.systemDetails.OsName = prettyName
|
||||
} else {
|
||||
a.systemDetails.OsName = platform
|
||||
}
|
||||
}
|
||||
a.systemDetails.Kernel = hostInfo.KernelVersion
|
||||
if a.systemDetails.Kernel == "" {
|
||||
a.systemDetails.Kernel, _ = host.KernelVersion()
|
||||
}
|
||||
}
|
||||
|
||||
// cpu model
|
||||
if info, err := cpu.Info(); err == nil && len(info) > 0 {
|
||||
a.systemInfo.CpuModel = info[0].ModelName
|
||||
a.systemDetails.CpuModel = info[0].ModelName
|
||||
}
|
||||
// cores / threads
|
||||
a.systemInfo.Cores, _ = cpu.Counts(false)
|
||||
if threads, err := cpu.Counts(true); err == nil {
|
||||
if threads > 0 && threads < a.systemInfo.Cores {
|
||||
// in lxc logical cores reflects container limits, so use that as cores if lower
|
||||
a.systemInfo.Cores = threads
|
||||
} else {
|
||||
a.systemInfo.Threads = threads
|
||||
cores, _ := cpu.Counts(false)
|
||||
threads := hostInfo.NCPU
|
||||
if threads == 0 {
|
||||
threads, _ = cpu.Counts(true)
|
||||
}
|
||||
// in lxc, logical cores reflects container limits, so use that as cores if lower
|
||||
if threads > 0 && threads < cores {
|
||||
cores = threads
|
||||
}
|
||||
a.systemDetails.Cores = cores
|
||||
a.systemDetails.Threads = threads
|
||||
|
||||
// total memory
|
||||
a.systemDetails.MemoryTotal = hostInfo.MemTotal
|
||||
if a.systemDetails.MemoryTotal == 0 {
|
||||
if v, err := mem.VirtualMemory(); err == nil {
|
||||
a.systemDetails.MemoryTotal = v.Total
|
||||
}
|
||||
}
|
||||
|
||||
@@ -83,12 +124,24 @@ func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
||||
systemStats.Battery[1] = batteryState
|
||||
}
|
||||
|
||||
// cpu percent
|
||||
cpuPercent, err := getCpuPercent(cacheTimeMs)
|
||||
// cpu metrics
|
||||
cpuMetrics, err := getCpuMetrics(cacheTimeMs)
|
||||
if err == nil {
|
||||
systemStats.Cpu = twoDecimals(cpuPercent)
|
||||
systemStats.Cpu = twoDecimals(cpuMetrics.Total)
|
||||
systemStats.CpuBreakdown = []float64{
|
||||
twoDecimals(cpuMetrics.User),
|
||||
twoDecimals(cpuMetrics.System),
|
||||
twoDecimals(cpuMetrics.Iowait),
|
||||
twoDecimals(cpuMetrics.Steal),
|
||||
twoDecimals(cpuMetrics.Idle),
|
||||
}
|
||||
} else {
|
||||
slog.Error("Error getting cpu percent", "err", err)
|
||||
slog.Error("Error getting cpu metrics", "err", err)
|
||||
}
|
||||
|
||||
// per-core cpu usage
|
||||
if perCoreUsage, err := getPerCoreCpuUsage(cacheTimeMs); err == nil {
|
||||
systemStats.CpuCoresUsage = perCoreUsage
|
||||
}
|
||||
|
||||
// load average
|
||||
@@ -183,21 +236,16 @@ func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
||||
}
|
||||
}
|
||||
|
||||
// update base system info
|
||||
// update system info
|
||||
a.systemInfo.ConnectionType = a.connectionManager.ConnectionType
|
||||
a.systemInfo.Cpu = systemStats.Cpu
|
||||
a.systemInfo.LoadAvg = systemStats.LoadAvg
|
||||
// TODO: remove these in future release in favor of load avg array
|
||||
a.systemInfo.LoadAvg1 = systemStats.LoadAvg[0]
|
||||
a.systemInfo.LoadAvg5 = systemStats.LoadAvg[1]
|
||||
a.systemInfo.LoadAvg15 = systemStats.LoadAvg[2]
|
||||
a.systemInfo.MemPct = systemStats.MemPct
|
||||
a.systemInfo.DiskPct = systemStats.DiskPct
|
||||
a.systemInfo.Battery = systemStats.Battery
|
||||
a.systemInfo.Uptime, _ = host.Uptime()
|
||||
// TODO: in future release, remove MB bandwidth values in favor of bytes
|
||||
a.systemInfo.Bandwidth = twoDecimals(systemStats.NetworkSent + systemStats.NetworkRecv)
|
||||
a.systemInfo.BandwidthBytes = systemStats.Bandwidth[0] + systemStats.Bandwidth[1]
|
||||
slog.Debug("sysinfo", "data", a.systemInfo)
|
||||
a.systemInfo.Threads = a.systemDetails.Threads
|
||||
|
||||
return systemStats
|
||||
}
|
||||
@@ -227,3 +275,24 @@ func getARCSize() (uint64, error) {
|
||||
|
||||
return 0, fmt.Errorf("failed to parse size field")
|
||||
}
|
||||
|
||||
// getOsPrettyName attempts to get the pretty OS name from /etc/os-release on Linux systems
|
||||
func getOsPrettyName() (string, error) {
|
||||
file, err := os.Open("/etc/os-release")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if after, ok := strings.CutPrefix(line, "PRETTY_NAME="); ok {
|
||||
value := after
|
||||
value = strings.Trim(value, `"`)
|
||||
return value, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", errors.New("pretty name not found")
|
||||
}
|
||||
|
||||
313
agent/systemd.go
Normal file
@@ -0,0 +1,313 @@
|
||||
//go:build linux
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"log/slog"
|
||||
"maps"
|
||||
"math"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-systemd/v22/dbus"
|
||||
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||
)
|
||||
|
||||
var errNoActiveTime = errors.New("no active time")
|
||||
|
||||
// systemdManager manages the collection of systemd service statistics.
|
||||
type systemdManager struct {
|
||||
sync.Mutex
|
||||
serviceStatsMap map[string]*systemd.Service
|
||||
isRunning bool
|
||||
hasFreshStats bool
|
||||
patterns []string
|
||||
}
|
||||
|
||||
// isSystemdAvailable checks if systemd is used on the system to avoid unnecessary connection attempts (#1548)
|
||||
func isSystemdAvailable() bool {
|
||||
paths := []string{
|
||||
"/run/systemd/system",
|
||||
"/run/dbus/system_bus_socket",
|
||||
"/var/run/dbus/system_bus_socket",
|
||||
}
|
||||
for _, path := range paths {
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if data, err := os.ReadFile("/proc/1/comm"); err == nil {
|
||||
return strings.TrimSpace(string(data)) == "systemd"
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// newSystemdManager creates a new systemdManager.
|
||||
func newSystemdManager() (*systemdManager, error) {
|
||||
if skipSystemd, _ := GetEnv("SKIP_SYSTEMD"); skipSystemd == "true" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Check if systemd is available on the system before attempting connection
|
||||
if !isSystemdAvailable() {
|
||||
slog.Debug("Systemd not available")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
conn, err := dbus.NewSystemConnectionContext(context.Background())
|
||||
if err != nil {
|
||||
slog.Debug("Error connecting to systemd", "err", err, "ref", "https://beszel.dev/guide/systemd")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
manager := &systemdManager{
|
||||
serviceStatsMap: make(map[string]*systemd.Service),
|
||||
patterns: getServicePatterns(),
|
||||
}
|
||||
|
||||
manager.startWorker(conn)
|
||||
|
||||
return manager, nil
|
||||
}
|
||||
|
||||
func (sm *systemdManager) startWorker(conn *dbus.Conn) {
|
||||
if sm.isRunning {
|
||||
return
|
||||
}
|
||||
sm.isRunning = true
|
||||
// prime the service stats map with the current services
|
||||
_ = sm.getServiceStats(conn, true)
|
||||
// update the services every 10 minutes
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(time.Minute * 10)
|
||||
_ = sm.getServiceStats(nil, true)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// getServiceStatsCount returns the number of systemd services.
|
||||
func (sm *systemdManager) getServiceStatsCount() int {
|
||||
return len(sm.serviceStatsMap)
|
||||
}
|
||||
|
||||
// getFailedServiceCount returns the number of systemd services in a failed state.
|
||||
func (sm *systemdManager) getFailedServiceCount() uint16 {
|
||||
sm.Lock()
|
||||
defer sm.Unlock()
|
||||
count := uint16(0)
|
||||
for _, service := range sm.serviceStatsMap {
|
||||
if service.State == systemd.StatusFailed {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// getServiceStats collects statistics for all running systemd services.
|
||||
func (sm *systemdManager) getServiceStats(conn *dbus.Conn, refresh bool) []*systemd.Service {
|
||||
// start := time.Now()
|
||||
// defer func() {
|
||||
// slog.Info("systemdManager.getServiceStats", "duration", time.Since(start))
|
||||
// }()
|
||||
|
||||
var services []*systemd.Service
|
||||
var err error
|
||||
|
||||
if !refresh {
|
||||
// return nil
|
||||
sm.Lock()
|
||||
defer sm.Unlock()
|
||||
for _, service := range sm.serviceStatsMap {
|
||||
services = append(services, service)
|
||||
}
|
||||
sm.hasFreshStats = false
|
||||
return services
|
||||
}
|
||||
|
||||
if conn == nil || !conn.Connected() {
|
||||
conn, err = dbus.NewSystemConnectionContext(context.Background())
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
defer conn.Close()
|
||||
}
|
||||
|
||||
units, err := conn.ListUnitsByPatternsContext(context.Background(), []string{"loaded"}, sm.patterns)
|
||||
if err != nil {
|
||||
slog.Error("Error listing systemd service units", "err", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Track which units are currently present to remove stale entries
|
||||
currentUnits := make(map[string]struct{}, len(units))
|
||||
|
||||
for _, unit := range units {
|
||||
currentUnits[unit.Name] = struct{}{}
|
||||
service, err := sm.updateServiceStats(conn, unit)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
		services = append(services, service)
	}

	// Remove services that no longer exist in systemd
	sm.Lock()
	for unitName := range sm.serviceStatsMap {
		if _, exists := currentUnits[unitName]; !exists {
			delete(sm.serviceStatsMap, unitName)
		}
	}
	sm.Unlock()

	sm.hasFreshStats = true
	return services
}

// updateServiceStats updates the statistics for a single systemd service.
func (sm *systemdManager) updateServiceStats(conn *dbus.Conn, unit dbus.UnitStatus) (*systemd.Service, error) {
	sm.Lock()
	defer sm.Unlock()

	ctx := context.Background()

	// If the service has never been active (no ActiveEnterTimestamp), skip it
	if activeEnterTsProp, err := conn.GetUnitTypePropertyContext(ctx, unit.Name, "Unit", "ActiveEnterTimestamp"); err == nil {
		if ts, ok := activeEnterTsProp.Value.Value().(uint64); !ok || ts == 0 || ts == math.MaxUint64 {
			return nil, errNoActiveTime
		}
	} else {
		return nil, err
	}

	service, serviceExists := sm.serviceStatsMap[unit.Name]
	if !serviceExists {
		service = &systemd.Service{Name: unescapeServiceName(strings.TrimSuffix(unit.Name, ".service"))}
		sm.serviceStatsMap[unit.Name] = service
	}

	memPeak := service.MemPeak
	if memPeakProp, err := conn.GetUnitTypePropertyContext(ctx, unit.Name, "Service", "MemoryPeak"); err == nil {
		// A value of MaxUint64 means the API reports the peak as not available
		if v, ok := memPeakProp.Value.Value().(uint64); ok && v != math.MaxUint64 {
			memPeak = v
		}
	}

	var memUsage uint64
	if memProp, err := conn.GetUnitTypePropertyContext(ctx, unit.Name, "Service", "MemoryCurrent"); err == nil {
		// A value of MaxUint64 means the API reports the usage as not available
		if v, ok := memProp.Value.Value().(uint64); ok && v != math.MaxUint64 {
			memUsage = v
		}
	}

	service.State = systemd.ParseServiceStatus(unit.ActiveState)
	service.Sub = systemd.ParseServiceSubState(unit.SubState)

	// Some systems always return 0 for mem peak, so update the peak if the current usage is greater
	if memUsage > memPeak {
		memPeak = memUsage
	}

	var cpuUsage uint64
	if cpuProp, err := conn.GetUnitTypePropertyContext(ctx, unit.Name, "Service", "CPUUsageNSec"); err == nil {
		if v, ok := cpuProp.Value.Value().(uint64); ok {
			cpuUsage = v
		}
	}

	service.Mem = memUsage
	if memPeak > service.MemPeak {
		service.MemPeak = memPeak
	}
	service.UpdateCPUPercent(cpuUsage)

	return service, nil
}

// getServiceDetails collects extended information for a specific systemd service.
func (sm *systemdManager) getServiceDetails(serviceName string) (systemd.ServiceDetails, error) {
	conn, err := dbus.NewSystemConnectionContext(context.Background())
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	unitName := serviceName
	if !strings.HasSuffix(unitName, ".service") {
		unitName += ".service"
	}

	ctx := context.Background()
	props, err := conn.GetUnitPropertiesContext(ctx, unitName)
	if err != nil {
		return nil, err
	}

	// Start with all unit properties
	details := make(systemd.ServiceDetails)
	maps.Copy(details, props)

	// Add service-specific properties
	servicePropNames := []string{
		"MainPID", "ExecMainPID", "TasksCurrent", "TasksMax",
		"MemoryCurrent", "MemoryPeak", "MemoryLimit", "CPUUsageNSec",
		"NRestarts", "ExecMainStartTimestampRealtime", "Result",
	}

	for _, propName := range servicePropNames {
		if variant, err := conn.GetUnitTypePropertyContext(ctx, unitName, "Service", propName); err == nil {
			value := variant.Value.Value()
			// Check if the value is MaxUint64, which indicates unlimited/infinite
			if uint64Value, ok := value.(uint64); ok && uint64Value == math.MaxUint64 {
				// Set to nil to indicate unlimited - frontend will handle this appropriately
				details[propName] = nil
			} else {
				details[propName] = value
			}
		}
	}

	return details, nil
}

// unescapeServiceName unescapes systemd service names that contain C-style escape sequences like \x2d
func unescapeServiceName(name string) string {
	if !strings.Contains(name, "\\x") {
		return name
	}
	unescaped, err := strconv.Unquote("\"" + name + "\"")
	if err != nil {
		return name
	}
	return unescaped
}

// getServicePatterns returns the list of service patterns to match.
// It reads from the SERVICE_PATTERNS environment variable if set,
// otherwise defaults to "*.service".
func getServicePatterns() []string {
	patterns := []string{}
	if envPatterns, _ := GetEnv("SERVICE_PATTERNS"); envPatterns != "" {
		for pattern := range strings.SplitSeq(envPatterns, ",") {
			pattern = strings.TrimSpace(pattern)
			if pattern == "" {
				continue
			}
			if !strings.HasSuffix(pattern, ".service") {
				pattern += ".service"
			}
			patterns = append(patterns, pattern)
		}
	}
	if len(patterns) == 0 {
		patterns = []string{"*.service"}
	}
	return patterns
}
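The code that consumes these patterns is not part of this excerpt; the agent may pass them straight to systemd (for example via a dbus list-units-by-patterns call) or match them itself. A minimal sketch, assuming glob-style matching of patterns against unit names with Go's path.Match:

```go
// Illustrative only: matchesAny is a hypothetical helper, not the agent's
// actual matching code, which is not shown in this diff.
package main

import (
	"fmt"
	"path"
)

func matchesAny(patterns []string, unitName string) bool {
	for _, p := range patterns {
		// path.Match supports the *, ?, and [...] wildcards used in SERVICE_PATTERNS.
		if ok, err := path.Match(p, unitName); err == nil && ok {
			return true
		}
	}
	return false
}

func main() {
	patterns := []string{"nginx.service", "postgres*.service"}
	fmt.Println(matchesAny(patterns, "nginx.service"))          // true
	fmt.Println(matchesAny(patterns, "postgresql@15.service"))  // true
	fmt.Println(matchesAny(patterns, "sshd.service"))           // false
}
```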
agent/systemd_nonlinux.go (new file, 38 lines)
@@ -0,0 +1,38 @@
//go:build !linux

package agent

import (
	"errors"

	"github.com/henrygd/beszel/internal/entities/systemd"
)

// systemdManager manages the collection of systemd service statistics.
type systemdManager struct {
	hasFreshStats bool
}

// newSystemdManager creates a new systemdManager.
func newSystemdManager() (*systemdManager, error) {
	return &systemdManager{}, nil
}

// getServiceStats returns nil for non-linux systems.
func (sm *systemdManager) getServiceStats(conn any, refresh bool) []*systemd.Service {
	return nil
}

// getServiceStatsCount returns 0 for non-linux systems.
func (sm *systemdManager) getServiceStatsCount() int {
	return 0
}

// getFailedServiceCount returns 0 for non-linux systems.
func (sm *systemdManager) getFailedServiceCount() uint16 {
	return 0
}

func (sm *systemdManager) getServiceDetails(string) (systemd.ServiceDetails, error) {
	return nil, errors.New("systemd manager unavailable")
}
agent/systemd_nonlinux_test.go (new file, 53 lines)
@@ -0,0 +1,53 @@
//go:build !linux && testing

package agent

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNewSystemdManager(t *testing.T) {
	manager, err := newSystemdManager()
	assert.NoError(t, err)
	assert.NotNil(t, manager)
}

func TestSystemdManagerGetServiceStats(t *testing.T) {
	manager, err := newSystemdManager()
	assert.NoError(t, err)

	// Test with refresh = true
	result := manager.getServiceStats("any-service", true)
	assert.Nil(t, result)

	// Test with refresh = false
	result = manager.getServiceStats("any-service", false)
	assert.Nil(t, result)
}

func TestSystemdManagerGetServiceDetails(t *testing.T) {
	manager, err := newSystemdManager()
	assert.NoError(t, err)

	result, err := manager.getServiceDetails("any-service")
	assert.Error(t, err)
	assert.Equal(t, "systemd manager unavailable", err.Error())
	assert.Nil(t, result)

	// Test with empty service name
	result, err = manager.getServiceDetails("")
	assert.Error(t, err)
	assert.Equal(t, "systemd manager unavailable", err.Error())
	assert.Nil(t, result)
}

func TestSystemdManagerFields(t *testing.T) {
	manager, err := newSystemdManager()
	assert.NoError(t, err)

	// The non-linux manager should be a simple struct with no special fields
	// We can't test private fields directly, but we can test the methods work
	assert.NotNil(t, manager)
}
agent/systemd_test.go (new file, 188 lines)
@@ -0,0 +1,188 @@
//go:build linux && testing

package agent

import (
	"os"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestUnescapeServiceName(t *testing.T) {
	tests := []struct {
		input    string
		expected string
	}{
		{"nginx.service", "nginx.service"},                                     // No escaping needed
		{"test\\x2dwith\\x2ddashes.service", "test-with-dashes.service"},       // \x2d is dash
		{"service\\x20with\\x20spaces.service", "service with spaces.service"}, // \x20 is space
		{"mixed\\x2dand\\x2dnormal", "mixed-and-normal"},                       // Mixed escaped and normal
		{"no-escape-here", "no-escape-here"},                                   // No escape sequences
		{"", ""},                                                               // Empty string
		{"\\x2d\\x2d", "--"},                                                   // Multiple escapes
	}

	for _, test := range tests {
		t.Run(test.input, func(t *testing.T) {
			result := unescapeServiceName(test.input)
			assert.Equal(t, test.expected, result)
		})
	}
}

func TestUnescapeServiceNameInvalid(t *testing.T) {
	// Test invalid escape sequences - should return original string
	invalidInputs := []string{
		"invalid\\x",   // Incomplete escape
		"invalid\\xZZ", // Invalid hex
		"invalid\\x2",  // Incomplete hex
		"invalid\\xyz", // Not a valid escape
	}

	for _, input := range invalidInputs {
		t.Run(input, func(t *testing.T) {
			result := unescapeServiceName(input)
			assert.Equal(t, input, result, "Invalid escape sequences should return original string")
		})
	}
}

func TestIsSystemdAvailable(t *testing.T) {
	// Note: This test's result will vary based on the actual system running the tests
	// On systems with systemd, it should return true
	// On systems without systemd, it should return false
	result := isSystemdAvailable()

	// Check if either the /run/systemd/system directory exists or PID 1 is systemd
	runSystemdExists := false
	if _, err := os.Stat("/run/systemd/system"); err == nil {
		runSystemdExists = true
	}

	pid1IsSystemd := false
	if data, err := os.ReadFile("/proc/1/comm"); err == nil {
		pid1IsSystemd = strings.TrimSpace(string(data)) == "systemd"
	}

	expected := runSystemdExists || pid1IsSystemd

	assert.Equal(t, expected, result, "isSystemdAvailable should correctly detect systemd presence")

	// Log the result for informational purposes
	if result {
		t.Log("Systemd is available on this system")
	} else {
		t.Log("Systemd is not available on this system")
	}
}

func TestGetServicePatterns(t *testing.T) {
	tests := []struct {
		name           string
		prefixedEnv    string
		unprefixedEnv  string
		expected       []string
		cleanupEnvVars bool
	}{
		{
			name:           "default when no env var set",
			prefixedEnv:    "",
			unprefixedEnv:  "",
			expected:       []string{"*.service"},
			cleanupEnvVars: true,
		},
		{
			name:           "single pattern with prefixed env",
			prefixedEnv:    "nginx",
			unprefixedEnv:  "",
			expected:       []string{"nginx.service"},
			cleanupEnvVars: true,
		},
		{
			name:           "single pattern with unprefixed env",
			prefixedEnv:    "",
			unprefixedEnv:  "nginx",
			expected:       []string{"nginx.service"},
			cleanupEnvVars: true,
		},
		{
			name:           "prefixed env takes precedence",
			prefixedEnv:    "nginx",
			unprefixedEnv:  "apache",
			expected:       []string{"nginx.service"},
			cleanupEnvVars: true,
		},
		{
			name:           "multiple patterns",
			prefixedEnv:    "nginx,apache,postgresql",
			unprefixedEnv:  "",
			expected:       []string{"nginx.service", "apache.service", "postgresql.service"},
			cleanupEnvVars: true,
		},
		{
			name:           "patterns with .service suffix",
			prefixedEnv:    "nginx.service,apache.service",
			unprefixedEnv:  "",
			expected:       []string{"nginx.service", "apache.service"},
			cleanupEnvVars: true,
		},
		{
			name:           "mixed patterns with and without suffix",
			prefixedEnv:    "nginx.service,apache,postgresql.service",
			unprefixedEnv:  "",
			expected:       []string{"nginx.service", "apache.service", "postgresql.service"},
			cleanupEnvVars: true,
		},
		{
			name:           "patterns with whitespace",
			prefixedEnv:    " nginx , apache , postgresql ",
			unprefixedEnv:  "",
			expected:       []string{"nginx.service", "apache.service", "postgresql.service"},
			cleanupEnvVars: true,
		},
		{
			name:           "empty patterns are skipped",
			prefixedEnv:    "nginx,,apache, ,postgresql",
			unprefixedEnv:  "",
			expected:       []string{"nginx.service", "apache.service", "postgresql.service"},
			cleanupEnvVars: true,
		},
		{
			name:           "wildcard pattern",
			prefixedEnv:    "*nginx*,*apache*",
			unprefixedEnv:  "",
			expected:       []string{"*nginx*.service", "*apache*.service"},
			cleanupEnvVars: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Clean up any existing env vars
			os.Unsetenv("BESZEL_AGENT_SERVICE_PATTERNS")
			os.Unsetenv("SERVICE_PATTERNS")

			// Set up environment variables
			if tt.prefixedEnv != "" {
				os.Setenv("BESZEL_AGENT_SERVICE_PATTERNS", tt.prefixedEnv)
			}
			if tt.unprefixedEnv != "" {
				os.Setenv("SERVICE_PATTERNS", tt.unprefixedEnv)
			}

			// Run the function
			result := getServicePatterns()

			// Verify results
			assert.Equal(t, tt.expected, result, "Patterns should match expected values")

			// Cleanup
			if tt.cleanupEnvVars {
				os.Unsetenv("BESZEL_AGENT_SERVICE_PATTERNS")
				os.Unsetenv("SERVICE_PATTERNS")
			}
		})
	}
}
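These cases rely on the agent's GetEnv helper checking the BESZEL_AGENT_-prefixed variable before the bare name; GetEnv itself is not part of this excerpt, so the sketch below only illustrates the lookup order the tests imply and may differ from the real helper:

```go
// Hypothetical sketch of a prefixed-then-unprefixed environment lookup.
// The agent's actual GetEnv is not shown in this diff and may differ.
package main

import (
	"fmt"
	"os"
)

func getEnv(key string) (string, bool) {
	// Prefer the namespaced variable, e.g. BESZEL_AGENT_SERVICE_PATTERNS...
	if v, ok := os.LookupEnv("BESZEL_AGENT_" + key); ok {
		return v, true
	}
	// ...then fall back to the bare name, e.g. SERVICE_PATTERNS.
	return os.LookupEnv(key)
}

func main() {
	os.Setenv("BESZEL_AGENT_SERVICE_PATTERNS", "nginx")
	os.Setenv("SERVICE_PATTERNS", "apache")
	v, _ := getEnv("SERVICE_PATTERNS")
	fmt.Println(v) // "nginx" - the prefixed variable wins
}
```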
agent/test-data/smart/nvme0.json (new file, 272 lines)
@@ -0,0 +1,272 @@
{
  "json_format_version": [
    1,
    0
  ],
  "smartctl": {
    "version": [
      7,
      5
    ],
    "pre_release": false,
    "svn_revision": "5714",
    "platform_info": "x86_64-linux-6.17.1-2-cachyos",
    "build_info": "(local build)",
    "argv": [
      "smartctl",
      "-aj",
      "/dev/nvme0"
    ],
    "exit_status": 0
  },
  "local_time": {
    "time_t": 1761507494,
    "asctime": "Sun Oct 26 15:38:14 2025 EDT"
  },
  "device": {
    "name": "/dev/nvme0",
    "info_name": "/dev/nvme0",
    "type": "nvme",
    "protocol": "NVMe"
  },
  "model_name": "PELADN 512GB",
  "serial_number": "2024031600129",
  "firmware_version": "VC2S038E",
  "nvme_pci_vendor": {
    "id": 4332,
    "subsystem_id": 4332
  },
  "nvme_ieee_oui_identifier": 57420,
  "nvme_controller_id": 1,
  "nvme_version": {
    "string": "1.4",
    "value": 66560
  },
  "nvme_number_of_namespaces": 1,
  "nvme_namespaces": [
    {
      "id": 1,
      "size": {
        "blocks": 1000215216,
        "bytes": 512110190592
      },
      "capacity": {
        "blocks": 1000215216,
        "bytes": 512110190592
      },
      "utilization": {
        "blocks": 1000215216,
        "bytes": 512110190592
      },
      "formatted_lba_size": 512,
      "eui64": {
        "oui": 57420,
        "ext_id": 112094110470
      },
      "features": {
        "value": 0,
        "thin_provisioning": false,
        "na_fields": false,
        "dealloc_or_unwritten_block_error": false,
        "uid_reuse": false,
        "np_fields": false,
        "other": 0
      },
      "lba_formats": [
        {
          "formatted": true,
          "data_bytes": 512,
          "metadata_bytes": 0,
          "relative_performance": 0
        }
      ]
    }
  ],
  "user_capacity": {
    "blocks": 1000215216,
    "bytes": 512110190592
  },
  "logical_block_size": 512,
  "smart_support": {
    "available": true,
    "enabled": true
  },
  "nvme_firmware_update_capabilities": {
    "value": 2,
    "slots": 1,
    "first_slot_is_read_only": false,
    "activiation_without_reset": false,
    "multiple_update_detection": false,
    "other": 0
  },
  "nvme_optional_admin_commands": {
    "value": 23,
    "security_send_receive": true,
    "format_nvm": true,
    "firmware_download": true,
    "namespace_management": false,
    "self_test": true,
    "directives": false,
    "mi_send_receive": false,
    "virtualization_management": false,
    "doorbell_buffer_config": false,
    "get_lba_status": false,
    "command_and_feature_lockdown": false,
    "other": 0
  },
  "nvme_optional_nvm_commands": {
    "value": 94,
    "compare": false,
    "write_uncorrectable": true,
    "dataset_management": true,
    "write_zeroes": true,
    "save_select_feature_nonzero": true,
    "reservations": false,
    "timestamp": true,
    "verify": false,
    "copy": false,
    "other": 0
  },
  "nvme_log_page_attributes": {
    "value": 2,
    "smart_health_per_namespace": false,
    "commands_effects_log": true,
    "extended_get_log_page_cmd": false,
    "telemetry_log": false,
    "persistent_event_log": false,
    "supported_log_pages_log": false,
    "telemetry_data_area_4": false,
    "other": 0
  },
  "nvme_maximum_data_transfer_pages": 32,
  "nvme_composite_temperature_threshold": {
    "warning": 100,
    "critical": 110
  },
  "temperature": {
    "op_limit_max": 100,
    "critical_limit_max": 110,
    "current": 61
  },
  "nvme_power_states": [
    {
      "non_operational_state": false,
      "relative_read_latency": 0,
      "relative_read_throughput": 0,
      "relative_write_latency": 0,
      "relative_write_throughput": 0,
      "entry_latency_us": 230000,
      "exit_latency_us": 50000,
      "max_power": {
        "value": 800,
        "scale": 2,
        "units_per_watt": 100
      }
    },
    {
      "non_operational_state": false,
      "relative_read_latency": 1,
      "relative_read_throughput": 1,
      "relative_write_latency": 1,
      "relative_write_throughput": 1,
      "entry_latency_us": 4000,
      "exit_latency_us": 50000,
      "max_power": {
        "value": 400,
        "scale": 2,
        "units_per_watt": 100
      }
    },
    {
      "non_operational_state": false,
      "relative_read_latency": 2,
      "relative_read_throughput": 2,
      "relative_write_latency": 2,
      "relative_write_throughput": 2,
      "entry_latency_us": 4000,
      "exit_latency_us": 250000,
      "max_power": {
        "value": 300,
        "scale": 2,
        "units_per_watt": 100
      }
    },
    {
      "non_operational_state": true,
      "relative_read_latency": 3,
      "relative_read_throughput": 3,
      "relative_write_latency": 3,
      "relative_write_throughput": 3,
      "entry_latency_us": 5000,
      "exit_latency_us": 10000,
      "max_power": {
        "value": 300,
        "scale": 1,
        "units_per_watt": 10000
      }
    },
    {
      "non_operational_state": true,
      "relative_read_latency": 4,
      "relative_read_throughput": 4,
      "relative_write_latency": 4,
      "relative_write_throughput": 4,
      "entry_latency_us": 54000,
      "exit_latency_us": 45000,
      "max_power": {
        "value": 50,
        "scale": 1,
        "units_per_watt": 10000
      }
    }
  ],
  "smart_status": {
    "passed": true,
    "nvme": {
      "value": 0
    }
  },
  "nvme_smart_health_information_log": {
    "nsid": -1,
    "critical_warning": 0,
    "temperature": 61,
    "available_spare": 100,
    "available_spare_threshold": 32,
    "percentage_used": 0,
    "data_units_read": 6573104,
    "data_units_written": 16040567,
    "host_reads": 63241130,
    "host_writes": 253050006,
    "controller_busy_time": 0,
    "power_cycles": 430,
    "power_on_hours": 4399,
    "unsafe_shutdowns": 44,
    "media_errors": 0,
    "num_err_log_entries": 0,
    "warning_temp_time": 0,
    "critical_comp_time": 0
  },
  "spare_available": {
    "current_percent": 100,
    "threshold_percent": 32
  },
  "endurance_used": {
    "current_percent": 0
  },
  "power_cycle_count": 430,
  "power_on_time": {
    "hours": 4399
  },
  "nvme_error_information_log": {
    "size": 8,
    "read": 8,
    "unread": 0
  },
  "nvme_self_test_log": {
    "nsid": -1,
    "current_self_test_operation": {
      "value": 0,
      "string": "No self-test in progress"
    }
  }
}
agent/test-data/smart/scan.json (new file, 36 lines)
@@ -0,0 +1,36 @@
{
  "json_format_version": [
    1,
    0
  ],
  "smartctl": {
    "version": [
      7,
      5
    ],
    "pre_release": false,
    "svn_revision": "5714",
    "platform_info": "x86_64-linux-6.17.1-2-cachyos",
    "build_info": "(local build)",
    "argv": [
      "smartctl",
      "--scan",
      "-j"
    ],
    "exit_status": 0
  },
  "devices": [
    {
      "name": "/dev/sda",
      "info_name": "/dev/sda [SAT]",
      "type": "sat",
      "protocol": "ATA"
    },
    {
      "name": "/dev/nvme0",
      "info_name": "/dev/nvme0",
      "type": "nvme",
      "protocol": "NVMe"
    }
  ]
}
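scan.json is a captured `smartctl --scan -j` result, presumably used to exercise device discovery before each disk is queried individually. A minimal sketch of decoding the devices array; the struct and function names are illustrative, only the JSON field names come from the fixture above:

```go
// Illustrative decoding of a `smartctl --scan -j` capture such as scan.json.
// scanOutput is a hypothetical type, not the agent's actual one.
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type scanOutput struct {
	Devices []struct {
		Name     string `json:"name"`
		InfoName string `json:"info_name"`
		Type     string `json:"type"`
		Protocol string `json:"protocol"`
	} `json:"devices"`
}

func main() {
	data, err := os.ReadFile("agent/test-data/smart/scan.json")
	if err != nil {
		panic(err)
	}
	var scan scanOutput
	if err := json.Unmarshal(data, &scan); err != nil {
		panic(err)
	}
	for _, d := range scan.Devices {
		fmt.Printf("%s (%s, %s)\n", d.Name, d.Type, d.Protocol) // e.g. /dev/nvme0 (nvme, NVMe)
	}
}
```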
agent/test-data/smart/scsi.json (new file, 125 lines)
@@ -0,0 +1,125 @@
{
  "json_format_version": [
    1,
    0
  ],
  "smartctl": {
    "version": [
      7,
      3
    ],
    "svn_revision": "5338",
    "platform_info": "x86_64-linux-6.12.43+deb12-amd64",
    "build_info": "(local build)",
    "argv": [
      "smartctl",
      "-aj",
      "/dev/sde"
    ],
    "exit_status": 0
  },
  "local_time": {
    "time_t": 1761502142,
    "asctime": "Sun Oct 21 21:09:02 2025 MSK"
  },
  "device": {
    "name": "/dev/sde",
    "info_name": "/dev/sde",
    "type": "scsi",
    "protocol": "SCSI"
  },
  "scsi_vendor": "YADRO",
  "scsi_product": "WUH721414AL4204",
  "scsi_model_name": "YADRO WUH721414AL4204",
  "scsi_revision": "C240",
  "scsi_version": "SPC-4",
  "user_capacity": {
    "blocks": 3418095616,
    "bytes": 14000519643136
  },
  "logical_block_size": 4096,
  "scsi_lb_provisioning": {
    "name": "fully provisioned",
    "value": 0,
    "management_enabled": {
      "name": "LBPME",
      "value": 0
    },
    "read_zeros": {
      "name": "LBPRZ",
      "value": 0
    }
  },
  "rotation_rate": 7200,
  "form_factor": {
    "scsi_value": 2,
    "name": "3.5 inches"
  },
  "logical_unit_id": "0x5000cca29063dc00",
  "serial_number": "9YHSDH9B",
  "device_type": {
    "scsi_terminology": "Peripheral Device Type [PDT]",
    "scsi_value": 0,
    "name": "disk"
  },
  "scsi_transport_protocol": {
    "name": "SAS (SPL-4)",
    "value": 6
  },
  "smart_support": {
    "available": true,
    "enabled": true
  },
  "temperature_warning": {
    "enabled": true
  },
  "smart_status": {
    "passed": true
  },
  "temperature": {
    "current": 34,
    "drive_trip": 85
  },
  "power_on_time": {
    "hours": 458,
    "minutes": 25
  },
  "scsi_start_stop_cycle_counter": {
    "year_of_manufacture": "2022",
    "week_of_manufacture": "41",
    "specified_cycle_count_over_device_lifetime": 50000,
    "accumulated_start_stop_cycles": 2,
    "specified_load_unload_count_over_device_lifetime": 600000,
    "accumulated_load_unload_cycles": 418
  },
  "scsi_grown_defect_list": 0,
  "scsi_error_counter_log": {
    "read": {
      "errors_corrected_by_eccfast": 0,
      "errors_corrected_by_eccdelayed": 0,
      "errors_corrected_by_rereads_rewrites": 0,
      "total_errors_corrected": 0,
      "correction_algorithm_invocations": 346,
      "gigabytes_processed": "3,641",
      "total_uncorrected_errors": 0
    },
    "write": {
      "errors_corrected_by_eccfast": 0,
      "errors_corrected_by_eccdelayed": 0,
      "errors_corrected_by_rereads_rewrites": 0,
      "total_errors_corrected": 0,
      "correction_algorithm_invocations": 4052,
      "gigabytes_processed": "2124,590",
      "total_uncorrected_errors": 0
    },
    "verify": {
      "errors_corrected_by_eccfast": 0,
      "errors_corrected_by_eccdelayed": 0,
      "errors_corrected_by_rereads_rewrites": 0,
      "total_errors_corrected": 0,
      "correction_algorithm_invocations": 223,
      "gigabytes_processed": "0,000",
      "total_uncorrected_errors": 0
    }
  }
}
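The per-device fixtures (nvme0.json, scsi.json) share a few protocol-independent health fields even though the rest of their layout differs. A hedged sketch of pulling just those common fields out of `smartctl -aj` output; the struct name is hypothetical, only the JSON keys come from the fixtures:

```go
// Illustrative extraction of smart_status.passed, temperature.current and
// power_on_time.hours from the captures above. smartHealth is a hypothetical
// type, not the agent's actual SMART entity.
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type smartHealth struct {
	SmartStatus struct {
		Passed bool `json:"passed"`
	} `json:"smart_status"`
	Temperature struct {
		Current int `json:"current"`
	} `json:"temperature"`
	PowerOnTime struct {
		Hours int `json:"hours"`
	} `json:"power_on_time"`
}

func main() {
	for _, path := range []string{
		"agent/test-data/smart/nvme0.json",
		"agent/test-data/smart/scsi.json",
	} {
		data, err := os.ReadFile(path)
		if err != nil {
			panic(err)
		}
		var h smartHealth
		if err := json.Unmarshal(data, &h); err != nil {
			panic(err)
		}
		fmt.Printf("%s: passed=%v temp=%dC poweredOn=%dh\n",
			path, h.SmartStatus.Passed, h.Temperature.Current, h.PowerOnTime.Hours)
	}
}
```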
agent/test-data/smart/sda.json (new file, 1013 lines; diff not shown because it is too large)
agent/test-data/system_info.json (new file, 17 lines)
@@ -0,0 +1,17 @@
{
  "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
  "Containers": 14,
  "ContainersRunning": 3,
  "ContainersPaused": 1,
  "ContainersStopped": 10,
  "Images": 508,
  "Driver": "overlay2",
  "KernelVersion": "6.8.0-31-generic",
  "OperatingSystem": "Ubuntu 24.04 LTS",
  "OSVersion": "24.04",
  "OSType": "linux",
  "Architecture": "x86_64",
  "NCPU": 4,
  "MemTotal": 2095882240,
  "ServerVersion": "27.0.1"
}
agent/tools/fetchsmartctl/main.go (new file, 130 lines)
@@ -0,0 +1,130 @@
package main

import (
	"crypto/sha1"
	"crypto/sha256"
	"encoding/hex"
	"flag"
	"fmt"
	"hash"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"time"
)

// Download smartctl.exe from the given URL and save it to the given destination.
// This is used to embed smartctl.exe in the Windows build.

func main() {
	url := flag.String("url", "", "URL to download smartctl.exe from (required)")
	out := flag.String("out", "", "Destination path for smartctl.exe (required)")
	sha := flag.String("sha", "", "Optional SHA1/SHA256 checksum for integrity validation")
	force := flag.Bool("force", false, "Force re-download even if destination exists")
	flag.Parse()

	if *url == "" || *out == "" {
		fatalf("-url and -out are required")
	}

	if !*force {
		if info, err := os.Stat(*out); err == nil && info.Size() > 0 {
			fmt.Println("smartctl.exe already present, skipping download")
			return
		}
	}

	if err := downloadFile(*url, *out, *sha); err != nil {
		fatalf("download failed: %v", err)
	}
}

func downloadFile(url, dest, shaHex string) error {
	// Prepare destination
	if err := os.MkdirAll(filepath.Dir(dest), 0o755); err != nil {
		return fmt.Errorf("create dir: %w", err)
	}

	// HTTP client
	client := &http.Client{Timeout: 60 * time.Second}
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return fmt.Errorf("new request: %w", err)
	}
	req.Header.Set("User-Agent", "beszel-fetchsmartctl/1.0")

	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("http get: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf("unexpected HTTP status: %s", resp.Status)
	}

	tmp := dest + ".tmp"
	f, err := os.OpenFile(tmp, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o644)
	if err != nil {
		return fmt.Errorf("open tmp: %w", err)
	}

	// Determine hash algorithm based on length (SHA1=40, SHA256=64)
	var hasher hash.Hash
	if shaHex := strings.TrimSpace(shaHex); shaHex != "" {
		cleanSha := strings.ToLower(strings.ReplaceAll(shaHex, " ", ""))
		switch len(cleanSha) {
		case 40:
			hasher = sha1.New()
		case 64:
			hasher = sha256.New()
		default:
			f.Close()
			os.Remove(tmp)
			return fmt.Errorf("unsupported hash length: %d (expected 40 for SHA1 or 64 for SHA256)", len(cleanSha))
		}
	}

	var mw io.Writer = f
	if hasher != nil {
		mw = io.MultiWriter(f, hasher)
	}
	if _, err := io.Copy(mw, resp.Body); err != nil {
		f.Close()
		os.Remove(tmp)
		return fmt.Errorf("write tmp: %w", err)
	}
	if err := f.Close(); err != nil {
		os.Remove(tmp)
		return fmt.Errorf("close tmp: %w", err)
	}

	if hasher != nil && shaHex != "" {
		cleanSha := strings.ToLower(strings.ReplaceAll(strings.TrimSpace(shaHex), " ", ""))
		got := strings.ToLower(hex.EncodeToString(hasher.Sum(nil)))
		if got != cleanSha {
			os.Remove(tmp)
			return fmt.Errorf("hash mismatch: got %s want %s", got, cleanSha)
		}
	}

	// Make executable and move into place
	if err := os.Chmod(tmp, 0o755); err != nil {
		os.Remove(tmp)
		return fmt.Errorf("chmod: %w", err)
	}
	if err := os.Rename(tmp, dest); err != nil {
		os.Remove(tmp)
		return fmt.Errorf("rename: %w", err)
	}

	fmt.Println("smartctl.exe downloaded to", dest)
	return nil
}

func fatalf(format string, a ...any) {
	fmt.Fprintf(os.Stderr, format+"\n", a...)
	os.Exit(1)
}
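A build script would typically invoke the tool with something like `go run ./agent/tools/fetchsmartctl -url <smartctl download URL> -out <path>/smartctl.exe -sha <expected checksum>`; the URL, output path, and checksum here are placeholders rather than values defined by the project. The `-sha` flag accepts either a 40-character SHA1 or a 64-character SHA256 digest, and the download is written to a `.tmp` file that is only renamed into place after the hash check passes, so a failed verification never leaves a partial binary at the destination.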
@@ -1,12 +1,10 @@
package agent

import (
	"fmt"
	"log"
	"os"
	"os/exec"
	"runtime"
	"strings"

	"github.com/henrygd/beszel/internal/ghupdate"
)
@@ -108,12 +106,12 @@ func Update(useMirror bool) error {
		}
	}

	// 6) Fix SELinux context if necessary
	if err := handleSELinuxContext(exePath); err != nil {
	// Fix SELinux context if necessary
	if err := ghupdate.HandleSELinuxContext(exePath); err != nil {
		ghupdate.ColorPrintf(ghupdate.ColorYellow, "Warning: SELinux context handling: %v", err)
	}

	// 7) Restart service if running under a recognised init system
	// Restart service if running under a recognised init system
	if r := detectRestarter(); r != nil {
		if err := r.Restart(); err != nil {
			ghupdate.ColorPrintf(ghupdate.ColorYellow, "Warning: failed to restart service: %v", err)
@@ -128,41 +126,3 @@ func Update(useMirror bool) error {
	return nil
}

// handleSELinuxContext restores or applies the correct SELinux label to the binary.
func handleSELinuxContext(path string) error {
	out, err := exec.Command("getenforce").Output()
	if err != nil {
		// SELinux not enabled or getenforce not available
		return nil
	}
	state := strings.TrimSpace(string(out))
	if state == "Disabled" {
		return nil
	}

	ghupdate.ColorPrint(ghupdate.ColorYellow, "SELinux is enabled; applying context…")
	var errs []string

	// Try persistent context via semanage+restorecon
	if semanagePath, err := exec.LookPath("semanage"); err == nil {
		if err := exec.Command(semanagePath, "fcontext", "-a", "-t", "bin_t", path).Run(); err != nil {
			errs = append(errs, "semanage fcontext failed: "+err.Error())
		} else if restoreconPath, err := exec.LookPath("restorecon"); err == nil {
			if err := exec.Command(restoreconPath, "-v", path).Run(); err != nil {
				errs = append(errs, "restorecon failed: "+err.Error())
			}
		}
	}

	// Fallback to temporary context via chcon
	if chconPath, err := exec.LookPath("chcon"); err == nil {
		if err := exec.Command(chconPath, "-t", "bin_t", path).Run(); err != nil {
			errs = append(errs, "chcon failed: "+err.Error())
		}
	}

	if len(errs) > 0 {
		return fmt.Errorf("SELinux context errors: %s", strings.Join(errs, "; "))
	}
	return nil
}
@@ -6,7 +6,7 @@ import "github.com/blang/semver"

const (
	// Version is the current version of the application.
	Version = "0.14.1"
	Version = "0.18.3"
	// AppName is the name of the application.
	AppName = "beszel"
)
go.mod (53 changed lines)
@@ -1,27 +1,27 @@
module github.com/henrygd/beszel

go 1.25.1

// lock shoutrrr to specific version to allow review before updating
replace github.com/nicholas-fedor/shoutrrr => github.com/nicholas-fedor/shoutrrr v0.9.1
go 1.25.5

require (
	github.com/blang/semver v3.5.1+incompatible
	github.com/coreos/go-systemd/v22 v22.7.0
	github.com/distatus/battery v0.11.0
	github.com/ebitengine/purego v0.9.1
	github.com/fxamacker/cbor/v2 v2.9.0
	github.com/gliderlabs/ssh v0.3.8
	github.com/google/uuid v1.6.0
	github.com/lxzan/gws v1.8.9
	github.com/nicholas-fedor/shoutrrr v0.10.0
	github.com/nicholas-fedor/shoutrrr v0.13.1
	github.com/pocketbase/dbx v1.11.0
	github.com/pocketbase/pocketbase v0.30.1
	github.com/shirou/gopsutil/v4 v4.25.9
	github.com/pocketbase/pocketbase v0.36.2
	github.com/shirou/gopsutil/v4 v4.26.1
	github.com/spf13/cast v1.10.0
	github.com/spf13/cobra v1.10.1
	github.com/spf13/cobra v1.10.2
	github.com/spf13/pflag v1.0.10
	github.com/stretchr/testify v1.11.1
	golang.org/x/crypto v0.42.0
	golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9
	golang.org/x/crypto v0.47.0
	golang.org/x/exp v0.0.0-20260112195511-716be5621a96
	golang.org/x/sys v0.40.0
	gopkg.in/yaml.v3 v3.0.1
)

@@ -33,37 +33,36 @@ require (
	github.com/dolthub/maphash v0.1.0 // indirect
	github.com/domodwyer/mailyak/v3 v3.6.2 // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/ebitengine/purego v0.9.0 // indirect
	github.com/fatih/color v1.18.0 // indirect
	github.com/gabriel-vasile/mimetype v1.4.10 // indirect
	github.com/gabriel-vasile/mimetype v1.4.13 // indirect
	github.com/ganigeorgiev/fexpr v0.5.0 // indirect
	github.com/go-ole/go-ole v1.3.0 // indirect
	github.com/go-ozzo/ozzo-validation/v4 v4.3.0 // indirect
	github.com/go-sql-driver/mysql v1.9.1 // indirect
	github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
	github.com/godbus/dbus/v5 v5.2.2 // indirect
	github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/klauspost/compress v1.18.0 // indirect
	github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54 // indirect
	github.com/klauspost/compress v1.18.3 // indirect
	github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 // indirect
	github.com/mattn/go-colorable v0.1.14 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/ncruces/go-strftime v0.1.9 // indirect
	github.com/ncruces/go-strftime v1.0.0 // indirect
	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
	github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
	github.com/tklauser/go-sysconf v0.3.15 // indirect
	github.com/tklauser/numcpus v0.10.0 // indirect
	github.com/tklauser/go-sysconf v0.3.16 // indirect
	github.com/tklauser/numcpus v0.11.0 // indirect
	github.com/x448/float16 v0.8.4 // indirect
	github.com/yusufpapurcu/wmi v1.2.4 // indirect
	golang.org/x/image v0.31.0 // indirect
	golang.org/x/net v0.44.0 // indirect
	golang.org/x/oauth2 v0.31.0 // indirect
	golang.org/x/sync v0.17.0 // indirect
	golang.org/x/sys v0.36.0 // indirect
	golang.org/x/text v0.29.0 // indirect
	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
	golang.org/x/image v0.35.0 // indirect
	golang.org/x/net v0.49.0 // indirect
	golang.org/x/oauth2 v0.34.0 // indirect
	golang.org/x/sync v0.19.0 // indirect
	golang.org/x/term v0.39.0 // indirect
	golang.org/x/text v0.33.0 // indirect
	howett.net/plist v1.0.1 // indirect
	modernc.org/libc v1.66.3 // indirect
	modernc.org/libc v1.67.6 // indirect
	modernc.org/mathutil v1.7.1 // indirect
	modernc.org/memory v1.11.0 // indirect
	modernc.org/sqlite v1.39.0 // indirect
	modernc.org/sqlite v1.44.3 // indirect
)
go.sum (142 changed lines)
@@ -9,6 +9,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/coreos/go-systemd/v22 v22.7.0 h1:LAEzFkke61DFROc7zNLX/WA2i5J8gYqe0rSj9KI28KA=
|
||||
github.com/coreos/go-systemd/v22 v22.7.0/go.mod h1:xNUYtjHu2EDXbsxz1i41wouACIwT7Ybq9o0BQhMwD0w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
@@ -23,16 +25,16 @@ github.com/domodwyer/mailyak/v3 v3.6.2 h1:x3tGMsyFhTCaxp6ycgR0FE/bu5QiNp+hetUuCO
|
||||
github.com/domodwyer/mailyak/v3 v3.6.2/go.mod h1:lOm/u9CyCVWHeaAmHIdF4RiKVxKUT/H5XX10lIKAL6c=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/ebitengine/purego v0.9.0 h1:mh0zpKBIXDceC63hpvPuGLiJ8ZAa3DfrFTudmfi8A4k=
|
||||
github.com/ebitengine/purego v0.9.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
|
||||
github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
|
||||
github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
|
||||
github.com/gabriel-vasile/mimetype v1.4.13 h1:46nXokslUBsAJE/wMsp5gtO500a4F3Nkz9Ufpk2AcUM=
|
||||
github.com/gabriel-vasile/mimetype v1.4.13/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
|
||||
github.com/ganigeorgiev/fexpr v0.5.0 h1:XA9JxtTE/Xm+g/JFI6RfZEHSiQlk+1glLvRK1Lpv/Tk=
|
||||
github.com/ganigeorgiev/fexpr v0.5.0/go.mod h1:RyGiGqmeXhEQ6+mlGdnUleLHgtzzu/VGO2WtJkF5drE=
|
||||
github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
|
||||
@@ -49,49 +51,53 @@ github.com/go-sql-driver/mysql v1.9.1 h1:FrjNGn/BsJQjVRuSa8CBrM5BWA9BWoXXat3KrtS
|
||||
github.com/go-sql-driver/mysql v1.9.1/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
||||
github.com/godbus/dbus/v5 v5.2.2 h1:TUR3TgtSVDmjiXOgAAyaZbYmIeP3DPkld3jgKGV8mXQ=
|
||||
github.com/godbus/dbus/v5 v5.2.2/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY=
|
||||
github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
|
||||
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 h1:z2ogiKUYzX5Is6zr/vP9vJGqPwcdqsWjOt+V8J7+bTc=
|
||||
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jarcoal/httpmock v1.4.1 h1:0Ju+VCFuARfFlhVXFc2HxlcQkfB+Xq12/EotHko+x2A=
|
||||
github.com/jarcoal/httpmock v1.4.1/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw=
|
||||
github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54 h1:mFWunSatvkQQDhpdyuFAYwyAan3hzCuma+Pz8sqvOfg=
|
||||
github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
|
||||
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 h1:PwQumkgq4/acIiZhtifTV5OUqqiP82UAl0h87xj/l9k=
|
||||
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
|
||||
github.com/lxzan/gws v1.8.9 h1:VU3SGUeWlQrEwfUSfokcZep8mdg/BrUF+y73YYshdBM=
|
||||
github.com/lxzan/gws v1.8.9/go.mod h1:d9yHaR1eDTBHagQC6KY7ycUOaz5KWeqQtP3xu7aMK8Y=
|
||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
|
||||
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/nicholas-fedor/shoutrrr v0.9.1 h1:SEBhM6P1favzILO0f55CY3P9JwvM9RZ7B1ZMCl+Injs=
|
||||
github.com/nicholas-fedor/shoutrrr v0.9.1/go.mod h1:khue5m8LYyMzdPWuJxDTJeT89l9gjwjA+a+r0e8qxxk=
|
||||
github.com/onsi/ginkgo/v2 v2.25.3 h1:Ty8+Yi/ayDAGtk4XxmmfUy4GabvM+MegeB4cDLRi6nw=
|
||||
github.com/onsi/ginkgo/v2 v2.25.3/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE=
|
||||
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
|
||||
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
|
||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/nicholas-fedor/shoutrrr v0.13.1 h1:llEoHNbnMM4GfQ9+2Ns3n6ssvNfi3NPWluM0AQiicoY=
|
||||
github.com/nicholas-fedor/shoutrrr v0.13.1/go.mod h1:kU4cFJpEAtTzl3iV0l+XUXmM90OlC5T01b7roM4/pYM=
|
||||
github.com/onsi/ginkgo/v2 v2.27.3 h1:ICsZJ8JoYafeXFFlFAG75a7CxMsJHwgKwtO+82SE9L8=
|
||||
github.com/onsi/ginkgo/v2 v2.27.3/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
|
||||
github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM=
|
||||
github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pocketbase/dbx v1.11.0 h1:LpZezioMfT3K4tLrqA55wWFw1EtH1pM4tzSVa7kgszU=
|
||||
github.com/pocketbase/dbx v1.11.0/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs=
|
||||
github.com/pocketbase/pocketbase v0.30.1 h1:8lgfhH+HiSw1PyKVMq2sjtC4ZNvda2f/envTAzWMLOA=
|
||||
github.com/pocketbase/pocketbase v0.30.1/go.mod h1:sUI+uekXZam5Wa0eh+DClc+HieKMCeqsHA7Ydd9vwyE=
|
||||
github.com/pocketbase/pocketbase v0.36.2 h1:mzrxnvXKc3yxKlvZdbwoYXkH8kfIETteD0hWdgj0VI4=
|
||||
github.com/pocketbase/pocketbase v0.36.2/go.mod h1:71vSF8whUDzC8mcLFE10+Qatf9JQdeOGIRWawOuLLKM=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
@@ -99,12 +105,12 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qq
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/shirou/gopsutil/v4 v4.25.9 h1:JImNpf6gCVhKgZhtaAHJ0serfFGtlfIlSC08eaKdTrU=
|
||||
github.com/shirou/gopsutil/v4 v4.25.9/go.mod h1:gxIxoC+7nQRwUl/xNhutXlD8lq+jxTgpIkEf3rADHL8=
|
||||
github.com/shirou/gopsutil/v4 v4.26.1 h1:TOkEyriIXk2HX9d4isZJtbjXbEjf5qyKPAzbzY0JWSo=
|
||||
github.com/shirou/gopsutil/v4 v4.26.1/go.mod h1:medLI9/UNAb0dOI9Q3/7yWSqKkj00u+1tgY8nvv41pc=
|
||||
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
||||
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
||||
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
|
||||
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
|
||||
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
@@ -112,75 +118,75 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4=
|
||||
github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
|
||||
github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=
|
||||
github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ=
|
||||
github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA=
|
||||
github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI=
|
||||
github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw=
|
||||
github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
|
||||
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
|
||||
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
|
||||
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 h1:TQwNpfvNkxAVlItJf6Cr5JTsVZoC/Sj7K3OZv2Pc14A=
|
||||
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
|
||||
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
|
||||
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
|
||||
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
|
||||
golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU=
|
||||
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/image v0.31.0 h1:mLChjE2MV6g1S7oqbXC0/UcKijjm5fnJLUYKIYrLESA=
|
||||
golang.org/x/image v0.31.0/go.mod h1:R9ec5Lcp96v9FTF+ajwaH3uGxPH4fKfHHAVbUILxghA=
|
||||
golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
|
||||
golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
|
||||
golang.org/x/image v0.35.0 h1:LKjiHdgMtO8z7Fh18nGY6KDcoEtVfsgLDPeLyguqb7I=
|
||||
golang.org/x/image v0.35.0/go.mod h1:MwPLTVgvxSASsxdLzKrl8BRFuyqMyGhLwmC+TO1Sybk=
|
||||
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
|
||||
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
|
||||
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo=
|
||||
golang.org/x/oauth2 v0.31.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
|
||||
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
|
||||
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
|
||||
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
|
||||
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
|
||||
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
|
||||
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
|
||||
golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
|
||||
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
|
||||
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
|
||||
google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
||||
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
howett.net/plist v1.0.1 h1:37GdZ8tP09Q35o9ych3ehygcsL+HqKSwzctveSlarvM=
|
||||
howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
||||
modernc.org/cc/v4 v4.26.2 h1:991HMkLjJzYBIfha6ECZdjrIYz2/1ayr+FL8GN+CNzM=
|
||||
modernc.org/cc/v4 v4.26.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||
modernc.org/ccgo/v4 v4.28.0 h1:rjznn6WWehKq7dG4JtLRKxb52Ecv8OUGah8+Z/SfpNU=
|
||||
modernc.org/ccgo/v4 v4.28.0/go.mod h1:JygV3+9AV6SmPhDasu4JgquwU81XAKLd3OKTUDNOiKE=
|
||||
modernc.org/fileutil v1.3.8 h1:qtzNm7ED75pd1C7WgAGcK4edm4fvhtBsEiI/0NQ54YM=
|
||||
modernc.org/fileutil v1.3.8/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
||||
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
||||
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||
modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=
|
||||
modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM=
|
||||
modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
|
||||
modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
||||
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
||||
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
||||
modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE=
|
||||
modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
|
||||
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
||||
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
||||
modernc.org/libc v1.66.3 h1:cfCbjTUcdsKyyZZfEUKfoHcP3S0Wkvz3jgSzByEWVCQ=
|
||||
modernc.org/libc v1.66.3/go.mod h1:XD9zO8kt59cANKvHPXpx7yS2ELPheAey0vjIuZOhOU8=
|
||||
modernc.org/libc v1.67.6 h1:eVOQvpModVLKOdT+LvBPjdQqfrZq+pC39BygcT+E7OI=
|
||||
modernc.org/libc v1.67.6/go.mod h1:JAhxUVlolfYDErnwiqaLvUqc8nfb2r6S6slAgZOnaiE=
|
||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
||||
@@ -189,8 +195,8 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||
modernc.org/sqlite v1.39.0 h1:6bwu9Ooim0yVYA7IZn9demiQk/Ejp0BtTjBWFLymSeY=
|
||||
modernc.org/sqlite v1.39.0/go.mod h1:cPTJYSlgg3Sfg046yBShXENNtPrWrDX8bsbAQBzgQ5E=
|
||||
modernc.org/sqlite v1.44.3 h1:+39JvV/HWMcYslAwRxHb8067w+2zowvFOUrOWIy9PjY=
|
||||
modernc.org/sqlite v1.44.3/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA=
|
||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
|
||||
@@ -28,6 +28,7 @@ type AlertManager struct {
|
||||
|
||||
type AlertMessageData struct {
|
||||
UserID string
|
||||
SystemID string
|
||||
Title string
|
||||
Message string
|
||||
Link string
|
||||
@@ -40,13 +41,19 @@ type UserNotificationSettings struct {
|
||||
}
|
||||
|
||||
type SystemAlertStats struct {
|
||||
Cpu float64 `json:"cpu"`
|
||||
Mem float64 `json:"mp"`
|
||||
Disk float64 `json:"dp"`
|
||||
NetSent float64 `json:"ns"`
|
||||
NetRecv float64 `json:"nr"`
|
||||
Temperatures map[string]float32 `json:"t"`
|
||||
LoadAvg [3]float64 `json:"la"`
|
||||
Cpu float64 `json:"cpu"`
|
||||
Mem float64 `json:"mp"`
|
||||
Disk float64 `json:"dp"`
|
||||
NetSent float64 `json:"ns"`
|
||||
NetRecv float64 `json:"nr"`
|
||||
GPU map[string]SystemAlertGPUData `json:"g"`
|
||||
Temperatures map[string]float32 `json:"t"`
|
||||
LoadAvg [3]float64 `json:"la"`
|
||||
Battery [2]uint8 `json:"bat"`
|
||||
}
|
||||
|
||||
type SystemAlertGPUData struct {
|
||||
Usage float64 `json:"u"`
|
||||
}
|
||||
|
||||
type SystemAlertData struct {
|
||||
@@ -72,7 +79,6 @@ var supportsTitle = map[string]struct{}{
|
||||
"ifttt": {},
|
||||
"join": {},
|
||||
"lark": {},
|
||||
"matrix": {},
|
||||
"ntfy": {},
|
||||
"opsgenie": {},
|
||||
"pushbullet": {},
|
||||
@@ -99,10 +105,84 @@ func NewAlertManager(app hubLike) *AlertManager {
|
||||
func (am *AlertManager) bindEvents() {
|
||||
am.hub.OnRecordAfterUpdateSuccess("alerts").BindFunc(updateHistoryOnAlertUpdate)
|
||||
am.hub.OnRecordAfterDeleteSuccess("alerts").BindFunc(resolveHistoryOnAlertDelete)
|
||||
am.hub.OnRecordAfterUpdateSuccess("smart_devices").BindFunc(am.handleSmartDeviceAlert)
|
||||
}
|
||||
|
||||
// IsNotificationSilenced checks if a notification should be silenced based on configured quiet hours
|
||||
func (am *AlertManager) IsNotificationSilenced(userID, systemID string) bool {
|
||||
// Query for quiet hours windows that match this user and system
|
||||
// Include both global windows (system is null/empty) and system-specific windows
|
||||
var filter string
|
||||
var params dbx.Params
|
||||
|
||||
if systemID == "" {
|
||||
// If no systemID provided, only check global windows
|
||||
filter = "user={:user} AND system=''"
|
||||
params = dbx.Params{"user": userID}
|
||||
} else {
|
||||
// Check both global and system-specific windows
|
||||
filter = "user={:user} AND (system='' OR system={:system})"
|
||||
params = dbx.Params{
|
||||
"user": userID,
|
||||
"system": systemID,
|
||||
}
|
||||
}
|
||||
|
||||
quietHourWindows, err := am.hub.FindAllRecords("quiet_hours", dbx.NewExp(filter, params))
|
||||
if err != nil || len(quietHourWindows) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
for _, window := range quietHourWindows {
|
||||
windowType := window.GetString("type")
|
||||
start := window.GetDateTime("start").Time()
|
||||
end := window.GetDateTime("end").Time()
|
||||
|
||||
if windowType == "daily" {
|
||||
// For daily recurring windows, extract just the time portion and compare
|
||||
// The start/end are stored as full datetime but we only care about HH:MM
|
||||
startHour, startMin, _ := start.Clock()
|
||||
endHour, endMin, _ := end.Clock()
|
||||
nowHour, nowMin, _ := now.Clock()
|
||||
|
||||
// Convert to minutes since midnight for easier comparison
|
||||
startMinutes := startHour*60 + startMin
|
||||
endMinutes := endHour*60 + endMin
|
||||
nowMinutes := nowHour*60 + nowMin
|
||||
|
||||
// Handle case where window crosses midnight
|
||||
if endMinutes < startMinutes {
|
||||
// Window crosses midnight (e.g., 23:00 - 01:00)
|
||||
if nowMinutes >= startMinutes || nowMinutes < endMinutes {
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
// Normal case (e.g., 09:00 - 17:00)
|
||||
if nowMinutes >= startMinutes && nowMinutes < endMinutes {
|
||||
return true
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// One-time window: check if current time is within the date range
|
||||
if (now.After(start) || now.Equal(start)) && now.Before(end) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
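A minimal standalone sketch of the daily-window check implemented above, assuming only the standard library; inDailyWindow is a hypothetical helper name used for illustration, not part of the hub code. It reduces start, end, and the current time to minutes since midnight and treats end < start as a window that crosses midnight, mirroring the comparison in IsNotificationSilenced.

package main

import (
	"fmt"
	"time"
)

// inDailyWindow reports whether now falls inside a recurring daily window.
// Only the clock portion of start/end matters; end < start means the window
// wraps past midnight (e.g. 23:00-01:00).
func inDailyWindow(start, end, now time.Time) bool {
	toMinutes := func(t time.Time) int {
		h, m, _ := t.Clock()
		return h*60 + m
	}
	s, e, n := toMinutes(start), toMinutes(end), toMinutes(now)
	if e < s { // window crosses midnight
		return n >= s || n < e
	}
	return n >= s && n < e
}

func main() {
	start := time.Date(2000, 1, 1, 22, 0, 0, 0, time.UTC)
	end := time.Date(2000, 1, 1, 2, 0, 0, 0, time.UTC)
	fmt.Println(inDailyWindow(start, end, time.Date(2000, 1, 1, 23, 30, 0, 0, time.UTC))) // true
	fmt.Println(inDailyWindow(start, end, time.Date(2000, 1, 1, 12, 0, 0, 0, time.UTC)))  // false
}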
// SendAlert sends an alert to the user
|
||||
func (am *AlertManager) SendAlert(data AlertMessageData) error {
|
||||
// Check if alert is silenced
|
||||
if am.IsNotificationSilenced(data.UserID, data.SystemID) {
|
||||
am.hub.Logger().Info("Notification silenced", "user", data.UserID, "system", data.SystemID, "title", data.Title)
|
||||
return nil
|
||||
}
|
||||
|
||||
// get user settings
|
||||
record, err := am.hub.FindFirstRecordByFilter(
|
||||
"user_settings", "user={:user}",
|
||||
|
||||
387
internal/alerts/alerts_battery_test.go
Normal file
@@ -0,0 +1,387 @@
|
||||
//go:build testing
|
||||
// +build testing
|
||||
|
||||
package alerts_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/tools/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestBatteryAlertLogic tests that battery alerts trigger when value drops BELOW threshold
|
||||
// (opposite of other alerts like CPU, Memory, etc. which trigger when exceeding threshold)
|
||||
func TestBatteryAlertLogic(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create a system
|
||||
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||
require.NoError(t, err)
|
||||
systemRecord := systems[0]
|
||||
|
||||
// Create a battery alert with threshold of 20% and min of 1 minute (immediate trigger)
|
||||
batteryAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||
"name": "Battery",
|
||||
"system": systemRecord.Id,
|
||||
"user": user.Id,
|
||||
"value": 20, // threshold: 20%
|
||||
"min": 1, // 1 minute (immediate trigger for testing)
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify alert is not triggered initially
|
||||
assert.False(t, batteryAlert.GetBool("triggered"), "Alert should not be triggered initially")
|
||||
|
||||
// Create system stats with battery at 50% (above threshold - should NOT trigger)
|
||||
statsHigh := system.Stats{
|
||||
Cpu: 10,
|
||||
MemPct: 30,
|
||||
DiskPct: 40,
|
||||
Battery: [2]uint8{50, 1}, // 50% battery, discharging
|
||||
}
|
||||
statsHighJSON, _ := json.Marshal(statsHigh)
|
||||
_, err = beszelTests.CreateRecord(hub, "system_stats", map[string]any{
|
||||
"system": systemRecord.Id,
|
||||
"type": "1m",
|
||||
"stats": string(statsHighJSON),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create CombinedData for the alert handler
|
||||
combinedDataHigh := &system.CombinedData{
|
||||
Stats: statsHigh,
|
||||
Info: system.Info{
|
||||
AgentVersion: "0.12.0",
|
||||
Cpu: 10,
|
||||
MemPct: 30,
|
||||
DiskPct: 40,
|
||||
},
|
||||
}
|
||||
|
||||
// Simulate system update time
|
||||
systemRecord.Set("updated", time.Now().UTC())
|
||||
err = hub.SaveNoValidate(systemRecord)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Handle system alerts with high battery
|
||||
am := hub.GetAlertManager()
|
||||
err = am.HandleSystemAlerts(systemRecord, combinedDataHigh)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify alert is still NOT triggered (battery 50% is above threshold 20%)
|
||||
batteryAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": batteryAlert.Id})
|
||||
require.NoError(t, err)
|
||||
assert.False(t, batteryAlert.GetBool("triggered"), "Alert should NOT be triggered when battery (50%%) is above threshold (20%%)")
|
||||
|
||||
// Now create stats with battery at 15% (below threshold - should trigger)
|
||||
statsLow := system.Stats{
|
||||
Cpu: 10,
|
||||
MemPct: 30,
|
||||
DiskPct: 40,
|
||||
Battery: [2]uint8{15, 1}, // 15% battery, discharging
|
||||
}
|
||||
statsLowJSON, _ := json.Marshal(statsLow)
|
||||
_, err = beszelTests.CreateRecord(hub, "system_stats", map[string]any{
|
||||
"system": systemRecord.Id,
|
||||
"type": "1m",
|
||||
"stats": string(statsLowJSON),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
combinedDataLow := &system.CombinedData{
|
||||
Stats: statsLow,
|
||||
Info: system.Info{
|
||||
AgentVersion: "0.12.0",
|
||||
Cpu: 10,
|
||||
MemPct: 30,
|
||||
DiskPct: 40,
|
||||
},
|
||||
}
|
||||
|
||||
// Update system timestamp
|
||||
systemRecord.Set("updated", time.Now().UTC())
|
||||
err = hub.SaveNoValidate(systemRecord)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Handle system alerts with low battery
|
||||
err = am.HandleSystemAlerts(systemRecord, combinedDataLow)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for the alert to be processed
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
|
||||
// Verify alert IS triggered (battery 15% is below threshold 20%)
|
||||
batteryAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": batteryAlert.Id})
|
||||
require.NoError(t, err)
|
||||
assert.True(t, batteryAlert.GetBool("triggered"), "Alert SHOULD be triggered when battery (15%%) drops below threshold (20%%)")
|
||||
|
||||
// Now test resolution: battery goes back above threshold
|
||||
statsRecovered := system.Stats{
|
||||
Cpu: 10,
|
||||
MemPct: 30,
|
||||
DiskPct: 40,
|
||||
Battery: [2]uint8{25, 1}, // 25% battery, discharging
|
||||
}
|
||||
statsRecoveredJSON, _ := json.Marshal(statsRecovered)
|
||||
_, err = beszelTests.CreateRecord(hub, "system_stats", map[string]any{
|
||||
"system": systemRecord.Id,
|
||||
"type": "1m",
|
||||
"stats": string(statsRecoveredJSON),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
combinedDataRecovered := &system.CombinedData{
|
||||
Stats: statsRecovered,
|
||||
Info: system.Info{
|
||||
AgentVersion: "0.12.0",
|
||||
Cpu: 10,
|
||||
MemPct: 30,
|
||||
DiskPct: 40,
|
||||
},
|
||||
}
|
||||
|
||||
// Update system timestamp
|
||||
systemRecord.Set("updated", time.Now().UTC())
|
||||
err = hub.SaveNoValidate(systemRecord)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Handle system alerts with recovered battery
|
||||
err = am.HandleSystemAlerts(systemRecord, combinedDataRecovered)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for the alert to be processed
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
|
||||
// Verify alert is now resolved (battery 25% is above threshold 20%)
|
||||
batteryAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": batteryAlert.Id})
|
||||
require.NoError(t, err)
|
||||
assert.False(t, batteryAlert.GetBool("triggered"), "Alert should be resolved when battery (25%%) goes above threshold (20%%)")
|
||||
}
|
||||
|
||||
// TestBatteryAlertNoBattery verifies that systems without battery data don't trigger alerts
|
||||
func TestBatteryAlertNoBattery(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create a system
|
||||
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||
require.NoError(t, err)
|
||||
systemRecord := systems[0]
|
||||
|
||||
// Create a battery alert
|
||||
batteryAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||
"name": "Battery",
|
||||
"system": systemRecord.Id,
|
||||
"user": user.Id,
|
||||
"value": 20,
|
||||
"min": 1,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create stats with NO battery data (Battery[0] = 0)
|
||||
statsNoBattery := system.Stats{
|
||||
Cpu: 10,
|
||||
MemPct: 30,
|
||||
DiskPct: 40,
|
||||
Battery: [2]uint8{0, 0}, // No battery
|
||||
}
|
||||
|
||||
combinedData := &system.CombinedData{
|
||||
Stats: statsNoBattery,
|
||||
Info: system.Info{
|
||||
AgentVersion: "0.12.0",
|
||||
Cpu: 10,
|
||||
MemPct: 30,
|
||||
DiskPct: 40,
|
||||
},
|
||||
}
|
||||
|
||||
// Simulate system update time
|
||||
systemRecord.Set("updated", time.Now().UTC())
|
||||
err = hub.SaveNoValidate(systemRecord)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Handle system alerts
|
||||
am := hub.GetAlertManager()
|
||||
err = am.HandleSystemAlerts(systemRecord, combinedData)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait a moment for processing
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
|
||||
// Verify alert is NOT triggered (no battery data should skip the alert)
|
||||
batteryAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": batteryAlert.Id})
|
||||
require.NoError(t, err)
|
||||
assert.False(t, batteryAlert.GetBool("triggered"), "Alert should NOT be triggered when system has no battery")
|
||||
}
|
||||
|
||||
// TestBatteryAlertAveragedSamples tests battery alerts with min > 1 (averaging multiple samples)
|
||||
// This ensures the inverted threshold logic works correctly across averaged time windows
|
||||
func TestBatteryAlertAveragedSamples(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create a system
|
||||
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||
require.NoError(t, err)
|
||||
systemRecord := systems[0]
|
||||
|
||||
// Create a battery alert with threshold of 25% and min of 2 minutes (requires averaging)
|
||||
batteryAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||
"name": "Battery",
|
||||
"system": systemRecord.Id,
|
||||
"user": user.Id,
|
||||
"value": 25, // threshold: 25%
|
||||
"min": 2, // 2 minutes - requires averaging
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify alert is not triggered initially
|
||||
assert.False(t, batteryAlert.GetBool("triggered"), "Alert should not be triggered initially")
|
||||
|
||||
am := hub.GetAlertManager()
|
||||
now := time.Now().UTC()
|
||||
|
||||
// Create system_stats records with low battery (below threshold)
|
||||
// The alert has min=2 minutes, so alert.time = now - 2 minutes
|
||||
// For the alert to be valid, alert.time must be AFTER the oldest record's created time
|
||||
// So we need records older than (now - 2 min), plus records within the window
|
||||
// Records at: now-3min (oldest, before window), now-90s, now-60s, now-30s
|
||||
recordTimes := []time.Duration{
|
||||
-180 * time.Second, // 3 min ago - this makes the oldest record before alert.time
|
||||
-90 * time.Second,
|
||||
-60 * time.Second,
|
||||
-30 * time.Second,
|
||||
}
|
||||
|
||||
for _, offset := range recordTimes {
|
||||
statsLow := system.Stats{
|
||||
Cpu: 10,
|
||||
MemPct: 30,
|
||||
DiskPct: 40,
|
||||
Battery: [2]uint8{15, 1}, // 15% battery (below 25% threshold)
|
||||
}
|
||||
statsLowJSON, _ := json.Marshal(statsLow)
|
||||
|
||||
recordTime := now.Add(offset)
|
||||
record, err := beszelTests.CreateRecord(hub, "system_stats", map[string]any{
|
||||
"system": systemRecord.Id,
|
||||
"type": "1m",
|
||||
"stats": string(statsLowJSON),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// Update created time to simulate historical records - use SetRaw with formatted string
|
||||
record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
|
||||
err = hub.SaveNoValidate(record)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Create combined data with low battery
|
||||
combinedDataLow := &system.CombinedData{
|
||||
Stats: system.Stats{
|
||||
Cpu: 10,
|
||||
MemPct: 30,
|
||||
DiskPct: 40,
|
||||
Battery: [2]uint8{15, 1},
|
||||
},
|
||||
Info: system.Info{
|
||||
AgentVersion: "0.12.0",
|
||||
Cpu: 10,
|
||||
MemPct: 30,
|
||||
DiskPct: 40,
|
||||
},
|
||||
}
|
||||
|
||||
// Update system timestamp
|
||||
systemRecord.Set("updated", now)
|
||||
err = hub.SaveNoValidate(systemRecord)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Handle system alerts - should trigger because average battery is below threshold
|
||||
err = am.HandleSystemAlerts(systemRecord, combinedDataLow)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for alert processing
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
|
||||
// Verify alert IS triggered (average battery 15% is below threshold 25%)
|
||||
batteryAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": batteryAlert.Id})
|
||||
require.NoError(t, err)
|
||||
assert.True(t, batteryAlert.GetBool("triggered"),
|
||||
"Alert SHOULD be triggered when average battery (15%%) is below threshold (25%%) over min period")
|
||||
|
||||
// Now add records with high battery to test resolution
|
||||
// Use a new time window 2 minutes later
|
||||
newNow := now.Add(2 * time.Minute)
|
||||
// Records need to span before the alert time window (newNow - 2 min)
|
||||
recordTimesHigh := []time.Duration{
|
||||
-180 * time.Second, // 3 min before newNow - makes oldest record before alert.time
|
||||
-90 * time.Second,
|
||||
-60 * time.Second,
|
||||
-30 * time.Second,
|
||||
}
|
||||
|
||||
for _, offset := range recordTimesHigh {
|
||||
statsHigh := system.Stats{
|
||||
Cpu: 10,
|
||||
MemPct: 30,
|
||||
DiskPct: 40,
|
||||
Battery: [2]uint8{50, 1}, // 50% battery (above 25% threshold)
|
||||
}
|
||||
statsHighJSON, _ := json.Marshal(statsHigh)
|
||||
|
||||
recordTime := newNow.Add(offset)
|
||||
record, err := beszelTests.CreateRecord(hub, "system_stats", map[string]any{
|
||||
"system": systemRecord.Id,
|
||||
"type": "1m",
|
||||
"stats": string(statsHighJSON),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
|
||||
err = hub.SaveNoValidate(record)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Create combined data with high battery
|
||||
combinedDataHigh := &system.CombinedData{
|
||||
Stats: system.Stats{
|
||||
Cpu: 10,
|
||||
MemPct: 30,
|
||||
DiskPct: 40,
|
||||
Battery: [2]uint8{50, 1},
|
||||
},
|
||||
Info: system.Info{
|
||||
AgentVersion: "0.12.0",
|
||||
Cpu: 10,
|
||||
MemPct: 30,
|
||||
DiskPct: 40,
|
||||
},
|
||||
}
|
||||
|
||||
// Update system timestamp to the new time window
|
||||
systemRecord.Set("updated", newNow)
|
||||
err = hub.SaveNoValidate(systemRecord)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Handle system alerts - should resolve because average battery is now above threshold
|
||||
err = am.HandleSystemAlerts(systemRecord, combinedDataHigh)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for alert processing
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
|
||||
// Verify alert is resolved (average battery 50% is above threshold 25%)
|
||||
batteryAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": batteryAlert.Id})
|
||||
require.NoError(t, err)
|
||||
assert.False(t, batteryAlert.GetBool("triggered"),
|
||||
"Alert should be resolved when average battery (50%%) is above threshold (25%%) over min period")
|
||||
}
|
||||
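As the comments in TestBatteryAlertAveragedSamples note, when min > 1 the hub averages the sampled battery values inside the min-minute window and then applies the inverted (below-threshold) rule. A rough sketch of that reduction, assuming a plain slice of sampled values rather than PocketBase records; averagedLowAlertTriggered is an illustrative name only.

// averagedLowAlertTriggered averages the samples that fall inside the alert
// window and applies the inverted rule used for "low" alerts such as Battery:
// the alert fires when the average drops below the threshold.
func averagedLowAlertTriggered(samples []float64, threshold float64) bool {
	if len(samples) == 0 {
		return false
	}
	var sum float64
	for _, v := range samples {
		sum += v
	}
	avg := sum / float64(len(samples))
	return avg < threshold
}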
426
internal/alerts/alerts_quiet_hours_test.go
Normal file
@@ -0,0 +1,426 @@

|
||||
//go:build testing
|
||||
// +build testing
|
||||
|
||||
package alerts_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"testing/synctest"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/alerts"
|
||||
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAlertSilencedOneTime(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create a system
|
||||
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||
assert.NoError(t, err)
|
||||
system := systems[0]
|
||||
|
||||
// Create an alert
|
||||
alert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||
"name": "CPU",
|
||||
"system": system.Id,
|
||||
"user": user.Id,
|
||||
"value": 80,
|
||||
"min": 1,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create a one-time quiet hours window (current time - 1 hour to current time + 1 hour)
|
||||
now := time.Now().UTC()
|
||||
startTime := now.Add(-1 * time.Hour)
|
||||
endTime := now.Add(1 * time.Hour)
|
||||
|
||||
_, err = beszelTests.CreateRecord(hub, "quiet_hours", map[string]any{
|
||||
"user": user.Id,
|
||||
"system": system.Id,
|
||||
"type": "one-time",
|
||||
"start": startTime,
|
||||
"end": endTime,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Get alert manager
|
||||
am := alerts.NewAlertManager(hub)
|
||||
defer am.StopWorker()
|
||||
|
||||
// Test that alert is silenced
|
||||
silenced := am.IsNotificationSilenced(user.Id, system.Id)
|
||||
assert.True(t, silenced, "Alert should be silenced during active one-time window")
|
||||
|
||||
// Create a window that has already ended
|
||||
pastStart := now.Add(-3 * time.Hour)
|
||||
pastEnd := now.Add(-2 * time.Hour)
|
||||
|
||||
_, err = beszelTests.CreateRecord(hub, "quiet_hours", map[string]any{
|
||||
"user": user.Id,
|
||||
"system": system.Id,
|
||||
"type": "one-time",
|
||||
"start": pastStart,
|
||||
"end": pastEnd,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Should still be silenced because of the first window
|
||||
silenced = am.IsNotificationSilenced(user.Id, system.Id)
|
||||
assert.True(t, silenced, "Alert should still be silenced (past window doesn't affect active window)")
|
||||
|
||||
// Clear all windows and create a future window
|
||||
_, err = hub.DB().NewQuery("DELETE FROM quiet_hours").Execute()
|
||||
assert.NoError(t, err)
|
||||
|
||||
futureStart := now.Add(2 * time.Hour)
|
||||
futureEnd := now.Add(3 * time.Hour)
|
||||
|
||||
_, err = beszelTests.CreateRecord(hub, "quiet_hours", map[string]any{
|
||||
"user": user.Id,
|
||||
"system": system.Id,
|
||||
"type": "one-time",
|
||||
"start": futureStart,
|
||||
"end": futureEnd,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Alert should NOT be silenced (window hasn't started yet)
|
||||
silenced = am.IsNotificationSilenced(user.Id, system.Id)
|
||||
assert.False(t, silenced, "Alert should not be silenced (window hasn't started)")
|
||||
|
||||
_ = alert
|
||||
}
|
||||
|
||||
func TestAlertSilencedDaily(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create a system
|
||||
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||
assert.NoError(t, err)
|
||||
system := systems[0]
|
||||
|
||||
// Get alert manager
|
||||
am := alerts.NewAlertManager(hub)
|
||||
defer am.StopWorker()
|
||||
|
||||
// Get current hour and create a window that includes current time
|
||||
now := time.Now().UTC()
|
||||
currentHour := now.Hour()
|
||||
currentMin := now.Minute()
|
||||
|
||||
// Create a window from 1 hour ago to 1 hour from now
|
||||
startHour := (currentHour - 1 + 24) % 24
|
||||
endHour := (currentHour + 1) % 24
|
||||
|
||||
// Create times with just the hours/minutes we want (date doesn't matter for daily)
|
||||
startTime := time.Date(2000, 1, 1, startHour, currentMin, 0, 0, time.UTC)
|
||||
endTime := time.Date(2000, 1, 1, endHour, currentMin, 0, 0, time.UTC)
|
||||
|
||||
_, err = beszelTests.CreateRecord(hub, "quiet_hours", map[string]any{
|
||||
"user": user.Id,
|
||||
"system": system.Id,
|
||||
"type": "daily",
|
||||
"start": startTime,
|
||||
"end": endTime,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Alert should be silenced (current time is within the daily window)
|
||||
silenced := am.IsNotificationSilenced(user.Id, system.Id)
|
||||
assert.True(t, silenced, "Alert should be silenced during active daily window")
|
||||
|
||||
// Clear windows and create one that doesn't include current time
|
||||
_, err = hub.DB().NewQuery("DELETE FROM quiet_hours").Execute()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create a window from 6-12 hours from now
|
||||
futureStartHour := (currentHour + 6) % 24
|
||||
futureEndHour := (currentHour + 12) % 24
|
||||
|
||||
startTime = time.Date(2000, 1, 1, futureStartHour, 0, 0, 0, time.UTC)
|
||||
endTime = time.Date(2000, 1, 1, futureEndHour, 0, 0, 0, time.UTC)
|
||||
|
||||
_, err = beszelTests.CreateRecord(hub, "quiet_hours", map[string]any{
|
||||
"user": user.Id,
|
||||
"system": system.Id,
|
||||
"type": "daily",
|
||||
"start": startTime,
|
||||
"end": endTime,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Alert should NOT be silenced
|
||||
silenced = am.IsNotificationSilenced(user.Id, system.Id)
|
||||
assert.False(t, silenced, "Alert should not be silenced (outside daily window)")
|
||||
}
|
||||
|
||||
func TestAlertSilencedDailyMidnightCrossing(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create a system
|
||||
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||
assert.NoError(t, err)
|
||||
system := systems[0]
|
||||
|
||||
// Get alert manager
|
||||
am := alerts.NewAlertManager(hub)
|
||||
defer am.StopWorker()
|
||||
|
||||
// Create a window that crosses midnight: 22:00 - 02:00
|
||||
startTime := time.Date(2000, 1, 1, 22, 0, 0, 0, time.UTC)
|
||||
endTime := time.Date(2000, 1, 1, 2, 0, 0, 0, time.UTC)
|
||||
|
||||
_, err = beszelTests.CreateRecord(hub, "quiet_hours", map[string]any{
|
||||
"user": user.Id,
|
||||
"system": system.Id,
|
||||
"type": "daily",
|
||||
"start": startTime,
|
||||
"end": endTime,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Test with a time at 23:00 (should be silenced)
|
||||
// We can't control the actual current time, but we can verify the logic
|
||||
// by checking if the window was created correctly
|
||||
windows, err := hub.FindAllRecords("quiet_hours", dbx.HashExp{
|
||||
"user": user.Id,
|
||||
"system": system.Id,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, windows, 1, "Should have created 1 window")
|
||||
|
||||
window := windows[0]
|
||||
assert.Equal(t, "daily", window.GetString("type"))
|
||||
assert.Equal(t, 22, window.GetDateTime("start").Time().Hour())
|
||||
assert.Equal(t, 2, window.GetDateTime("end").Time().Hour())
|
||||
}
|
||||
|
||||
func TestAlertSilencedGlobal(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create multiple systems
|
||||
systems, err := beszelTests.CreateSystems(hub, 3, user.Id, "up")
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Get alert manager
|
||||
am := alerts.NewAlertManager(hub)
|
||||
defer am.StopWorker()
|
||||
|
||||
// Create a global quiet hours window (no system specified)
|
||||
now := time.Now().UTC()
|
||||
startTime := now.Add(-1 * time.Hour)
|
||||
endTime := now.Add(1 * time.Hour)
|
||||
|
||||
_, err = beszelTests.CreateRecord(hub, "quiet_hours", map[string]any{
|
||||
"user": user.Id,
|
||||
"type": "one-time",
|
||||
"start": startTime,
|
||||
"end": endTime,
|
||||
// system field is empty/null for global windows
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// All systems should be silenced
|
||||
for _, system := range systems {
|
||||
silenced := am.IsNotificationSilenced(user.Id, system.Id)
|
||||
assert.True(t, silenced, "Alert should be silenced for system %s (global window)", system.Id)
|
||||
}
|
||||
|
||||
// Even with a systemID that doesn't exist, should be silenced
|
||||
silenced := am.IsNotificationSilenced(user.Id, "nonexistent-system")
|
||||
assert.True(t, silenced, "Alert should be silenced for any system (global window)")
|
||||
}
|
||||
|
||||
func TestAlertSilencedSystemSpecific(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create multiple systems
|
||||
systems, err := beszelTests.CreateSystems(hub, 2, user.Id, "up")
|
||||
assert.NoError(t, err)
|
||||
system1 := systems[0]
|
||||
system2 := systems[1]
|
||||
|
||||
// Get alert manager
|
||||
am := alerts.NewAlertManager(hub)
|
||||
defer am.StopWorker()
|
||||
|
||||
// Create a system-specific quiet hours window for system1 only
|
||||
now := time.Now().UTC()
|
||||
startTime := now.Add(-1 * time.Hour)
|
||||
endTime := now.Add(1 * time.Hour)
|
||||
|
||||
_, err = beszelTests.CreateRecord(hub, "quiet_hours", map[string]any{
|
||||
"user": user.Id,
|
||||
"system": system1.Id,
|
||||
"type": "one-time",
|
||||
"start": startTime,
|
||||
"end": endTime,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// System1 should be silenced
|
||||
silenced := am.IsNotificationSilenced(user.Id, system1.Id)
|
||||
assert.True(t, silenced, "Alert should be silenced for system1")
|
||||
|
||||
// System2 should NOT be silenced
|
||||
silenced = am.IsNotificationSilenced(user.Id, system2.Id)
|
||||
assert.False(t, silenced, "Alert should not be silenced for system2")
|
||||
}
|
||||
|
||||
func TestAlertSilencedMultiUser(t *testing.T) {
|
||||
hub, _ := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create two users
|
||||
user1, err := beszelTests.CreateUser(hub, "user1@example.com", "password")
|
||||
assert.NoError(t, err)
|
||||
|
||||
user2, err := beszelTests.CreateUser(hub, "user2@example.com", "password")
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create a system accessible to both users
|
||||
system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "shared-system",
|
||||
"users": []string{user1.Id, user2.Id},
|
||||
"host": "127.0.0.1",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Get alert manager
|
||||
am := alerts.NewAlertManager(hub)
|
||||
defer am.StopWorker()
|
||||
|
||||
// Create a quiet hours window for user1 only
|
||||
now := time.Now().UTC()
|
||||
startTime := now.Add(-1 * time.Hour)
|
||||
endTime := now.Add(1 * time.Hour)
|
||||
|
||||
_, err = beszelTests.CreateRecord(hub, "quiet_hours", map[string]any{
|
||||
"user": user1.Id,
|
||||
"system": system.Id,
|
||||
"type": "one-time",
|
||||
"start": startTime,
|
||||
"end": endTime,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// User1 should be silenced
|
||||
silenced := am.IsNotificationSilenced(user1.Id, system.Id)
|
||||
assert.True(t, silenced, "Alert should be silenced for user1")
|
||||
|
||||
// User2 should NOT be silenced
|
||||
silenced = am.IsNotificationSilenced(user2.Id, system.Id)
|
||||
assert.False(t, silenced, "Alert should not be silenced for user2")
|
||||
}
|
||||
|
||||
func TestAlertSilencedWithActualAlert(t *testing.T) {
|
||||
synctest.Test(t, func(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create a system
|
||||
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||
assert.NoError(t, err)
|
||||
system := systems[0]
|
||||
|
||||
// Create a status alert
|
||||
_, err = beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||
"name": "Status",
|
||||
"system": system.Id,
|
||||
"user": user.Id,
|
||||
"min": 1,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create user settings with email
|
||||
userSettings, err := hub.FindFirstRecordByFilter("user_settings", "user={:user}", dbx.Params{"user": user.Id})
|
||||
if err != nil || userSettings == nil {
|
||||
userSettings, err = beszelTests.CreateRecord(hub, "user_settings", map[string]any{
|
||||
"user": user.Id,
|
||||
"settings": map[string]any{
|
||||
"emails": []string{"test@example.com"},
|
||||
},
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
// Create a quiet hours window
|
||||
now := time.Now().UTC()
|
||||
startTime := now.Add(-1 * time.Hour)
|
||||
endTime := now.Add(1 * time.Hour)
|
||||
|
||||
_, err = beszelTests.CreateRecord(hub, "quiet_hours", map[string]any{
|
||||
"user": user.Id,
|
||||
"system": system.Id,
|
||||
"type": "one-time",
|
||||
"start": startTime,
|
||||
"end": endTime,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Get initial email count
|
||||
initialEmailCount := hub.TestMailer.TotalSend()
|
||||
|
||||
// Trigger an alert by setting system to down
|
||||
system.Set("status", "down")
|
||||
err = hub.SaveNoValidate(system)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Wait for the alert to be processed (1 minute + buffer)
|
||||
time.Sleep(time.Second * 75)
|
||||
synctest.Wait()
|
||||
|
||||
// Check that no email was sent (because alert is silenced)
|
||||
finalEmailCount := hub.TestMailer.TotalSend()
|
||||
assert.Equal(t, initialEmailCount, finalEmailCount, "No emails should be sent when alert is silenced")
|
||||
|
||||
// Clear quiet hours windows
|
||||
_, err = hub.DB().NewQuery("DELETE FROM quiet_hours").Execute()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Reset system to up, then down again
|
||||
system.Set("status", "up")
|
||||
err = hub.SaveNoValidate(system)
|
||||
assert.NoError(t, err)
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
system.Set("status", "down")
|
||||
err = hub.SaveNoValidate(system)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Wait for the alert to be processed
|
||||
time.Sleep(time.Second * 75)
|
||||
synctest.Wait()
|
||||
|
||||
// Now an email should be sent
|
||||
newEmailCount := hub.TestMailer.TotalSend()
|
||||
assert.Greater(t, newEmailCount, finalEmailCount, "Email should be sent when not silenced")
|
||||
})
|
||||
}
|
||||
|
||||
func TestAlertSilencedNoWindows(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create a system
|
||||
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||
assert.NoError(t, err)
|
||||
system := systems[0]
|
||||
|
||||
// Get alert manager
|
||||
am := alerts.NewAlertManager(hub)
|
||||
defer am.StopWorker()
|
||||
|
||||
// Without any quiet hours windows, alert should NOT be silenced
|
||||
silenced := am.IsNotificationSilenced(user.Id, system.Id)
|
||||
assert.False(t, silenced, "Alert should not be silenced when no windows exist")
|
||||
}
|
||||
67
internal/alerts/alerts_smart.go
Normal file
@@ -0,0 +1,67 @@
|
||||
package alerts
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
)
|
||||
|
||||
// handleSmartDeviceAlert sends alerts when a SMART device state changes from PASSED to FAILED.
|
||||
// This is automatic and does not require user opt-in.
|
||||
func (am *AlertManager) handleSmartDeviceAlert(e *core.RecordEvent) error {
|
||||
oldState := e.Record.Original().GetString("state")
|
||||
newState := e.Record.GetString("state")
|
||||
|
||||
// Only alert when transitioning from PASSED to FAILED
|
||||
if oldState != "PASSED" || newState != "FAILED" {
|
||||
return e.Next()
|
||||
}
|
||||
|
||||
systemID := e.Record.GetString("system")
|
||||
if systemID == "" {
|
||||
return e.Next()
|
||||
}
|
||||
|
||||
// Fetch the system record to get the name and users
|
||||
systemRecord, err := e.App.FindRecordById("systems", systemID)
|
||||
if err != nil {
|
||||
e.App.Logger().Error("Failed to find system for SMART alert", "err", err, "systemID", systemID)
|
||||
return e.Next()
|
||||
}
|
||||
|
||||
systemName := systemRecord.GetString("name")
|
||||
deviceName := e.Record.GetString("name")
|
||||
model := e.Record.GetString("model")
|
||||
|
||||
// Build alert message
|
||||
title := fmt.Sprintf("SMART failure on %s: %s \U0001F534", systemName, deviceName)
|
||||
var message string
|
||||
if model != "" {
|
||||
message = fmt.Sprintf("Disk %s (%s) SMART status changed to FAILED", deviceName, model)
|
||||
} else {
|
||||
message = fmt.Sprintf("Disk %s SMART status changed to FAILED", deviceName)
|
||||
}
|
||||
|
||||
// Get users associated with the system
|
||||
userIDs := systemRecord.GetStringSlice("users")
|
||||
if len(userIDs) == 0 {
|
||||
return e.Next()
|
||||
}
|
||||
|
||||
// Send alert to each user
|
||||
for _, userID := range userIDs {
|
||||
if err := am.SendAlert(AlertMessageData{
|
||||
UserID: userID,
|
||||
SystemID: systemID,
|
||||
Title: title,
|
||||
Message: message,
|
||||
Link: am.hub.MakeLink("system", systemID),
|
||||
LinkText: "View " + systemName,
|
||||
}); err != nil {
|
||||
e.App.Logger().Error("Failed to send SMART alert", "err", err, "userID", userID)
|
||||
}
|
||||
}
|
||||
|
||||
return e.Next()
|
||||
}
|
||||
|
||||
196
internal/alerts/alerts_smart_test.go
Normal file
@@ -0,0 +1,196 @@
|
||||
//go:build testing
|
||||
// +build testing
|
||||
|
||||
package alerts_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSmartDeviceAlert(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create a system for the user
|
||||
system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"users": []string{user.Id},
|
||||
"host": "127.0.0.1",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create a smart_device with state PASSED
|
||||
smartDevice, err := beszelTests.CreateRecord(hub, "smart_devices", map[string]any{
|
||||
"system": system.Id,
|
||||
"name": "/dev/sda",
|
||||
"model": "Samsung SSD 970 EVO",
|
||||
"state": "PASSED",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Verify no emails sent initially
|
||||
assert.Zero(t, hub.TestMailer.TotalSend(), "should have 0 emails sent initially")
|
||||
|
||||
// Re-fetch the record so PocketBase can properly track original values
|
||||
smartDevice, err = hub.FindRecordById("smart_devices", smartDevice.Id)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update the smart device state to FAILED
|
||||
smartDevice.Set("state", "FAILED")
|
||||
err = hub.Save(smartDevice)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Wait for the alert to be processed
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Verify that an email was sent
|
||||
assert.EqualValues(t, 1, hub.TestMailer.TotalSend(), "should have 1 email sent after state changed to FAILED")
|
||||
|
||||
// Check the email content
|
||||
lastMessage := hub.TestMailer.LastMessage()
|
||||
assert.Contains(t, lastMessage.Subject, "SMART failure on test-system")
|
||||
assert.Contains(t, lastMessage.Subject, "/dev/sda")
|
||||
assert.Contains(t, lastMessage.Text, "Samsung SSD 970 EVO")
|
||||
assert.Contains(t, lastMessage.Text, "FAILED")
|
||||
}
|
||||
|
||||
func TestSmartDeviceAlertNoAlertOnNonPassedToFailed(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create a system for the user
|
||||
system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"users": []string{user.Id},
|
||||
"host": "127.0.0.1",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create a smart_device with state UNKNOWN
|
||||
smartDevice, err := beszelTests.CreateRecord(hub, "smart_devices", map[string]any{
|
||||
"system": system.Id,
|
||||
"name": "/dev/sda",
|
||||
"model": "Samsung SSD 970 EVO",
|
||||
"state": "UNKNOWN",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Re-fetch the record so PocketBase can properly track original values
|
||||
smartDevice, err = hub.FindRecordById("smart_devices", smartDevice.Id)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update the state from UNKNOWN to FAILED - should NOT trigger alert
|
||||
smartDevice.Set("state", "FAILED")
|
||||
err = hub.Save(smartDevice)
|
||||
assert.NoError(t, err)
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Verify no email was sent (only PASSED -> FAILED triggers alert)
|
||||
assert.Zero(t, hub.TestMailer.TotalSend(), "should have 0 emails when changing from UNKNOWN to FAILED")
|
||||
|
||||
// Re-fetch the record again
|
||||
smartDevice, err = hub.FindRecordById("smart_devices", smartDevice.Id)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update state from FAILED to PASSED - should NOT trigger alert
|
||||
smartDevice.Set("state", "PASSED")
|
||||
err = hub.Save(smartDevice)
|
||||
assert.NoError(t, err)
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Verify no email was sent
|
||||
assert.Zero(t, hub.TestMailer.TotalSend(), "should have 0 emails when changing from FAILED to PASSED")
|
||||
}
|
||||
|
||||
func TestSmartDeviceAlertMultipleUsers(t *testing.T) {
|
||||
hub, user1 := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create a second user
|
||||
user2, err := beszelTests.CreateUser(hub, "test2@example.com", "password")
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create user settings for the second user
|
||||
_, err = beszelTests.CreateRecord(hub, "user_settings", map[string]any{
|
||||
"user": user2.Id,
|
||||
"settings": `{"emails":["test2@example.com"],"webhooks":[]}`,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create a system with both users
|
||||
system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "shared-system",
|
||||
"users": []string{user1.Id, user2.Id},
|
||||
"host": "127.0.0.1",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create a smart_device with state PASSED
|
||||
smartDevice, err := beszelTests.CreateRecord(hub, "smart_devices", map[string]any{
|
||||
"system": system.Id,
|
||||
"name": "/dev/nvme0n1",
|
||||
"model": "WD Black SN850",
|
||||
"state": "PASSED",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Re-fetch the record so PocketBase can properly track original values
|
||||
smartDevice, err = hub.FindRecordById("smart_devices", smartDevice.Id)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update the smart device state to FAILED
|
||||
smartDevice.Set("state", "FAILED")
|
||||
err = hub.Save(smartDevice)
|
||||
assert.NoError(t, err)
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Verify that two emails were sent (one for each user)
|
||||
assert.EqualValues(t, 2, hub.TestMailer.TotalSend(), "should have 2 emails sent for 2 users")
|
||||
}
|
||||
|
||||
func TestSmartDeviceAlertWithoutModel(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create a system for the user
|
||||
system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"users": []string{user.Id},
|
||||
"host": "127.0.0.1",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create a smart_device with state PASSED but no model
|
||||
smartDevice, err := beszelTests.CreateRecord(hub, "smart_devices", map[string]any{
|
||||
"system": system.Id,
|
||||
"name": "/dev/sdb",
|
||||
"state": "PASSED",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Re-fetch the record so PocketBase can properly track original values
|
||||
smartDevice, err = hub.FindRecordById("smart_devices", smartDevice.Id)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update the smart device state to FAILED
|
||||
smartDevice.Set("state", "FAILED")
|
||||
err = hub.Save(smartDevice)
|
||||
assert.NoError(t, err)
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Verify that an email was sent
|
||||
assert.EqualValues(t, 1, hub.TestMailer.TotalSend(), "should have 1 email sent")
|
||||
|
||||
// Check that the email doesn't have empty parentheses for missing model
|
||||
lastMessage := hub.TestMailer.LastMessage()
|
||||
assert.NotContains(t, lastMessage.Text, "()", "should not have empty parentheses for missing model")
|
||||
assert.Contains(t, lastMessage.Text, "/dev/sdb")
|
||||
}
|
||||
@@ -161,19 +161,15 @@ func (am *AlertManager) sendStatusAlert(alertStatus string, systemName string, a
|
||||
title := fmt.Sprintf("Connection to %s is %s %v", systemName, alertStatus, emoji)
|
||||
message := strings.TrimSuffix(title, emoji)
|
||||
|
||||
// if errs := am.hub.ExpandRecord(alertRecord, []string{"user"}, nil); len(errs) > 0 {
|
||||
// return errs["user"]
|
||||
// }
|
||||
// user := alertRecord.ExpandedOne("user")
|
||||
// if user == nil {
|
||||
// return nil
|
||||
// }
|
||||
// Get system ID for the link
|
||||
systemID := alertRecord.GetString("system")
|
||||
|
||||
return am.SendAlert(AlertMessageData{
|
||||
UserID: alertRecord.GetString("user"),
|
||||
SystemID: systemID,
|
||||
Title: title,
|
||||
Message: message,
|
||||
Link: am.hub.MakeLink("system", systemName),
|
||||
Link: am.hub.MakeLink("system", systemID),
|
||||
LinkText: "View " + systemName,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -64,17 +64,32 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
|
||||
case "LoadAvg15":
|
||||
val = data.Info.LoadAvg[2]
|
||||
unit = ""
|
||||
case "GPU":
|
||||
val = data.Info.GpuPct
|
||||
case "Battery":
|
||||
if data.Stats.Battery[0] == 0 {
|
||||
continue
|
||||
}
|
||||
val = float64(data.Stats.Battery[0])
|
||||
}
|
||||
|
||||
triggered := alertRecord.GetBool("triggered")
|
||||
threshold := alertRecord.GetFloat("value")
|
||||
|
||||
// Battery alert has inverted logic: trigger when value is BELOW threshold
|
||||
lowAlert := isLowAlert(name)
|
||||
|
||||
// CONTINUE
|
||||
// IF alert is not triggered and curValue is less than threshold
|
||||
// OR alert is triggered and curValue is greater than threshold
|
||||
if (!triggered && val <= threshold) || (triggered && val > threshold) {
|
||||
// log.Printf("Skipping alert %s: val %f | threshold %f | triggered %v\n", name, val, threshold, triggered)
|
||||
continue
|
||||
// For normal alerts: IF not triggered and curValue <= threshold, OR triggered and curValue > threshold
|
||||
// For low alerts (Battery): IF not triggered and curValue >= threshold, OR triggered and curValue < threshold
|
||||
if lowAlert {
|
||||
if (!triggered && val >= threshold) || (triggered && val < threshold) {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if (!triggered && val <= threshold) || (triggered && val > threshold) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
min := max(1, cast.ToUint8(alertRecord.Get("min")))
|
||||
@@ -92,7 +107,11 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
|
||||
|
||||
// send alert immediately if min is 1 - no need to sum up values.
|
||||
if min == 1 {
|
||||
alert.triggered = val > threshold
|
||||
if lowAlert {
|
||||
alert.triggered = val < threshold
|
||||
} else {
|
||||
alert.triggered = val > threshold
|
||||
}
|
||||
go am.sendSystemAlert(alert)
|
||||
continue
|
||||
}
|
||||
@@ -206,6 +225,19 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
|
||||
alert.val += stats.LoadAvg[1]
|
||||
case "LoadAvg15":
|
||||
alert.val += stats.LoadAvg[2]
|
||||
case "GPU":
|
||||
if len(stats.GPU) == 0 {
|
||||
continue
|
||||
}
|
||||
maxUsage := 0.0
|
||||
for _, gpu := range stats.GPU {
|
||||
if gpu.Usage > maxUsage {
|
||||
maxUsage = gpu.Usage
|
||||
}
|
||||
}
|
||||
alert.val += maxUsage
|
||||
case "Battery":
|
||||
alert.val += float64(stats.Battery[0])
|
||||
default:
|
||||
continue
|
||||
}
|
||||
@@ -243,12 +275,24 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
|
||||
// log.Printf("%s: val %f | count %d | min-count %f | threshold %f\n", alert.name, alert.val, alert.count, minCount, alert.threshold)
|
||||
// pass through alert if count is greater than or equal to minCount
|
||||
if float32(alert.count) >= minCount {
|
||||
if !alert.triggered && alert.val > alert.threshold {
|
||||
alert.triggered = true
|
||||
go am.sendSystemAlert(alert)
|
||||
} else if alert.triggered && alert.val <= alert.threshold {
|
||||
alert.triggered = false
|
||||
go am.sendSystemAlert(alert)
|
||||
// Battery alert has inverted logic: trigger when value is BELOW threshold
|
||||
lowAlert := isLowAlert(alert.name)
|
||||
if lowAlert {
|
||||
if !alert.triggered && alert.val < alert.threshold {
|
||||
alert.triggered = true
|
||||
go am.sendSystemAlert(alert)
|
||||
} else if alert.triggered && alert.val >= alert.threshold {
|
||||
alert.triggered = false
|
||||
go am.sendSystemAlert(alert)
|
||||
}
|
||||
} else {
|
||||
if !alert.triggered && alert.val > alert.threshold {
|
||||
alert.triggered = true
|
||||
go am.sendSystemAlert(alert)
|
||||
} else if alert.triggered && alert.val <= alert.threshold {
|
||||
alert.triggered = false
|
||||
go am.sendSystemAlert(alert)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -268,17 +312,26 @@ func (am *AlertManager) sendSystemAlert(alert SystemAlertData) {
|
||||
alert.name = after + "m Load"
|
||||
}
|
||||
|
||||
// make title alert name lowercase if not CPU
|
||||
// make title alert name lowercase if not CPU or GPU
|
||||
titleAlertName := alert.name
|
||||
if titleAlertName != "CPU" {
|
||||
if titleAlertName != "CPU" && titleAlertName != "GPU" {
|
||||
titleAlertName = strings.ToLower(titleAlertName)
|
||||
}
|
||||
|
||||
var subject string
|
||||
lowAlert := isLowAlert(alert.name)
|
||||
if alert.triggered {
|
||||
subject = fmt.Sprintf("%s %s above threshold", systemName, titleAlertName)
|
||||
if lowAlert {
|
||||
subject = fmt.Sprintf("%s %s below threshold", systemName, titleAlertName)
|
||||
} else {
|
||||
subject = fmt.Sprintf("%s %s above threshold", systemName, titleAlertName)
|
||||
}
|
||||
} else {
|
||||
subject = fmt.Sprintf("%s %s below threshold", systemName, titleAlertName)
|
||||
if lowAlert {
|
||||
subject = fmt.Sprintf("%s %s above threshold", systemName, titleAlertName)
|
||||
} else {
|
||||
subject = fmt.Sprintf("%s %s below threshold", systemName, titleAlertName)
|
||||
}
|
||||
}
|
||||
minutesLabel := "minute"
|
||||
if alert.min > 1 {
|
||||
@@ -296,9 +349,14 @@ func (am *AlertManager) sendSystemAlert(alert SystemAlertData) {
|
||||
}
|
||||
am.SendAlert(AlertMessageData{
|
||||
UserID: alert.alertRecord.GetString("user"),
|
||||
SystemID: alert.systemRecord.Id,
|
||||
Title: subject,
|
||||
Message: body,
|
||||
Link: am.hub.MakeLink("system", systemName),
|
||||
Link: am.hub.MakeLink("system", alert.systemRecord.Id),
|
||||
LinkText: "View " + systemName,
|
||||
})
|
||||
}
|
||||
|
||||
func isLowAlert(name string) bool {
|
||||
return name == "Battery"
|
||||
}
|
||||
|
||||
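The isLowAlert helper above flips the comparison direction for Battery alerts. A condensed sketch of the resulting state transition, using a plain function in place of the real alert record; shouldNotify is a hypothetical name used only for illustration.

// shouldNotify reports whether an alert changes state for the new value and,
// if so, what the new triggered state becomes. For low alerts (Battery) the
// comparison is inverted: trigger below the threshold, resolve at or above it.
func shouldNotify(low, triggered bool, val, threshold float64) (changed, newTriggered bool) {
	exceeded := val > threshold
	if low {
		exceeded = val < threshold
	}
	switch {
	case !triggered && exceeded:
		return true, true // fire the alert
	case triggered && !exceeded:
		return true, false // resolve the alert
	default:
		return false, triggered
	}
}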
@@ -17,9 +17,8 @@ import (
|
||||
type cmdOptions struct {
|
||||
key string // key is the public key(s) for SSH authentication.
|
||||
listen string // listen is the address or port to listen on.
|
||||
// TODO: add hubURL and token
|
||||
// hubURL string // hubURL is the URL of the hub to use.
|
||||
// token string // token is the token to use for authentication.
|
||||
hubURL string // hubURL is the URL of the Beszel hub.
|
||||
token string // token is the token to use for authentication.
|
||||
}
|
||||
|
||||
// parse parses the command line flags and populates the config struct.
|
||||
@@ -47,13 +46,13 @@ func (opts *cmdOptions) parse() bool {
|
||||
// pflag.CommandLine.ParseErrorsWhitelist.UnknownFlags = true
|
||||
pflag.StringVarP(&opts.key, "key", "k", "", "Public key(s) for SSH authentication")
|
||||
pflag.StringVarP(&opts.listen, "listen", "l", "", "Address or port to listen on")
|
||||
// pflag.StringVarP(&opts.hubURL, "hub-url", "u", "", "URL of the hub to use")
|
||||
// pflag.StringVarP(&opts.token, "token", "t", "", "Token to use for authentication")
|
||||
pflag.StringVarP(&opts.hubURL, "url", "u", "", "URL of the Beszel hub")
|
||||
pflag.StringVarP(&opts.token, "token", "t", "", "Token to use for authentication")
|
||||
chinaMirrors := pflag.BoolP("china-mirrors", "c", false, "Use mirror for update (gh.beszel.dev) instead of GitHub")
|
||||
help := pflag.BoolP("help", "h", false, "Show this help message")
|
||||
|
||||
// Convert old single-dash long flags to double-dash for backward compatibility
|
||||
flagsToConvert := []string{"key", "listen"}
|
||||
flagsToConvert := []string{"key", "listen", "url", "token"}
|
||||
for i, arg := range os.Args {
|
||||
for _, flag := range flagsToConvert {
|
||||
singleDash := "-" + flag
|
||||
@@ -95,6 +94,13 @@ func (opts *cmdOptions) parse() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Set environment variables from CLI flags (if provided)
|
||||
if opts.hubURL != "" {
|
||||
os.Setenv("HUB_URL", opts.hubURL)
|
||||
}
|
||||
if opts.token != "" {
|
||||
os.Setenv("TOKEN", opts.token)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
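The backward-compatibility loop in parse above rewrites legacy single-dash long flags (e.g. -url) to the double-dash form before pflag parses them, then exports any provided values as HUB_URL and TOKEN environment variables. Since the loop body is truncated in this hunk, here is a sketch of one way such a normalization can work, with normalizeArgs as an illustrative name; it is not the exact hub implementation.

import "strings"

// normalizeArgs converts legacy single-dash long flags ("-key", "-url", ...)
// into the double-dash form pflag expects, leaving all other arguments untouched.
func normalizeArgs(args, longFlags []string) []string {
	out := make([]string, len(args))
	copy(out, args)
	for i, arg := range out {
		for _, flag := range longFlags {
			single := "-" + flag
			if arg == single || strings.HasPrefix(arg, single+"=") {
				out[i] = "-" + arg // "-key" -> "--key", "-key=x" -> "--key=x"
				break
			}
		}
	}
	return out
}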
@@ -1,8 +1,10 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"github.com/henrygd/beszel/internal/entities/smart"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||
)
|
||||
|
||||
type WebSocketAction = uint8
|
||||
@@ -18,6 +20,8 @@ const (
|
||||
GetContainerInfo
|
||||
// Request SMART data from agent
|
||||
GetSmartData
|
||||
// Request detailed systemd service info from agent
|
||||
GetSystemdInfo
|
||||
// Add new actions here...
|
||||
)
|
||||
|
||||
@@ -31,13 +35,14 @@ type HubRequest[T any] struct {
|
||||
// AgentResponse defines the structure for responses sent from agent to hub.
|
||||
type AgentResponse struct {
|
||||
Id *uint32 `cbor:"0,keyasint,omitempty"`
|
||||
SystemData *system.CombinedData `cbor:"1,keyasint,omitempty,omitzero"`
|
||||
Fingerprint *FingerprintResponse `cbor:"2,keyasint,omitempty,omitzero"`
|
||||
SystemData *system.CombinedData `cbor:"1,keyasint,omitempty,omitzero"` // Legacy (<= 0.17)
|
||||
Fingerprint *FingerprintResponse `cbor:"2,keyasint,omitempty,omitzero"` // Legacy (<= 0.17)
|
||||
Error string `cbor:"3,keyasint,omitempty,omitzero"`
|
||||
String *string `cbor:"4,keyasint,omitempty,omitzero"`
|
||||
SmartData map[string]smart.SmartData `cbor:"5,keyasint,omitempty,omitzero"`
|
||||
// Logs *LogsPayload `cbor:"4,keyasint,omitempty,omitzero"`
|
||||
// RawBytes []byte `cbor:"4,keyasint,omitempty,omitzero"`
|
||||
String *string `cbor:"4,keyasint,omitempty,omitzero"` // Legacy (<= 0.17)
|
||||
SmartData map[string]smart.SmartData `cbor:"5,keyasint,omitempty,omitzero"` // Legacy (<= 0.17)
|
||||
ServiceInfo systemd.ServiceDetails `cbor:"6,keyasint,omitempty,omitzero"` // Legacy (<= 0.17)
|
||||
// Data is the generic response payload for new endpoints (0.18+)
|
||||
Data cbor.RawMessage `cbor:"7,keyasint,omitempty,omitzero"`
|
||||
}
|
||||
|
||||
type FingerprintRequest struct {
|
||||
@@ -54,8 +59,8 @@ type FingerprintResponse struct {
|
||||
}
|
||||
|
||||
type DataRequestOptions struct {
|
||||
CacheTimeMs uint16 `cbor:"0,keyasint"`
|
||||
// ResourceType uint8 `cbor:"1,keyasint,omitempty,omitzero"`
|
||||
CacheTimeMs uint16 `cbor:"0,keyasint"`
|
||||
IncludeDetails bool `cbor:"1,keyasint"`
|
||||
}
|
||||
|
||||
type ContainerLogsRequest struct {
|
||||
@@ -65,3 +70,7 @@ type ContainerLogsRequest struct {
|
||||
type ContainerInfoRequest struct {
|
||||
ContainerID string `cbor:"0,keyasint"`
|
||||
}
|
||||
|
||||
type SystemdInfoRequest struct {
|
||||
ServiceName string `cbor:"0,keyasint"`
|
||||
}
|
||||
|
||||
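The new Data field on AgentResponse carries the generic payload for 0.18+ endpoints as raw CBOR, so each handler can decode it into its own type while the legacy fields remain for older agents. A minimal sketch of that encode/decode pattern with fxamacker/cbor; examplePayload is a hypothetical type for illustration, not a real beszel entity.

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// examplePayload stands in for an endpoint-specific payload type.
type examplePayload struct {
	Name  string `cbor:"0,keyasint"`
	Value uint64 `cbor:"1,keyasint"`
}

func main() {
	// Agent side: encode the payload; the bytes would be assigned to AgentResponse.Data.
	raw, err := cbor.Marshal(examplePayload{Name: "demo", Value: 42})
	if err != nil {
		panic(err)
	}
	data := cbor.RawMessage(raw)

	// Hub side: decode Data into the type the endpoint expects.
	var decoded examplePayload
	if err := cbor.Unmarshal(data, &decoded); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", decoded)
}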
@@ -17,7 +17,7 @@ RUN rm -rf /tmp/*
|
||||
# --------------------------
|
||||
# Final image: default scratch-based agent
|
||||
# --------------------------
|
||||
FROM alpine:latest
|
||||
FROM alpine:3.23
|
||||
COPY --from=builder /agent /agent
|
||||
|
||||
RUN apk add --no-cache smartmontools
|
||||
|
||||
@@ -16,7 +16,7 @@ RUN CGO_ENABLED=0 GOGC=75 GOOS=$TARGETOS GOARCH=$TARGETARCH go build -ldflags "-
|
||||
# Final image
|
||||
# Note: must cap_add: [CAP_PERFMON] and mount /dev/dri/ as volume
|
||||
# --------------------------
|
||||
FROM alpine:edge
|
||||
FROM alpine:3.23
|
||||
|
||||
COPY --from=builder /agent /agent
|
||||
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
FROM --platform=$BUILDPLATFORM golang:alpine AS builder
|
||||
FROM --platform=$BUILDPLATFORM golang:bookworm AS builder
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
|
||||
COPY ../go.mod ../go.sum ./
|
||||
RUN go mod download
|
||||
|
||||
@@ -11,9 +10,26 @@ COPY . ./
|
||||
|
||||
# Build
|
||||
ARG TARGETOS TARGETARCH
|
||||
RUN CGO_ENABLED=0 GOGC=75 GOOS=$TARGETOS GOARCH=$TARGETARCH go build -ldflags "-w -s" -o /agent ./internal/cmd/agent
|
||||
RUN CGO_ENABLED=0 GOGC=75 GOOS=$TARGETOS GOARCH=$TARGETARCH go build -tags glibc -ldflags "-w -s" -o /agent ./internal/cmd/agent
|
||||
|
||||
RUN rm -rf /tmp/*
|
||||
# --------------------------
|
||||
# Smartmontools builder stage
|
||||
# --------------------------
|
||||
FROM nvidia/cuda:12.2.2-base-ubuntu22.04 AS smartmontools-builder
|
||||
|
||||
RUN apt-get update && apt-get install -y \
|
||||
wget \
|
||||
build-essential \
|
||||
&& wget https://downloads.sourceforge.net/project/smartmontools/smartmontools/7.5/smartmontools-7.5.tar.gz \
|
||||
&& tar zxvf smartmontools-7.5.tar.gz \
|
||||
&& cd smartmontools-7.5 \
|
||||
&& ./configure --prefix=/usr --sysconfdir=/etc \
|
||||
&& make \
|
||||
&& make install \
|
||||
&& rm -rf /smartmontools-7.5* \
|
||||
&& apt-get remove -y wget build-essential \
|
||||
&& apt-get autoremove -y \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# --------------------------
|
||||
# Final image: GPU-enabled agent with nvidia-smi
|
||||
@@ -21,10 +37,8 @@ RUN rm -rf /tmp/*
|
||||
FROM nvidia/cuda:12.2.2-base-ubuntu22.04
|
||||
COPY --from=builder /agent /agent
|
||||
|
||||
# this is so we don't need to create the /tmp directory in the scratch container
|
||||
COPY --from=builder /tmp /tmp
|
||||
|
||||
RUN apt-get update && apt-get install -y smartmontools && rm -rf /var/lib/apt/lists/*
|
||||
# Copy smartmontools binaries and config files
|
||||
COPY --from=smartmontools-builder /usr/sbin/smartctl /usr/sbin/smartctl
|
||||
|
||||
# Ensure data persistence across container recreations
|
||||
VOLUME ["/var/lib/beszel-agent"]
|
||||
|
||||
@@ -34,6 +34,14 @@ type ApiStats struct {
|
||||
MemoryStats MemoryStats `json:"memory_stats"`
|
||||
}
|
||||
|
||||
// Docker system info from /info API endpoint
|
||||
type HostInfo struct {
|
||||
OperatingSystem string `json:"OperatingSystem"`
|
||||
KernelVersion string `json:"KernelVersion"`
|
||||
NCPU int `json:"NCPU"`
|
||||
MemTotal uint64 `json:"MemTotal"`
|
||||
}
|
||||
|
||||
func (s *ApiStats) CalculateCpuPercentLinux(prevCpuContainer uint64, prevCpuSystem uint64) float64 {
|
||||
cpuDelta := s.CPUStats.CPUUsage.TotalUsage - prevCpuContainer
|
||||
systemDelta := s.CPUStats.SystemUsage - prevCpuSystem
|
||||
@@ -121,11 +129,12 @@ var DockerHealthStrings = map[string]DockerHealth{
|
||||
|
||||
// Docker container stats
|
||||
type Stats struct {
|
||||
Name string `json:"n" cbor:"0,keyasint"`
|
||||
Cpu float64 `json:"c" cbor:"1,keyasint"`
|
||||
Mem float64 `json:"m" cbor:"2,keyasint"`
|
||||
NetworkSent float64 `json:"ns" cbor:"3,keyasint"`
|
||||
NetworkRecv float64 `json:"nr" cbor:"4,keyasint"`
|
||||
Name string `json:"n" cbor:"0,keyasint"`
|
||||
Cpu float64 `json:"c" cbor:"1,keyasint"`
|
||||
Mem float64 `json:"m" cbor:"2,keyasint"`
|
||||
NetworkSent float64 `json:"ns,omitzero" cbor:"3,keyasint,omitzero"` // deprecated 0.18.3 (MB) - keep field for old agents/records
|
||||
NetworkRecv float64 `json:"nr,omitzero" cbor:"4,keyasint,omitzero"` // deprecated 0.18.3 (MB) - keep field for old agents/records
|
||||
Bandwidth [2]uint64 `json:"b,omitzero" cbor:"9,keyasint,omitzero"` // [sent bytes, recv bytes]
|
||||
|
||||
Health DockerHealth `json:"-" cbor:"5,keyasint"`
|
||||
Status string `json:"-" cbor:"6,keyasint"`
|
||||
|
||||
@@ -1,5 +1,11 @@
|
||||
package smart
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Common types
|
||||
type VersionInfo [2]int
|
||||
|
||||
@@ -124,35 +130,154 @@ type SummaryInfo struct {
|
||||
}
|
||||
|
||||
type AtaSmartAttributes struct {
|
||||
// Revision int `json:"revision"`
|
||||
Table []AtaSmartAttribute `json:"table"`
|
||||
}
|
||||
|
||||
type AtaSmartAttribute struct {
|
||||
ID uint16 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Value uint16 `json:"value"`
|
||||
Worst uint16 `json:"worst"`
|
||||
Thresh uint16 `json:"thresh"`
|
||||
WhenFailed string `json:"when_failed"`
|
||||
Flags AttributeFlags `json:"flags"`
|
||||
Raw RawValue `json:"raw"`
|
||||
type AtaDeviceStatistics struct {
|
||||
Pages []AtaDeviceStatisticsPage `json:"pages"`
|
||||
}
|
||||
|
||||
type AttributeFlags struct {
|
||||
Value int `json:"value"`
|
||||
String string `json:"string"`
|
||||
Prefailure bool `json:"prefailure"`
|
||||
UpdatedOnline bool `json:"updated_online"`
|
||||
Performance bool `json:"performance"`
|
||||
ErrorRate bool `json:"error_rate"`
|
||||
EventCount bool `json:"event_count"`
|
||||
AutoKeep bool `json:"auto_keep"`
|
||||
type AtaDeviceStatisticsPage struct {
|
||||
Number uint8 `json:"number"`
|
||||
Table []AtaDeviceStatisticsEntry `json:"table"`
|
||||
}
|
||||
|
||||
type AtaDeviceStatisticsEntry struct {
|
||||
Name string `json:"name"`
|
||||
Value *uint64 `json:"value,omitempty"`
|
||||
}
|
||||
|
||||
type AtaSmartAttribute struct {
|
||||
ID uint16 `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Value uint16 `json:"value"`
|
||||
Worst uint16 `json:"worst"`
|
||||
Thresh uint16 `json:"thresh"`
|
||||
WhenFailed string `json:"when_failed"`
|
||||
// Flags AttributeFlags `json:"flags"`
|
||||
Raw RawValue `json:"raw"`
|
||||
}
|
||||
|
||||
// type AttributeFlags struct {
|
||||
// Value int `json:"value"`
|
||||
// String string `json:"string"`
|
||||
// Prefailure bool `json:"prefailure"`
|
||||
// UpdatedOnline bool `json:"updated_online"`
|
||||
// Performance bool `json:"performance"`
|
||||
// ErrorRate bool `json:"error_rate"`
|
||||
// EventCount bool `json:"event_count"`
|
||||
// AutoKeep bool `json:"auto_keep"`
|
||||
// }
|
||||
|
||||
type RawValue struct {
|
||||
Value uint64 `json:"value"`
|
||||
String string `json:"string"`
|
||||
Value SmartRawValue `json:"value"`
|
||||
String string `json:"string"`
|
||||
}
|
||||
|
||||
func (r *RawValue) UnmarshalJSON(data []byte) error {
|
||||
var tmp struct {
|
||||
Value json.RawMessage `json:"value"`
|
||||
String string `json:"string"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(data, &tmp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(tmp.Value) > 0 {
|
||||
if err := r.Value.UnmarshalJSON(tmp.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
r.Value = 0
|
||||
}
|
||||
|
||||
r.String = tmp.String
|
||||
|
||||
if parsed, ok := ParseSmartRawValueString(tmp.String); ok {
|
||||
r.Value = SmartRawValue(parsed)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type SmartRawValue uint64
|
||||
|
||||
// UnmarshalJSON handles drives that report power-on hours as strings like "0h+0m+0.000s" or "7344 (253d 8h)"
|
||||
func (v *SmartRawValue) UnmarshalJSON(data []byte) error {
|
||||
trimmed := strings.TrimSpace(string(data))
|
||||
if len(trimmed) == 0 || trimmed == "null" {
|
||||
*v = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
if trimmed[0] == '"' {
|
||||
valueStr, err := strconv.Unquote(trimmed)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
parsed, ok := ParseSmartRawValueString(valueStr)
|
||||
if ok {
|
||||
*v = SmartRawValue(parsed)
|
||||
return nil
|
||||
}
|
||||
*v = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
if parsed, err := strconv.ParseUint(trimmed, 0, 64); err == nil {
|
||||
*v = SmartRawValue(parsed)
|
||||
return nil
|
||||
}
|
||||
|
||||
if parsed, ok := ParseSmartRawValueString(trimmed); ok {
|
||||
*v = SmartRawValue(parsed)
|
||||
return nil
|
||||
}
|
||||
|
||||
*v = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseSmartRawValueString attempts to extract a numeric value from the raw value
|
||||
// strings emitted by smartctl, which sometimes include human-friendly annotations
|
||||
// like "7344 (253d 8h)" or "0h+0m+0.000s". It returns the parsed value and a
|
||||
// boolean indicating success.
|
||||
func ParseSmartRawValueString(value string) (uint64, bool) {
|
||||
value = strings.TrimSpace(value)
|
||||
if value == "" {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
if parsed, err := strconv.ParseUint(value, 0, 64); err == nil {
|
||||
return parsed, true
|
||||
}
|
||||
|
||||
if idx := strings.IndexRune(value, 'h'); idx > 0 {
|
||||
hoursPart := strings.TrimSpace(value[:idx])
|
||||
if hoursPart != "" {
|
||||
if parsed, err := strconv.ParseFloat(hoursPart, 64); err == nil {
|
||||
return uint64(parsed), true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < len(value); i++ {
|
||||
if value[i] < '0' || value[i] > '9' {
|
||||
continue
|
||||
}
|
||||
end := i + 1
|
||||
for end < len(value) && value[end] >= '0' && value[end] <= '9' {
|
||||
end++
|
||||
}
|
||||
digits := value[i:end]
|
||||
if parsed, err := strconv.ParseUint(digits, 10, 64); err == nil {
|
||||
return parsed, true
|
||||
}
|
||||
i = end
|
||||
}
|
||||
|
||||
return 0, false
|
||||
}
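// A minimal usage sketch (not part of the diff) for the parser above; the
// expected values mirror the unit tests added in smart_test.go further down.
// exampleRawValues is a hypothetical helper in the same smart package.
func exampleRawValues() map[string]uint64 {
	out := map[string]uint64{}
	for _, s := range []string{"7344", "7344 (253d 8h)", "62312h+33m+50.907s", "0h+0m+0.000s"} {
		if v, ok := ParseSmartRawValueString(s); ok {
			out[s] = v // 7344, 7344, 62312 and 0 respectively
		}
	}
	return out
}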
// type PowerOnTimeInfo struct {
|
||||
@@ -163,6 +288,11 @@ type TemperatureInfo struct {
|
||||
Current uint8 `json:"current"`
|
||||
}
|
||||
|
||||
type TemperatureInfoScsi struct {
|
||||
Current uint8 `json:"current"`
|
||||
DriveTrip uint8 `json:"drive_trip"`
|
||||
}
|
||||
|
||||
// type SelectiveSelfTestTable struct {
|
||||
// LbaMin int `json:"lba_min"`
|
||||
// LbaMax int `json:"lba_max"`
|
||||
@@ -211,6 +341,8 @@ type SmartInfoForSata struct {
|
||||
// Wwn WwnInfo `json:"wwn"`
|
||||
FirmwareVersion string `json:"firmware_version"`
|
||||
UserCapacity UserCapacity `json:"user_capacity"`
|
||||
ScsiVendor string `json:"scsi_vendor"`
|
||||
ScsiProduct string `json:"scsi_product"`
|
||||
// LogicalBlockSize int `json:"logical_block_size"`
|
||||
// PhysicalBlockSize int `json:"physical_block_size"`
|
||||
// RotationRate int `json:"rotation_rate"`
|
||||
@@ -224,7 +356,8 @@ type SmartInfoForSata struct {
|
||||
SmartStatus SmartStatusInfo `json:"smart_status"`
|
||||
// AtaSmartData AtaSmartData `json:"ata_smart_data"`
|
||||
// AtaSctCapabilities AtaSctCapabilities `json:"ata_sct_capabilities"`
|
||||
AtaSmartAttributes AtaSmartAttributes `json:"ata_smart_attributes"`
|
||||
AtaSmartAttributes AtaSmartAttributes `json:"ata_smart_attributes"`
|
||||
AtaDeviceStatistics AtaDeviceStatistics `json:"ata_device_statistics"`
|
||||
// PowerOnTime PowerOnTimeInfo `json:"power_on_time"`
|
||||
// PowerCycleCount uint16 `json:"power_cycle_count"`
|
||||
Temperature TemperatureInfo `json:"temperature"`
|
||||
@@ -233,6 +366,54 @@ type SmartInfoForSata struct {
|
||||
// AtaSmartSelectiveSelfTestLog AtaSmartSelectiveSelfTestLog `json:"ata_smart_selective_self_test_log"`
|
||||
}
|
||||
|
||||
type ScsiErrorCounter struct {
|
||||
ErrorsCorrectedByECCFast uint64 `json:"errors_corrected_by_eccfast"`
|
||||
ErrorsCorrectedByECCDelayed uint64 `json:"errors_corrected_by_eccdelayed"`
|
||||
ErrorsCorrectedByRereadsRewrites uint64 `json:"errors_corrected_by_rereads_rewrites"`
|
||||
TotalErrorsCorrected uint64 `json:"total_errors_corrected"`
|
||||
CorrectionAlgorithmInvocations uint64 `json:"correction_algorithm_invocations"`
|
||||
GigabytesProcessed string `json:"gigabytes_processed"`
|
||||
TotalUncorrectedErrors uint64 `json:"total_uncorrected_errors"`
|
||||
}
|
||||
|
||||
type ScsiErrorCounterLog struct {
|
||||
Read ScsiErrorCounter `json:"read"`
|
||||
Write ScsiErrorCounter `json:"write"`
|
||||
Verify ScsiErrorCounter `json:"verify"`
|
||||
}
|
||||
|
||||
type ScsiStartStopCycleCounter struct {
|
||||
YearOfManufacture string `json:"year_of_manufacture"`
|
||||
WeekOfManufacture string `json:"week_of_manufacture"`
|
||||
SpecifiedCycleCountOverDeviceLifetime uint64 `json:"specified_cycle_count_over_device_lifetime"`
|
||||
AccumulatedStartStopCycles uint64 `json:"accumulated_start_stop_cycles"`
|
||||
SpecifiedLoadUnloadCountOverDeviceLifetime uint64 `json:"specified_load_unload_count_over_device_lifetime"`
|
||||
AccumulatedLoadUnloadCycles uint64 `json:"accumulated_load_unload_cycles"`
|
||||
}
|
||||
|
||||
type PowerOnTimeScsi struct {
|
||||
Hours uint64 `json:"hours"`
|
||||
Minutes uint64 `json:"minutes"`
|
||||
}
|
||||
|
||||
type SmartInfoForScsi struct {
|
||||
Smartctl SmartctlInfoLegacy `json:"smartctl"`
|
||||
Device DeviceInfo `json:"device"`
|
||||
ScsiVendor string `json:"scsi_vendor"`
|
||||
ScsiProduct string `json:"scsi_product"`
|
||||
ScsiModelName string `json:"scsi_model_name"`
|
||||
ScsiRevision string `json:"scsi_revision"`
|
||||
ScsiVersion string `json:"scsi_version"`
|
||||
SerialNumber string `json:"serial_number"`
|
||||
UserCapacity UserCapacity `json:"user_capacity"`
|
||||
Temperature TemperatureInfoScsi `json:"temperature"`
|
||||
SmartStatus SmartStatusInfo `json:"smart_status"`
|
||||
PowerOnTime PowerOnTimeScsi `json:"power_on_time"`
|
||||
ScsiStartStopCycleCounter ScsiStartStopCycleCounter `json:"scsi_start_stop_cycle_counter"`
|
||||
ScsiGrownDefectList uint64 `json:"scsi_grown_defect_list"`
|
||||
ScsiErrorCounterLog ScsiErrorCounterLog `json:"scsi_error_counter_log"`
|
||||
}
|
||||
|
||||
// type AtaSmartErrorLog struct {
|
||||
// Summary SummaryInfo `json:"summary"`
|
||||
// }
|
||||
|
||||
internal/entities/smart/smart_test.go (new file, 62 lines)
@@ -0,0 +1,62 @@
|
||||
package smart
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSmartRawValueUnmarshalDuration(t *testing.T) {
|
||||
input := []byte(`{"value":"62312h+33m+50.907s","string":"62312h+33m+50.907s"}`)
|
||||
var raw RawValue
|
||||
err := json.Unmarshal(input, &raw)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.EqualValues(t, 62312, raw.Value)
|
||||
}
|
||||
|
||||
func TestSmartRawValueUnmarshalNumericString(t *testing.T) {
|
||||
input := []byte(`{"value":"7344","string":"7344"}`)
|
||||
var raw RawValue
|
||||
err := json.Unmarshal(input, &raw)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.EqualValues(t, 7344, raw.Value)
|
||||
}
|
||||
|
||||
func TestSmartRawValueUnmarshalParenthetical(t *testing.T) {
|
||||
input := []byte(`{"value":"39925 (212 206 0)","string":"39925 (212 206 0)"}`)
|
||||
var raw RawValue
|
||||
err := json.Unmarshal(input, &raw)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.EqualValues(t, 39925, raw.Value)
|
||||
}
|
||||
|
||||
func TestSmartRawValueUnmarshalDurationWithFractions(t *testing.T) {
|
||||
input := []byte(`{"value":"2748h+31m+49.560s","string":"2748h+31m+49.560s"}`)
|
||||
var raw RawValue
|
||||
err := json.Unmarshal(input, &raw)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.EqualValues(t, 2748, raw.Value)
|
||||
}
|
||||
|
||||
func TestSmartRawValueUnmarshalParentheticalRawValue(t *testing.T) {
|
||||
input := []byte(`{"value":57891864217128,"string":"39925 (212 206 0)"}`)
|
||||
var raw RawValue
|
||||
err := json.Unmarshal(input, &raw)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.EqualValues(t, 39925, raw.Value)
|
||||
}
|
||||
|
||||
func TestSmartRawValueUnmarshalDurationRawValue(t *testing.T) {
|
||||
input := []byte(`{"value":57891864217128,"string":"2748h+31m+49.560s"}`)
|
||||
var raw RawValue
|
||||
err := json.Unmarshal(input, &raw)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.EqualValues(t, 2748, raw.Value)
|
||||
}
|
||||
@@ -3,9 +3,11 @@ package system
|
||||
// TODO: this is confusing, make common package with common/types common/helpers etc
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/container"
|
||||
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||
)
|
||||
|
||||
type Stats struct {
|
||||
@@ -25,8 +27,8 @@ type Stats struct {
|
||||
DiskWritePs float64 `json:"dw" cbor:"13,keyasint"`
|
||||
MaxDiskReadPs float64 `json:"drm,omitempty" cbor:"14,keyasint,omitempty"`
|
||||
MaxDiskWritePs float64 `json:"dwm,omitempty" cbor:"15,keyasint,omitempty"`
|
||||
NetworkSent float64 `json:"ns" cbor:"16,keyasint"`
|
||||
NetworkRecv float64 `json:"nr" cbor:"17,keyasint"`
|
||||
NetworkSent float64 `json:"ns,omitzero" cbor:"16,keyasint,omitzero"`
|
||||
NetworkRecv float64 `json:"nr,omitzero" cbor:"17,keyasint,omitzero"`
|
||||
MaxNetworkSent float64 `json:"nsm,omitempty" cbor:"18,keyasint,omitempty"`
|
||||
MaxNetworkRecv float64 `json:"nrm,omitempty" cbor:"19,keyasint,omitempty"`
|
||||
Temperatures map[string]float64 `json:"t,omitempty" cbor:"20,keyasint,omitempty"`
|
||||
@@ -41,9 +43,28 @@ type Stats struct {
|
||||
LoadAvg [3]float64 `json:"la,omitempty" cbor:"28,keyasint"`
|
||||
Battery [2]uint8 `json:"bat,omitzero" cbor:"29,keyasint,omitzero"` // [percent, charge state]
|
||||
MaxMem float64 `json:"mm,omitempty" cbor:"30,keyasint,omitempty"`
|
||||
NetworkInterfaces map[string][4]uint64 `json:"ni,omitempty" cbor:"31,keyasint,omitempty"` // [upload bytes, download bytes, total upload, total download]
|
||||
DiskIO [2]uint64 `json:"dio,omitzero" cbor:"32,keyasint,omitzero"` // [read bytes, write bytes]
|
||||
MaxDiskIO [2]uint64 `json:"diom,omitzero" cbor:"-"` // [max read bytes, max write bytes]
|
||||
NetworkInterfaces map[string][4]uint64 `json:"ni,omitempty" cbor:"31,keyasint,omitempty"` // [upload bytes, download bytes, total upload, total download]
|
||||
DiskIO [2]uint64 `json:"dio,omitzero" cbor:"32,keyasint,omitzero"` // [read bytes, write bytes]
|
||||
MaxDiskIO [2]uint64 `json:"diom,omitzero" cbor:"-"` // [max read bytes, max write bytes]
|
||||
CpuBreakdown []float64 `json:"cpub,omitempty" cbor:"33,keyasint,omitempty"` // [user, system, iowait, steal, idle]
|
||||
CpuCoresUsage Uint8Slice `json:"cpus,omitempty" cbor:"34,keyasint,omitempty"` // per-core busy usage [CPU0..]
|
||||
}
|
||||
|
||||
// Uint8Slice wraps []uint8 to customize JSON encoding while keeping CBOR efficient.
|
||||
// JSON: encodes as array of numbers (avoids base64 string).
|
||||
// CBOR: falls back to default handling for []uint8 (byte string), keeping payload small.
|
||||
type Uint8Slice []uint8
|
||||
|
||||
func (s Uint8Slice) MarshalJSON() ([]byte, error) {
|
||||
if s == nil {
|
||||
return []byte("null"), nil
|
||||
}
|
||||
// Convert to wider ints to force array-of-numbers encoding.
|
||||
arr := make([]uint16, len(s))
|
||||
for i, v := range s {
|
||||
arr[i] = uint16(v)
|
||||
}
|
||||
return json.Marshal(arr)
|
||||
}
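// A minimal sketch (not part of the diff) of why the wrapper exists:
// encoding/json turns a plain []uint8 into a base64 string, while Uint8Slice
// keeps an array of numbers. exampleUint8SliceJSON is a hypothetical helper
// in this package.
func exampleUint8SliceJSON() (string, string) {
	plain, _ := json.Marshal([]uint8{10, 20})      // "ChQ=" (base64 of the raw bytes)
	wrapped, _ := json.Marshal(Uint8Slice{10, 20}) // [10,20]
	return string(plain), string(wrapped)
}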
type GPUData struct {
|
||||
@@ -102,34 +123,56 @@ const (
|
||||
ConnectionTypeWebSocket
|
||||
)
|
||||
|
||||
// Core system data that is needed in All Systems table
|
||||
type Info struct {
|
||||
Hostname string `json:"h" cbor:"0,keyasint"`
|
||||
KernelVersion string `json:"k,omitempty" cbor:"1,keyasint,omitempty"`
|
||||
Cores int `json:"c" cbor:"2,keyasint"`
|
||||
Hostname string `json:"h,omitempty" cbor:"0,keyasint,omitempty"` // deprecated - moved to Details struct
|
||||
KernelVersion string `json:"k,omitempty" cbor:"1,keyasint,omitempty"` // deprecated - moved to Details struct
|
||||
Cores int `json:"c,omitzero" cbor:"2,keyasint,omitzero"` // deprecated - moved to Details struct
|
||||
// Threads is needed in Info struct to calculate load average thresholds
|
||||
Threads int `json:"t,omitempty" cbor:"3,keyasint,omitempty"`
|
||||
CpuModel string `json:"m" cbor:"4,keyasint"`
|
||||
CpuModel string `json:"m,omitempty" cbor:"4,keyasint,omitempty"` // deprecated - moved to Details struct
|
||||
Uptime uint64 `json:"u" cbor:"5,keyasint"`
|
||||
Cpu float64 `json:"cpu" cbor:"6,keyasint"`
|
||||
MemPct float64 `json:"mp" cbor:"7,keyasint"`
|
||||
DiskPct float64 `json:"dp" cbor:"8,keyasint"`
|
||||
Bandwidth float64 `json:"b" cbor:"9,keyasint"`
|
||||
AgentVersion string `json:"v" cbor:"10,keyasint"`
|
||||
Podman bool `json:"p,omitempty" cbor:"11,keyasint,omitempty"`
|
||||
Podman bool `json:"p,omitempty" cbor:"11,keyasint,omitempty"` // deprecated - moved to Details struct
|
||||
GpuPct float64 `json:"g,omitempty" cbor:"12,keyasint,omitempty"`
|
||||
DashboardTemp float64 `json:"dt,omitempty" cbor:"13,keyasint,omitempty"`
|
||||
Os Os `json:"os" cbor:"14,keyasint"`
|
||||
LoadAvg1 float64 `json:"l1,omitempty" cbor:"15,keyasint,omitempty"`
|
||||
LoadAvg5 float64 `json:"l5,omitempty" cbor:"16,keyasint,omitempty"`
|
||||
LoadAvg15 float64 `json:"l15,omitempty" cbor:"17,keyasint,omitempty"`
|
||||
Os Os `json:"os,omitempty" cbor:"14,keyasint,omitempty"` // deprecated - moved to Details struct
|
||||
LoadAvg1 float64 `json:"l1,omitempty" cbor:"15,keyasint,omitempty"` // deprecated - use `la` array instead
|
||||
LoadAvg5 float64 `json:"l5,omitempty" cbor:"16,keyasint,omitempty"` // deprecated - use `la` array instead
|
||||
LoadAvg15 float64 `json:"l15,omitempty" cbor:"17,keyasint,omitempty"` // deprecated - use `la` array instead
|
||||
BandwidthBytes uint64 `json:"bb" cbor:"18,keyasint"`
|
||||
// TODO: remove load fields in future release in favor of load avg array
|
||||
LoadAvg [3]float64 `json:"la,omitempty" cbor:"19,keyasint"`
|
||||
ConnectionType ConnectionType `json:"ct,omitempty" cbor:"20,keyasint,omitempty,omitzero"`
|
||||
|
||||
LoadAvg [3]float64 `json:"la,omitempty" cbor:"19,keyasint"`
|
||||
ConnectionType ConnectionType `json:"ct,omitempty" cbor:"20,keyasint,omitempty,omitzero"`
|
||||
ExtraFsPct map[string]float64 `json:"efs,omitempty" cbor:"21,keyasint,omitempty"`
|
||||
Services []uint16 `json:"sv,omitempty" cbor:"22,keyasint,omitempty"` // [totalServices, numFailedServices]
|
||||
Battery [2]uint8 `json:"bat,omitzero" cbor:"23,keyasint,omitzero"` // [percent, charge state]
|
||||
}
|
||||
|
||||
// Data that does not change during process lifetime and is not needed in All Systems table
|
||||
type Details struct {
|
||||
Hostname string `cbor:"0,keyasint"`
|
||||
Kernel string `cbor:"1,keyasint,omitempty"`
|
||||
Cores int `cbor:"2,keyasint"`
|
||||
Threads int `cbor:"3,keyasint"`
|
||||
CpuModel string `cbor:"4,keyasint"`
|
||||
Os Os `cbor:"5,keyasint"`
|
||||
OsName string `cbor:"6,keyasint"`
|
||||
Arch string `cbor:"7,keyasint"`
|
||||
Podman bool `cbor:"8,keyasint,omitempty"`
|
||||
MemoryTotal uint64 `cbor:"9,keyasint"`
|
||||
SmartInterval time.Duration `cbor:"10,keyasint,omitempty"`
|
||||
}
|
||||
|
||||
// Final data structure to return to the hub
|
||||
type CombinedData struct {
|
||||
Stats Stats `json:"stats" cbor:"0,keyasint"`
|
||||
Info Info `json:"info" cbor:"1,keyasint"`
|
||||
Containers []*container.Stats `json:"container" cbor:"2,keyasint"`
|
||||
Stats Stats `json:"stats" cbor:"0,keyasint"`
|
||||
Info Info `json:"info" cbor:"1,keyasint"`
|
||||
Containers []*container.Stats `json:"container" cbor:"2,keyasint"`
|
||||
SystemdServices []*systemd.Service `json:"systemd,omitempty" cbor:"3,keyasint,omitempty"`
|
||||
Details *Details `cbor:"4,keyasint,omitempty"`
|
||||
}
|
||||
|
||||
internal/entities/systemd/systemd.go (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
package systemd
|
||||
|
||||
import (
|
||||
"math"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ServiceState represents the status of a systemd service
|
||||
type ServiceState uint8
|
||||
|
||||
const (
|
||||
StatusActive ServiceState = iota
|
||||
StatusInactive
|
||||
StatusFailed
|
||||
StatusActivating
|
||||
StatusDeactivating
|
||||
StatusReloading
|
||||
)
|
||||
|
||||
// ServiceSubState represents the sub status of a systemd service
|
||||
type ServiceSubState uint8
|
||||
|
||||
const (
|
||||
SubStateDead ServiceSubState = iota
|
||||
SubStateRunning
|
||||
SubStateExited
|
||||
SubStateFailed
|
||||
SubStateUnknown
|
||||
)
|
||||
|
||||
// ParseServiceStatus converts a string status to a ServiceStatus enum value
|
||||
func ParseServiceStatus(status string) ServiceState {
|
||||
switch status {
|
||||
case "active":
|
||||
return StatusActive
|
||||
case "inactive":
|
||||
return StatusInactive
|
||||
case "failed":
|
||||
return StatusFailed
|
||||
case "activating":
|
||||
return StatusActivating
|
||||
case "deactivating":
|
||||
return StatusDeactivating
|
||||
case "reloading":
|
||||
return StatusReloading
|
||||
default:
|
||||
return StatusInactive
|
||||
}
|
||||
}
|
||||
|
||||
// ParseServiceSubState converts a string sub status to a ServiceSubState enum value
|
||||
func ParseServiceSubState(subState string) ServiceSubState {
|
||||
switch subState {
|
||||
case "dead":
|
||||
return SubStateDead
|
||||
case "running":
|
||||
return SubStateRunning
|
||||
case "exited":
|
||||
return SubStateExited
|
||||
case "failed":
|
||||
return SubStateFailed
|
||||
default:
|
||||
return SubStateUnknown
|
||||
}
|
||||
}
|
||||
|
||||
// Service represents a single systemd service with its stats.
|
||||
type Service struct {
|
||||
Name string `json:"n" cbor:"0,keyasint"`
|
||||
State ServiceState `json:"s" cbor:"1,keyasint"`
|
||||
Cpu float64 `json:"c" cbor:"2,keyasint"`
|
||||
Mem uint64 `json:"m" cbor:"3,keyasint"`
|
||||
MemPeak uint64 `json:"mp" cbor:"4,keyasint"`
|
||||
Sub ServiceSubState `json:"ss" cbor:"5,keyasint"`
|
||||
CpuPeak float64 `json:"cp" cbor:"6,keyasint"`
|
||||
PrevCpuUsage uint64 `json:"-"`
|
||||
PrevReadTime time.Time `json:"-"`
|
||||
}
|
||||
|
||||
// UpdateCPUPercent calculates the CPU usage percentage for the service.
|
||||
func (s *Service) UpdateCPUPercent(cpuUsage uint64) {
|
||||
now := time.Now()
|
||||
|
||||
if s.PrevReadTime.IsZero() || cpuUsage < s.PrevCpuUsage {
|
||||
s.Cpu = 0
|
||||
s.PrevCpuUsage = cpuUsage
|
||||
s.PrevReadTime = now
|
||||
return
|
||||
}
|
||||
|
||||
duration := now.Sub(s.PrevReadTime).Nanoseconds()
|
||||
if duration <= 0 {
|
||||
s.PrevCpuUsage = cpuUsage
|
||||
s.PrevReadTime = now
|
||||
return
|
||||
}
|
||||
|
||||
coreCount := int64(runtime.NumCPU())
|
||||
duration *= coreCount
|
||||
|
||||
usageDelta := cpuUsage - s.PrevCpuUsage
|
||||
cpuPercent := float64(usageDelta) / float64(duration)
|
||||
s.Cpu = twoDecimals(cpuPercent * 100)
|
||||
|
||||
if s.Cpu > s.CpuPeak {
|
||||
s.CpuPeak = s.Cpu
|
||||
}
|
||||
|
||||
s.PrevCpuUsage = cpuUsage
|
||||
s.PrevReadTime = now
|
||||
}
|
||||
|
||||
func twoDecimals(value float64) float64 {
|
||||
return math.Round(value*100) / 100
|
||||
}
|
||||
|
||||
// ServiceDependency represents a unit that the service depends on.
|
||||
type ServiceDependency struct {
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description,omitempty"`
|
||||
ActiveState string `json:"activeState,omitempty"`
|
||||
SubState string `json:"subState,omitempty"`
|
||||
}
|
||||
|
||||
// ServiceDetails contains extended information about a systemd service.
|
||||
type ServiceDetails map[string]any
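// A minimal usage sketch (not part of the diff): UpdateCPUPercent expects the
// unit's cumulative CPU time in nanoseconds (e.g. systemd's CPUUsageNSec
// property) and normalizes by elapsed wall time times runtime.NumCPU(), so a
// service that burned 2s of CPU over 1s of wall time on a 4-core host reports
// roughly 50%. exampleServiceCPU is a hypothetical helper in this package.
func exampleServiceCPU(svc *Service, cpuUsageNanos uint64) float64 {
	// The first call only primes PrevCpuUsage/PrevReadTime and leaves Cpu at 0;
	// later calls update Cpu and raise CpuPeak when a new high is seen.
	svc.UpdateCPUPercent(cpuUsageNanos)
	return svc.Cpu
}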
internal/entities/systemd/systemd_test.go (new file, 113 lines)
@@ -0,0 +1,113 @@
|
||||
//go:build testing
|
||||
|
||||
package systemd_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParseServiceStatus(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
expected systemd.ServiceState
|
||||
}{
|
||||
{"active", systemd.StatusActive},
|
||||
{"inactive", systemd.StatusInactive},
|
||||
{"failed", systemd.StatusFailed},
|
||||
{"activating", systemd.StatusActivating},
|
||||
{"deactivating", systemd.StatusDeactivating},
|
||||
{"reloading", systemd.StatusReloading},
|
||||
{"unknown", systemd.StatusInactive}, // default case
|
||||
{"", systemd.StatusInactive}, // default case
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.input, func(t *testing.T) {
|
||||
result := systemd.ParseServiceStatus(test.input)
|
||||
assert.Equal(t, test.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseServiceSubState(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
expected systemd.ServiceSubState
|
||||
}{
|
||||
{"dead", systemd.SubStateDead},
|
||||
{"running", systemd.SubStateRunning},
|
||||
{"exited", systemd.SubStateExited},
|
||||
{"failed", systemd.SubStateFailed},
|
||||
{"unknown", systemd.SubStateUnknown},
|
||||
{"other", systemd.SubStateUnknown}, // default case
|
||||
{"", systemd.SubStateUnknown}, // default case
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.input, func(t *testing.T) {
|
||||
result := systemd.ParseServiceSubState(test.input)
|
||||
assert.Equal(t, test.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceUpdateCPUPercent(t *testing.T) {
|
||||
t.Run("initial call sets CPU to 0", func(t *testing.T) {
|
||||
service := &systemd.Service{}
|
||||
service.UpdateCPUPercent(1000)
|
||||
assert.Equal(t, 0.0, service.Cpu)
|
||||
assert.Equal(t, uint64(1000), service.PrevCpuUsage)
|
||||
assert.False(t, service.PrevReadTime.IsZero())
|
||||
})
|
||||
|
||||
t.Run("subsequent call calculates CPU percentage", func(t *testing.T) {
|
||||
service := &systemd.Service{}
|
||||
service.PrevCpuUsage = 1000
|
||||
service.PrevReadTime = time.Now().Add(-time.Second)
|
||||
|
||||
service.UpdateCPUPercent(8000000000) // 8 seconds of CPU time
|
||||
|
||||
// CPU usage should be positive and reasonable
|
||||
assert.Greater(t, service.Cpu, 0.0, "CPU usage should be positive")
|
||||
assert.LessOrEqual(t, service.Cpu, 100.0, "CPU usage should not exceed 100%")
|
||||
assert.Equal(t, uint64(8000000000), service.PrevCpuUsage)
|
||||
assert.Greater(t, service.CpuPeak, 0.0, "CPU peak should be set")
|
||||
})
|
||||
|
||||
t.Run("CPU peak updates only when higher", func(t *testing.T) {
|
||||
service := &systemd.Service{}
|
||||
service.PrevCpuUsage = 1000
|
||||
service.PrevReadTime = time.Now().Add(-time.Second)
|
||||
service.UpdateCPUPercent(8000000000) // Set initial peak to ~50%
|
||||
initialPeak := service.CpuPeak
|
||||
|
||||
// Now try with much lower CPU usage - should not update peak
|
||||
service.PrevReadTime = time.Now().Add(-time.Second)
|
||||
service.UpdateCPUPercent(1000000) // Much lower usage
|
||||
assert.Equal(t, initialPeak, service.CpuPeak, "Peak should not update for lower CPU usage")
|
||||
})
|
||||
|
||||
t.Run("handles zero duration", func(t *testing.T) {
|
||||
service := &systemd.Service{}
|
||||
service.PrevCpuUsage = 1000
|
||||
now := time.Now()
|
||||
service.PrevReadTime = now
|
||||
// Mock time.Now() to return the same time to ensure zero duration
|
||||
// Since we can't mock time in Go easily, we'll check the logic manually
|
||||
// The zero duration case happens when duration <= 0
|
||||
assert.Equal(t, 0.0, service.Cpu, "CPU should start at 0")
|
||||
})
|
||||
|
||||
t.Run("handles CPU usage wraparound", func(t *testing.T) {
|
||||
service := &systemd.Service{}
|
||||
// Simulate wraparound where new usage is less than previous
|
||||
service.PrevCpuUsage = 1000
|
||||
service.PrevReadTime = time.Now().Add(-time.Second)
|
||||
service.UpdateCPUPercent(500) // Less than previous, should reset
|
||||
assert.Equal(t, 0.0, service.Cpu)
|
||||
})
|
||||
}
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
@@ -345,5 +346,32 @@ func archiveSuffix(binaryName, goos, goarch string) string {
|
||||
if goos == "windows" {
|
||||
return fmt.Sprintf("%s_%s_%s.zip", binaryName, goos, goarch)
|
||||
}
|
||||
// Use glibc build for agent on glibc systems (includes NVML support via purego)
|
||||
if binaryName == "beszel-agent" && goos == "linux" && goarch == "amd64" && isGlibc() {
|
||||
return fmt.Sprintf("%s_%s_%s_glibc.tar.gz", binaryName, goos, goarch)
|
||||
}
|
||||
return fmt.Sprintf("%s_%s_%s.tar.gz", binaryName, goos, goarch)
|
||||
}
|
||||
|
||||
func isGlibc() bool {
|
||||
for _, path := range []string{
|
||||
"/lib64/ld-linux-x86-64.so.2", // common on many distros
|
||||
"/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2", // Debian/Ubuntu
|
||||
"/lib/ld-linux-x86-64.so.2", // alternate
|
||||
} {
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
// Fallback to ldd output when present (musl ldd reports musl, glibc reports GNU libc/glibc).
|
||||
if lddPath, err := exec.LookPath("ldd"); err == nil {
|
||||
out, err := exec.Command(lddPath, "--version").CombinedOutput()
|
||||
if err == nil {
|
||||
s := strings.ToLower(string(out))
|
||||
if strings.Contains(s, "gnu libc") || strings.Contains(s, "glibc") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
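// A minimal sketch (not part of the diff) of the effect of the change above.
// exampleAssetName is a hypothetical helper showing which archive the updater
// now requests for linux/amd64, depending on the detected libc.
func exampleAssetName() string {
	// glibc system:  "beszel-agent_linux_amd64_glibc.tar.gz" (glibc build, NVML via purego)
	// musl system:   "beszel-agent_linux_amd64.tar.gz"
	// windows/amd64: "beszel-agent_windows_amd64.zip"
	return archiveSuffix("beszel-agent", "linux", "amd64")
}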
internal/ghupdate/selinux.go (new file, 66 lines)
@@ -0,0 +1,66 @@
|
||||
package ghupdate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// HandleSELinuxContext restores or applies the correct SELinux label to the binary.
|
||||
func HandleSELinuxContext(path string) error {
|
||||
out, err := exec.Command("getenforce").Output()
|
||||
if err != nil {
|
||||
// SELinux not enabled or getenforce not available
|
||||
return nil
|
||||
}
|
||||
state := strings.TrimSpace(string(out))
|
||||
if state == "Disabled" {
|
||||
return nil
|
||||
}
|
||||
|
||||
ColorPrint(ColorYellow, "SELinux is enabled; applying context…")
|
||||
|
||||
// Try persistent context via semanage+restorecon
|
||||
if success := trySemanageRestorecon(path); success {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fallback to temporary context via chcon
|
||||
if chconPath, err := exec.LookPath("chcon"); err == nil {
|
||||
if err := exec.Command(chconPath, "-t", "bin_t", path).Run(); err != nil {
|
||||
return fmt.Errorf("chcon failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("no SELinux tools available (semanage/restorecon or chcon)")
|
||||
}
|
||||
|
||||
// trySemanageRestorecon attempts to set persistent SELinux context using semanage and restorecon.
|
||||
// Returns true if successful, false otherwise.
|
||||
func trySemanageRestorecon(path string) bool {
|
||||
semanagePath, err := exec.LookPath("semanage")
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
restoreconPath, err := exec.LookPath("restorecon")
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Try to add the fcontext rule; if it already exists, try to modify it
|
||||
if err := exec.Command(semanagePath, "fcontext", "-a", "-t", "bin_t", path).Run(); err != nil {
|
||||
// Rule may already exist, try modify instead
|
||||
if err := exec.Command(semanagePath, "fcontext", "-m", "-t", "bin_t", path).Run(); err != nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Apply the context with restorecon
|
||||
if err := exec.Command(restoreconPath, "-v", path).Run(); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
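// A minimal sketch (not part of the diff) of where this is meant to be called:
// after the updater writes the new binary into place, the SELinux label is
// restored so the service can still execute it. afterBinaryReplaced is a
// hypothetical caller; only HandleSELinuxContext and ColorPrint come from the
// surrounding package.
func afterBinaryReplaced(path string) {
	if err := HandleSELinuxContext(path); err != nil {
		// Non-fatal: the update itself succeeded, only the relabel failed.
		ColorPrint(ColorYellow, "warning: "+err.Error())
	}
}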
internal/ghupdate/selinux_test.go (new file, 53 lines)
@@ -0,0 +1,53 @@
|
||||
package ghupdate
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestHandleSELinuxContext_NoSELinux(t *testing.T) {
|
||||
// Skip on SELinux systems - this test is for non-SELinux behavior
|
||||
if _, err := exec.LookPath("getenforce"); err == nil {
|
||||
t.Skip("skipping on SELinux-enabled system")
|
||||
}
|
||||
|
||||
// On systems without SELinux, getenforce will fail and the function
|
||||
// should return nil without error
|
||||
tempFile := filepath.Join(t.TempDir(), "test-binary")
|
||||
if err := os.WriteFile(tempFile, []byte("test"), 0755); err != nil {
|
||||
t.Fatalf("failed to create temp file: %v", err)
|
||||
}
|
||||
|
||||
err := HandleSELinuxContext(tempFile)
|
||||
if err != nil {
|
||||
t.Errorf("HandleSELinuxContext() on non-SELinux system returned error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleSELinuxContext_InvalidPath(t *testing.T) {
|
||||
// Skip on SELinux systems - this test is for non-SELinux behavior
|
||||
if _, err := exec.LookPath("getenforce"); err == nil {
|
||||
t.Skip("skipping on SELinux-enabled system")
|
||||
}
|
||||
|
||||
// On non-SELinux systems, getenforce fails early so even invalid paths succeed
|
||||
err := HandleSELinuxContext("/nonexistent/path/binary")
|
||||
if err != nil {
|
||||
t.Errorf("HandleSELinuxContext() with invalid path on non-SELinux system returned error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTrySemanageRestorecon_NoTools(t *testing.T) {
|
||||
// Skip if semanage is available (we don't want to modify system SELinux policy)
|
||||
if _, err := exec.LookPath("semanage"); err == nil {
|
||||
t.Skip("skipping on system with semanage available")
|
||||
}
|
||||
|
||||
// Should return false when semanage is not available
|
||||
result := trySemanageRestorecon("/some/path")
|
||||
if result {
|
||||
t.Error("trySemanageRestorecon() returned true when semanage is not available")
|
||||
}
|
||||
}
|
||||
@@ -66,6 +66,15 @@ func (acr *agentConnectRequest) agentConnect() (err error) {
|
||||
|
||||
// Check if token is an active universal token
|
||||
acr.userId, acr.isUniversalToken = universalTokenMap.GetMap().GetOk(acr.token)
|
||||
if !acr.isUniversalToken {
|
||||
// Fallback: check for a permanent universal token stored in the DB
|
||||
if rec, err := acr.hub.FindFirstRecordByFilter("universal_tokens", "token = {:token}", dbx.Params{"token": acr.token}); err == nil {
|
||||
if userID := rec.GetString("user"); userID != "" {
|
||||
acr.userId = userID
|
||||
acr.isUniversalToken = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Find matching fingerprint records for this token
|
||||
fpRecords := getFingerprintRecordsByToken(acr.token, acr.hub)
|
||||
|
||||
@@ -1169,6 +1169,106 @@ func TestMultipleSystemsWithSameUniversalToken(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestPermanentUniversalTokenFromDB verifies that a universal token persisted in the DB
|
||||
// (universal_tokens collection) is accepted for agent self-registration even if it is not
|
||||
// present in the in-memory universalTokenMap.
|
||||
func TestPermanentUniversalTokenFromDB(t *testing.T) {
|
||||
// Create hub and test app
|
||||
hub, testApp, err := createTestHub(t)
|
||||
require.NoError(t, err)
|
||||
defer testApp.Cleanup()
|
||||
|
||||
// Get the hub's SSH key
|
||||
hubSigner, err := hub.GetSSHKey("")
|
||||
require.NoError(t, err)
|
||||
goodPubKey := hubSigner.PublicKey()
|
||||
|
||||
// Create test user
|
||||
userRecord, err := createTestUser(testApp)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a permanent universal token record in the DB (do NOT add it to universalTokenMap)
|
||||
universalToken := "db-universal-token-123"
|
||||
_, err = createTestRecord(testApp, "universal_tokens", map[string]any{
|
||||
"user": userRecord.Id,
|
||||
"token": universalToken,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create HTTP server with the actual API route
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/api/beszel/agent-connect" {
|
||||
acr := &agentConnectRequest{
|
||||
hub: hub,
|
||||
req: r,
|
||||
res: w,
|
||||
}
|
||||
acr.agentConnect()
|
||||
} else {
|
||||
http.NotFound(w, r)
|
||||
}
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
// Create and configure agent
|
||||
agentDataDir := t.TempDir()
|
||||
err = os.WriteFile(filepath.Join(agentDataDir, "fingerprint"), []byte("db-token-system-fingerprint"), 0644)
|
||||
require.NoError(t, err)
|
||||
|
||||
testAgent, err := agent.NewAgent(agentDataDir)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up environment variables for the agent
|
||||
os.Setenv("BESZEL_AGENT_HUB_URL", ts.URL)
|
||||
os.Setenv("BESZEL_AGENT_TOKEN", universalToken)
|
||||
defer func() {
|
||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
||||
}()
|
||||
|
||||
// Start agent in background
|
||||
done := make(chan error, 1)
|
||||
go func() {
|
||||
serverOptions := agent.ServerOptions{
|
||||
Network: "tcp",
|
||||
Addr: "127.0.0.1:46050",
|
||||
Keys: []ssh.PublicKey{goodPubKey},
|
||||
}
|
||||
done <- testAgent.Start(serverOptions)
|
||||
}()
|
||||
|
||||
// Wait for connection result
|
||||
maxWait := 2 * time.Second
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
checkInterval := 20 * time.Millisecond
|
||||
timeout := time.After(maxWait)
|
||||
ticker := time.Tick(checkInterval)
|
||||
|
||||
connectionManager := testAgent.GetConnectionManager()
|
||||
for {
|
||||
select {
|
||||
case <-timeout:
|
||||
t.Fatalf("Expected connection to succeed but timed out - agent state: %d", connectionManager.State)
|
||||
case <-ticker:
|
||||
if connectionManager.State == agent.WebSocketConnected {
|
||||
// Success
|
||||
goto verify
|
||||
}
|
||||
case err := <-done:
|
||||
// If Start returns early, treat it as failure
|
||||
if err != nil {
|
||||
t.Fatalf("Agent failed to start/connect: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
verify:
|
||||
// Verify that a system was created for the user (self-registration path)
|
||||
systemsAfter, err := testApp.FindRecordsByFilter("systems", "users ~ {:userId}", "", -1, 0, map[string]any{"userId": userRecord.Id})
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, systemsAfter, "Expected a system to be created for DB-backed universal token")
|
||||
}
|
||||
|
||||
// TestFindOrCreateSystemForToken tests the findOrCreateSystemForToken function
|
||||
func TestFindOrCreateSystemForToken(t *testing.T) {
|
||||
hub, testApp, err := createTestHub(t)
|
||||
|
||||
@@ -415,7 +415,11 @@ func TestExpiryMap_RemoveValue_WithExpiration(t *testing.T) {
|
||||
// Wait for first value to expire
|
||||
time.Sleep(time.Millisecond * 20)
|
||||
|
||||
// Try to remove the expired value - should remove one of the "value1" entries
|
||||
// Trigger lazy cleanup of the expired key
|
||||
_, ok := em.GetOk("key1")
|
||||
assert.False(t, ok)
|
||||
|
||||
// Try to remove the remaining "value1" entry (key3)
|
||||
removedValue, ok := em.RemovebyValue("value1")
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, "value1", removedValue)
|
||||
@@ -423,14 +427,9 @@ func TestExpiryMap_RemoveValue_WithExpiration(t *testing.T) {
|
||||
// Should still have key2 (different value)
|
||||
assert.True(t, em.Has("key2"))
|
||||
|
||||
// Should have removed one of the "value1" entries (either key1 or key3)
|
||||
// But we can't predict which one due to map iteration order
|
||||
key1Exists := em.Has("key1")
|
||||
key3Exists := em.Has("key3")
|
||||
|
||||
// Exactly one of key1 or key3 should be gone
|
||||
assert.False(t, key1Exists && key3Exists) // Both shouldn't exist
|
||||
assert.True(t, key1Exists || key3Exists) // At least one should still exist
|
||||
// key1 should be gone due to expiration and key3 should be removed by value.
|
||||
assert.False(t, em.Has("key1"))
|
||||
assert.False(t, em.Has("key3"))
|
||||
}
|
||||
|
||||
func TestExpiryMap_ValueOperations_Integration(t *testing.T) {
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/henrygd/beszel/internal/users"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase"
|
||||
"github.com/pocketbase/pocketbase/apis"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
@@ -136,6 +137,7 @@ func setCollectionAuthSettings(app core.App) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// disable email auth if DISABLE_PASSWORD_AUTH env var is set
|
||||
disablePasswordAuth, _ := GetEnv("DISABLE_PASSWORD_AUTH")
|
||||
usersCollection.PasswordAuth.Enabled = disablePasswordAuth != "true"
|
||||
@@ -147,6 +149,7 @@ func setCollectionAuthSettings(app core.App) error {
|
||||
} else {
|
||||
usersCollection.CreateRule = nil
|
||||
}
|
||||
|
||||
// enable mfaOtp mfa if MFA_OTP env var is set
|
||||
mfaOtp, _ := GetEnv("MFA_OTP")
|
||||
usersCollection.OTP.Length = 6
|
||||
@@ -161,23 +164,64 @@ func setCollectionAuthSettings(app core.App) error {
|
||||
if err := app.Save(usersCollection); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
shareAllSystems, _ := GetEnv("SHARE_ALL_SYSTEMS")
|
||||
|
||||
// allow all users to access systems if SHARE_ALL_SYSTEMS is set
|
||||
systemsCollection, err := app.FindCollectionByNameOrId("systems")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
shareAllSystems, _ := GetEnv("SHARE_ALL_SYSTEMS")
|
||||
systemsReadRule := "@request.auth.id != \"\""
|
||||
if shareAllSystems != "true" {
|
||||
// default is to only show systems that the user id is assigned to
|
||||
systemsReadRule += " && users.id ?= @request.auth.id"
|
||||
var systemsReadRule string
|
||||
if shareAllSystems == "true" {
|
||||
systemsReadRule = "@request.auth.id != \"\""
|
||||
} else {
|
||||
systemsReadRule = "@request.auth.id != \"\" && users.id ?= @request.auth.id"
|
||||
}
|
||||
updateDeleteRule := systemsReadRule + " && @request.auth.role != \"readonly\""
|
||||
systemsCollection.ListRule = &systemsReadRule
|
||||
systemsCollection.ViewRule = &systemsReadRule
|
||||
systemsCollection.UpdateRule = &updateDeleteRule
|
||||
systemsCollection.DeleteRule = &updateDeleteRule
|
||||
return app.Save(systemsCollection)
|
||||
if err := app.Save(systemsCollection); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// allow all users to access all containers if SHARE_ALL_SYSTEMS is set
|
||||
containersCollection, err := app.FindCollectionByNameOrId("containers")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
containersListRule := strings.Replace(systemsReadRule, "users.id", "system.users.id", 1)
|
||||
containersCollection.ListRule = &containersListRule
|
||||
if err := app.Save(containersCollection); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// allow all users to access system-related collections if SHARE_ALL_SYSTEMS is set
|
||||
// these collections all have a "system" relation field
|
||||
systemRelatedCollections := []string{"system_details", "smart_devices", "systemd_services"}
|
||||
for _, collectionName := range systemRelatedCollections {
|
||||
collection, err := app.FindCollectionByNameOrId(collectionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
collection.ListRule = &containersListRule
|
||||
// set viewRule for collections that need it (system_details, smart_devices)
|
||||
if collection.ViewRule != nil {
|
||||
collection.ViewRule = &containersListRule
|
||||
}
|
||||
// set deleteRule for smart_devices (allows user to dismiss disk warnings)
|
||||
if collectionName == "smart_devices" {
|
||||
deleteRule := containersListRule + " && @request.auth.role != \"readonly\""
|
||||
collection.DeleteRule = &deleteRule
|
||||
}
|
||||
if err := app.Save(collection); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// registerCronJobs sets up scheduled tasks
|
||||
@@ -252,12 +296,17 @@ func (h *Hub) registerApiRoutes(se *core.ServeEvent) error {
|
||||
// update / delete user alerts
|
||||
apiAuth.POST("/user-alerts", alerts.UpsertUserAlerts)
|
||||
apiAuth.DELETE("/user-alerts", alerts.DeleteUserAlerts)
|
||||
// get container logs
|
||||
apiAuth.GET("/containers/logs", h.getContainerLogs)
|
||||
// get container info
|
||||
apiAuth.GET("/containers/info", h.getContainerInfo)
|
||||
// get SMART data
|
||||
apiAuth.GET("/smart", h.getSmartData)
|
||||
// refresh SMART devices for a system
|
||||
apiAuth.POST("/smart/refresh", h.refreshSmartData)
|
||||
// get systemd service details
|
||||
apiAuth.GET("/systemd/info", h.getSystemdInfo)
|
||||
// /containers routes
|
||||
if enabled, _ := GetEnv("CONTAINER_DETAILS"); enabled != "false" {
|
||||
// get container logs
|
||||
apiAuth.GET("/containers/logs", h.getContainerLogs)
|
||||
// get container info
|
||||
apiAuth.GET("/containers/info", h.getContainerInfo)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -267,24 +316,90 @@ func (h *Hub) getUniversalToken(e *core.RequestEvent) error {
|
||||
userID := e.Auth.Id
|
||||
query := e.Request.URL.Query()
|
||||
token := query.Get("token")
|
||||
enable := query.Get("enable")
|
||||
permanent := query.Get("permanent")
|
||||
|
||||
// helper for deleting any existing permanent token record for this user
|
||||
deletePermanent := func() error {
|
||||
rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID})
|
||||
if err != nil {
|
||||
return nil // no record
|
||||
}
|
||||
return h.Delete(rec)
|
||||
}
|
||||
|
||||
// helper for upserting a permanent token record for this user
|
||||
upsertPermanent := func(token string) error {
|
||||
rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID})
|
||||
if err == nil {
|
||||
rec.Set("token", token)
|
||||
return h.Save(rec)
|
||||
}
|
||||
|
||||
col, err := h.FindCachedCollectionByNameOrId("universal_tokens")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newRec := core.NewRecord(col)
|
||||
newRec.Set("user", userID)
|
||||
newRec.Set("token", token)
|
||||
return h.Save(newRec)
|
||||
}
|
||||
|
||||
// Disable universal tokens (both ephemeral and permanent)
|
||||
if enable == "0" {
|
||||
tokenMap.RemovebyValue(userID)
|
||||
_ = deletePermanent()
|
||||
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": false, "permanent": false})
|
||||
}
|
||||
|
||||
// Enable universal token (ephemeral or permanent)
|
||||
if enable == "1" {
|
||||
if token == "" {
|
||||
token = uuid.New().String()
|
||||
}
|
||||
|
||||
if permanent == "1" {
|
||||
// make token permanent (persist across restarts)
|
||||
tokenMap.RemovebyValue(userID)
|
||||
if err := upsertPermanent(token); err != nil {
|
||||
return err
|
||||
}
|
||||
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": true})
|
||||
}
|
||||
|
||||
// default: ephemeral mode (1 hour)
|
||||
_ = deletePermanent()
|
||||
tokenMap.Set(token, userID, time.Hour)
|
||||
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": false})
|
||||
}
|
||||
|
||||
// Read current state
|
||||
// Prefer permanent token if it exists.
|
||||
if rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID}); err == nil {
|
||||
dbToken := rec.GetString("token")
|
||||
// If no token was provided, or the caller is asking about their permanent token, return it.
|
||||
if token == "" || token == dbToken {
|
||||
return e.JSON(http.StatusOK, map[string]any{"token": dbToken, "active": true, "permanent": true})
|
||||
}
|
||||
// Token doesn't match their permanent token (avoid leaking other info)
|
||||
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": false, "permanent": false})
|
||||
}
|
||||
|
||||
// No permanent token; fall back to ephemeral token map.
|
||||
if token == "" {
|
||||
// return existing token if it exists
|
||||
if token, _, ok := tokenMap.GetByValue(userID); ok {
|
||||
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true})
|
||||
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": false})
|
||||
}
|
||||
// if no token is provided, generate a new one
|
||||
token = uuid.New().String()
|
||||
}
|
||||
response := map[string]any{"token": token}
|
||||
|
||||
switch query.Get("enable") {
|
||||
case "1":
|
||||
tokenMap.Set(token, userID, time.Hour)
|
||||
case "0":
|
||||
tokenMap.RemovebyValue(userID)
|
||||
}
|
||||
_, response["active"] = tokenMap.GetOk(token)
|
||||
// Token is considered active only if it belongs to the current user.
|
||||
activeUser, ok := tokenMap.GetOk(token)
|
||||
active := ok && activeUser == userID
|
||||
response := map[string]any{"token": token, "active": active, "permanent": false}
|
||||
return e.JSON(http.StatusOK, response)
|
||||
}
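// A minimal client-side sketch (not part of the diff) of the endpoint
// behaviour above. The path, query parameters, and response fields come from
// the handler and its test; the package, type, and function names here are
// hypothetical.
package client

import (
	"encoding/json"
	"net/http"
)

type universalTokenResponse struct {
	Token     string `json:"token"`
	Active    bool   `json:"active"`
	Permanent bool   `json:"permanent"`
}

// enablePermanentToken asks the hub to persist the user's universal token so
// it survives hub restarts (enable=1&permanent=1).
func enablePermanentToken(hubURL, authToken string) (universalTokenResponse, error) {
	var out universalTokenResponse
	req, err := http.NewRequest(http.MethodGet, hubURL+"/api/beszel/universal-token?enable=1&permanent=1", nil)
	if err != nil {
		return out, err
	}
	req.Header.Set("Authorization", authToken)
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return out, err
	}
	defer res.Body.Close()
	err = json.NewDecoder(res.Body).Decode(&out)
	return out, err
}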
@@ -304,7 +419,7 @@ func (h *Hub) containerRequestHandler(e *core.RequestEvent, fetchFunc func(*syst
|
||||
|
||||
data, err := fetchFunc(system, containerID)
|
||||
if err != nil {
|
||||
return e.JSON(http.StatusInternalServerError, map[string]string{"error": err.Error()})
|
||||
return e.JSON(http.StatusNotFound, map[string]string{"error": err.Error()})
|
||||
}
|
||||
|
||||
return e.JSON(http.StatusOK, map[string]string{responseKey: data})
|
||||
@@ -323,22 +438,46 @@ func (h *Hub) getContainerInfo(e *core.RequestEvent) error {
|
||||
}, "info")
|
||||
}
|
||||
|
||||
// getSmartData handles GET /api/beszel/smart requests
|
||||
func (h *Hub) getSmartData(e *core.RequestEvent) error {
|
||||
systemID := e.Request.URL.Query().Get("system")
|
||||
if systemID == "" {
|
||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system parameter is required"})
|
||||
// getSystemdInfo handles GET /api/beszel/systemd/info requests
|
||||
func (h *Hub) getSystemdInfo(e *core.RequestEvent) error {
|
||||
query := e.Request.URL.Query()
|
||||
systemID := query.Get("system")
|
||||
serviceName := query.Get("service")
|
||||
|
||||
if systemID == "" || serviceName == "" {
|
||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system and service parameters are required"})
|
||||
}
|
||||
system, err := h.sm.GetSystem(systemID)
|
||||
if err != nil {
|
||||
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
||||
}
|
||||
data, err := system.FetchSmartDataFromAgent()
|
||||
details, err := system.FetchSystemdInfoFromAgent(serviceName)
|
||||
if err != nil {
|
||||
return e.JSON(http.StatusInternalServerError, map[string]string{"error": err.Error()})
|
||||
return e.JSON(http.StatusNotFound, map[string]string{"error": err.Error()})
|
||||
}
|
||||
e.Response.Header().Set("Cache-Control", "public, max-age=60")
|
||||
return e.JSON(http.StatusOK, data)
|
||||
return e.JSON(http.StatusOK, map[string]any{"details": details})
|
||||
}
|
||||
|
||||
// refreshSmartData handles POST /api/beszel/smart/refresh requests
|
||||
// Fetches fresh SMART data from the agent and updates the collection
|
||||
func (h *Hub) refreshSmartData(e *core.RequestEvent) error {
|
||||
systemID := e.Request.URL.Query().Get("system")
|
||||
if systemID == "" {
|
||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system parameter is required"})
|
||||
}
|
||||
|
||||
system, err := h.sm.GetSystem(systemID)
|
||||
if err != nil {
|
||||
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
||||
}
|
||||
|
||||
// Fetch and save SMART devices
|
||||
if err := system.FetchAndSaveSmartDevices(); err != nil {
|
||||
return e.JSON(http.StatusInternalServerError, map[string]string{"error": err.Error()})
|
||||
}
|
||||
|
||||
return e.JSON(http.StatusOK, map[string]string{"status": "ok"})
|
||||
}
|
||||
|
||||
// generates key pair if it doesn't exist and returns signer
|
||||
|
||||
@@ -378,7 +378,18 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"active", "token"},
|
||||
ExpectedContent: []string{"active", "token", "permanent"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /universal-token - enable permanent should succeed",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/universal-token?enable=1&permanent=1&token=permanent-token-123",
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"permanent\":true", "permanent-token-123"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
|
||||
@@ -5,39 +5,50 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"math/rand"
|
||||
"net"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/common"
|
||||
"github.com/henrygd/beszel/internal/hub/transport"
|
||||
"github.com/henrygd/beszel/internal/hub/ws"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/container"
|
||||
"github.com/henrygd/beszel/internal/entities/smart"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||
|
||||
"github.com/henrygd/beszel"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"github.com/lxzan/gws"
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
type System struct {
|
||||
Id string `db:"id"`
|
||||
Host string `db:"host"`
|
||||
Port string `db:"port"`
|
||||
Status string `db:"status"`
|
||||
manager *SystemManager // Manager that this system belongs to
|
||||
client *ssh.Client // SSH client for fetching data
|
||||
sshTransport *transport.SSHTransport // SSH transport for requests
|
||||
data *system.CombinedData // system data from agent
|
||||
ctx context.Context // Context for stopping the updater
|
||||
cancel context.CancelFunc // Stops and removes system from updater
|
||||
WsConn *ws.WsConn // Handler for agent WebSocket connection
|
||||
agentVersion semver.Version // Agent version
|
||||
updateTicker *time.Ticker // Ticker for updating the system
|
||||
detailsFetched atomic.Bool // True if static system details have been fetched and saved
|
||||
smartFetching atomic.Bool // True if SMART devices are currently being fetched
|
||||
smartInterval time.Duration // Interval for periodic SMART data updates
|
||||
lastSmartFetch atomic.Int64 // Unix milliseconds of last SMART data fetch
|
||||
}
|
||||
|
||||
func (sm *SystemManager) NewSystem(systemId string) *System {
|
||||
@@ -110,10 +121,37 @@ func (sys *System) update() error {
|
||||
sys.handlePaused()
|
||||
return nil
|
||||
}
|
||||
options := common.DataRequestOptions{
|
||||
CacheTimeMs: uint16(interval),
|
||||
}
|
||||
// fetch system details if not already fetched
|
||||
if !sys.detailsFetched.Load() {
|
||||
options.IncludeDetails = true
|
||||
}
|
||||
|
||||
data, err := sys.fetchDataFromAgent(options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// create system records
|
||||
_, err = sys.createRecords(data)
|
||||
|
||||
// Fetch and save SMART devices when system first comes online or at intervals
|
||||
if backgroundSmartFetchEnabled() {
|
||||
if sys.smartInterval <= 0 {
|
||||
sys.smartInterval = time.Hour
|
||||
}
|
||||
lastFetch := sys.lastSmartFetch.Load()
|
||||
if time.Since(time.UnixMilli(lastFetch)) >= sys.smartInterval && sys.smartFetching.CompareAndSwap(false, true) {
|
||||
go func() {
|
||||
defer sys.smartFetching.Store(false)
|
||||
sys.lastSmartFetch.Store(time.Now().UnixMilli())
|
||||
_ = sys.FetchAndSaveSmartDevices()
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
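The interval check combined with smartFetching.CompareAndSwap(false, true) is what keeps at most one background SMART fetch in flight per system. A stripped-down sketch of the same guard pattern, separate from the diff:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
    "time"
)

func main() {
    var busy atomic.Bool
    var wg sync.WaitGroup
    for i := 0; i < 5; i++ {
        // only the first caller wins the CompareAndSwap; the rest skip the work
        if busy.CompareAndSwap(false, true) {
            wg.Add(1)
            go func() {
                defer wg.Done()
                defer busy.Store(false)
                time.Sleep(10 * time.Millisecond) // stand-in for FetchAndSaveSmartDevices
                fmt.Println("fetched once")
            }()
        }
    }
    wg.Wait()
}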
||||
@@ -138,12 +176,11 @@ func (sys *System) createRecords(data *system.CombinedData) (*core.Record, error
|
||||
}
|
||||
hub := sys.manager.hub
|
||||
err = hub.RunInTransaction(func(txApp core.App) error {
|
||||
// add system_stats record
|
||||
systemStatsCollection, err := txApp.FindCachedCollectionByNameOrId("system_stats")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
systemStatsRecord := core.NewRecord(systemStatsCollection)
|
||||
systemStatsRecord.Set("system", systemRecord.Id)
|
||||
systemStatsRecord.Set("stats", data.Stats)
|
||||
@@ -151,14 +188,14 @@ func (sys *System) createRecords(data *system.CombinedData) (*core.Record, error
|
||||
if err := txApp.SaveNoValidate(systemStatsRecord); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// add containers and container_stats records
|
||||
if len(data.Containers) > 0 {
|
||||
// add / update containers records
|
||||
if data.Containers[0].Id != "" {
|
||||
if err := createContainerRecords(txApp, data.Containers, sys.Id); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// add new container_stats record
|
||||
containerStatsCollection, err := txApp.FindCachedCollectionByNameOrId("container_stats")
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -171,9 +208,28 @@ func (sys *System) createRecords(data *system.CombinedData) (*core.Record, error
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// add new systemd_stats record
|
||||
if len(data.SystemdServices) > 0 {
|
||||
if err := createSystemdStatsRecords(txApp, data.SystemdServices, sys.Id); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// add system details record
|
||||
if data.Details != nil {
|
||||
if err := createSystemDetailsRecord(txApp, data.Details, sys.Id); err != nil {
|
||||
return err
|
||||
}
|
||||
sys.detailsFetched.Store(true)
|
||||
// update smart interval if it's set on the agent side
|
||||
if data.Details.SmartInterval > 0 {
|
||||
sys.smartInterval = data.Details.SmartInterval
|
||||
}
|
||||
}
|
||||
|
||||
// update system record (do this last because it triggers alerts and we need above records to be inserted first)
|
||||
systemRecord.Set("status", up)
|
||||
|
||||
systemRecord.Set("info", data.Info)
|
||||
if err := txApp.SaveNoValidate(systemRecord); err != nil {
|
||||
return err
|
||||
@@ -184,11 +240,68 @@ func (sys *System) createRecords(data *system.CombinedData) (*core.Record, error
|
||||
return systemRecord, err
|
||||
}
|
||||
|
||||
func createSystemDetailsRecord(app core.App, data *system.Details, systemId string) error {
|
||||
collectionName := "system_details"
|
||||
params := dbx.Params{
|
||||
"id": systemId,
|
||||
"system": systemId,
|
||||
"hostname": data.Hostname,
|
||||
"kernel": data.Kernel,
|
||||
"cores": data.Cores,
|
||||
"threads": data.Threads,
|
||||
"cpu": data.CpuModel,
|
||||
"os": data.Os,
|
||||
"os_name": data.OsName,
|
||||
"arch": data.Arch,
|
||||
"memory": data.MemoryTotal,
|
||||
"podman": data.Podman,
|
||||
"updated": time.Now().UTC(),
|
||||
}
|
||||
result, err := app.DB().Update(collectionName, params, dbx.HashExp{"id": systemId}).Execute()
// guard against a nil result when the update itself fails
var rowsAffected int64
if err == nil && result != nil {
rowsAffected, _ = result.RowsAffected()
}
if err != nil || rowsAffected == 0 {
|
||||
_, err = app.DB().Insert(collectionName, params).Execute()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func createSystemdStatsRecords(app core.App, data []*systemd.Service, systemId string) error {
|
||||
if len(data) == 0 {
|
||||
return nil
|
||||
}
|
||||
// shared params for all records
|
||||
params := dbx.Params{
|
||||
"system": systemId,
|
||||
"updated": time.Now().UTC().UnixMilli(),
|
||||
}
|
||||
|
||||
valueStrings := make([]string, 0, len(data))
|
||||
for i, service := range data {
|
||||
suffix := fmt.Sprintf("%d", i)
|
||||
valueStrings = append(valueStrings, fmt.Sprintf("({:id%[1]s}, {:system}, {:name%[1]s}, {:state%[1]s}, {:sub%[1]s}, {:cpu%[1]s}, {:cpuPeak%[1]s}, {:memory%[1]s}, {:memPeak%[1]s}, {:updated})", suffix))
|
||||
params["id"+suffix] = makeStableHashId(systemId, service.Name)
|
||||
params["name"+suffix] = service.Name
|
||||
params["state"+suffix] = service.State
|
||||
params["sub"+suffix] = service.Sub
|
||||
params["cpu"+suffix] = service.Cpu
|
||||
params["cpuPeak"+suffix] = service.CpuPeak
|
||||
params["memory"+suffix] = service.Mem
|
||||
params["memPeak"+suffix] = service.MemPeak
|
||||
}
|
||||
queryString := fmt.Sprintf(
|
||||
"INSERT INTO systemd_services (id, system, name, state, sub, cpu, cpuPeak, memory, memPeak, updated) VALUES %s ON CONFLICT(id) DO UPDATE SET system = excluded.system, name = excluded.name, state = excluded.state, sub = excluded.sub, cpu = excluded.cpu, cpuPeak = excluded.cpuPeak, memory = excluded.memory, memPeak = excluded.memPeak, updated = excluded.updated",
|
||||
strings.Join(valueStrings, ","),
|
||||
)
|
||||
_, err := app.DB().NewQuery(queryString).Bind(params).Execute()
|
||||
return err
|
||||
}
|
||||
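For reference, with a single service the statement assembled above expands to roughly the following; the parameter names follow the numeric suffix scheme used in the loop:

INSERT INTO systemd_services (id, system, name, state, sub, cpu, cpuPeak, memory, memPeak, updated)
VALUES ({:id0}, {:system}, {:name0}, {:state0}, {:sub0}, {:cpu0}, {:cpuPeak0}, {:memory0}, {:memPeak0}, {:updated})
ON CONFLICT(id) DO UPDATE SET system = excluded.system, name = excluded.name, state = excluded.state,
  sub = excluded.sub, cpu = excluded.cpu, cpuPeak = excluded.cpuPeak, memory = excluded.memory,
  memPeak = excluded.memPeak, updated = excluded.updated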
|
||||
// createContainerRecords creates container records
|
||||
func createContainerRecords(app core.App, data []*container.Stats, systemId string) error {
|
||||
if len(data) == 0 {
|
||||
return nil
|
||||
}
|
||||
// shared params for all records
|
||||
params := dbx.Params{
|
||||
"system": systemId,
|
||||
"updated": time.Now().UTC().UnixMilli(),
|
||||
@@ -204,7 +317,11 @@ func createContainerRecords(app core.App, data []*container.Stats, systemId stri
|
||||
params["health"+suffix] = container.Health
|
||||
params["cpu"+suffix] = container.Cpu
|
||||
params["memory"+suffix] = container.Mem
|
||||
params["net"+suffix] = container.NetworkSent + container.NetworkRecv
|
||||
netBytes := container.Bandwidth[0] + container.Bandwidth[1]
|
||||
if netBytes == 0 {
|
||||
netBytes = uint64((container.NetworkSent + container.NetworkRecv) * 1024 * 1024)
|
||||
}
|
||||
params["net"+suffix] = netBytes
|
||||
}
|
||||
queryString := fmt.Sprintf(
|
||||
"INSERT INTO containers (id, system, name, image, status, health, cpu, memory, net, updated) VALUES %s ON CONFLICT(id) DO UPDATE SET system = excluded.system, name = excluded.name, image = excluded.image, status = excluded.status, health = excluded.health, cpu = excluded.cpu, memory = excluded.memory, net = excluded.net, updated = excluded.updated",
|
||||
@@ -250,8 +367,78 @@ func (sys *System) getContext() (context.Context, context.CancelFunc) {
|
||||
return sys.ctx, sys.cancel
|
||||
}
|
||||
|
||||
// request sends a request to the agent, trying WebSocket first, then SSH.
|
||||
// This is the unified request method that uses the transport abstraction.
|
||||
func (sys *System) request(ctx context.Context, action common.WebSocketAction, req any, dest any) error {
|
||||
// Try WebSocket first
|
||||
if sys.WsConn != nil && sys.WsConn.IsConnected() {
|
||||
wsTransport := transport.NewWebSocketTransport(sys.WsConn)
|
||||
if err := wsTransport.Request(ctx, action, req, dest); err == nil {
|
||||
return nil
|
||||
} else if !shouldFallbackToSSH(err) {
|
||||
return err
|
||||
} else if shouldCloseWebSocket(err) {
|
||||
sys.closeWebSocketConnection()
|
||||
}
|
||||
}
|
||||
|
||||
// Fall back to SSH if WebSocket fails
|
||||
if err := sys.ensureSSHTransport(); err != nil {
|
||||
return err
|
||||
}
|
||||
err := sys.sshTransport.RequestWithRetry(ctx, action, req, dest, 1)
|
||||
// Keep legacy SSH client/version fields in sync for other code paths.
|
||||
if sys.sshTransport != nil {
|
||||
sys.client = sys.sshTransport.GetClient()
|
||||
sys.agentVersion = sys.sshTransport.GetAgentVersion()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func shouldFallbackToSSH(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
|
||||
return true
|
||||
}
|
||||
if errors.Is(err, gws.ErrConnClosed) {
|
||||
return true
|
||||
}
|
||||
return errors.Is(err, transport.ErrWebSocketNotConnected)
|
||||
}
|
||||
|
||||
func shouldCloseWebSocket(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
return errors.Is(err, gws.ErrConnClosed) || errors.Is(err, transport.ErrWebSocketNotConnected)
|
||||
}
|
||||
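A quick in-package sketch of how these helpers classify errors (assumes fmt, context, errors, gws, and the transport package are in scope; it only restates the rules in the two functions above):

fmt.Println(shouldFallbackToSSH(transport.ErrWebSocketNotConnected)) // true: retry the request over SSH
fmt.Println(shouldFallbackToSSH(context.DeadlineExceeded))           // true: the agent may still answer via SSH
fmt.Println(shouldCloseWebSocket(gws.ErrConnClosed))                 // true: also drop the dead socket
fmt.Println(shouldFallbackToSSH(errors.New("agent error")))          // false: surface the agent's error as-is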
|
||||
// ensureSSHTransport ensures the SSH transport is initialized and connected.
|
||||
func (sys *System) ensureSSHTransport() error {
|
||||
if sys.sshTransport == nil {
|
||||
if sys.manager.sshConfig == nil {
|
||||
if err := sys.manager.createSSHClientConfig(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
sys.sshTransport = transport.NewSSHTransport(transport.SSHTransportConfig{
|
||||
Host: sys.Host,
|
||||
Port: sys.Port,
|
||||
Config: sys.manager.sshConfig,
|
||||
Timeout: 4 * time.Second,
|
||||
})
|
||||
}
|
||||
// Sync client state with transport
|
||||
if sys.client != nil {
|
||||
sys.sshTransport.SetClient(sys.client)
|
||||
sys.sshTransport.SetAgentVersion(sys.agentVersion)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// fetchDataFromAgent attempts to fetch data from the agent, prioritizing WebSocket if available.
|
||||
func (sys *System) fetchDataFromAgent(options common.DataRequestOptions) (*system.CombinedData, error) {
|
||||
if sys.data == nil {
|
||||
sys.data = &system.CombinedData{}
|
||||
@@ -277,108 +464,58 @@ func (sys *System) fetchDataViaWebSocket(options common.DataRequestOptions) (*sy
|
||||
if sys.WsConn == nil || !sys.WsConn.IsConnected() {
|
||||
return nil, errors.New("no websocket connection")
|
||||
}
|
||||
wsTransport := transport.NewWebSocketTransport(sys.WsConn)
|
||||
err := wsTransport.Request(context.Background(), common.GetData, options, sys.data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return sys.data, nil
|
||||
}
|
||||
|
||||
// fetchStringFromAgentViaSSH is a generic function to fetch strings via SSH
|
||||
func (sys *System) fetchStringFromAgentViaSSH(action common.WebSocketAction, requestData any, errorMsg string) (string, error) {
|
||||
var result string
|
||||
err := sys.runSSHOperation(4*time.Second, 1, func(session *ssh.Session) (bool, error) {
|
||||
stdout, err := session.StdoutPipe()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
stdin, stdinErr := session.StdinPipe()
|
||||
if stdinErr != nil {
|
||||
return false, stdinErr
|
||||
}
|
||||
if err := session.Shell(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
req := common.HubRequest[any]{Action: action, Data: requestData}
|
||||
_ = cbor.NewEncoder(stdin).Encode(req)
|
||||
_ = stdin.Close()
|
||||
var resp common.AgentResponse
|
||||
err = cbor.NewDecoder(stdout).Decode(&resp)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if resp.String == nil {
|
||||
return false, errors.New(errorMsg)
|
||||
}
|
||||
result = *resp.String
|
||||
return false, nil
|
||||
})
|
||||
return result, err
|
||||
}
|
||||
|
||||
// FetchContainerInfoFromAgent fetches container info from the agent
|
||||
func (sys *System) FetchContainerInfoFromAgent(containerID string) (string, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
var result string
|
||||
err := sys.request(ctx, common.GetContainerInfo, common.ContainerInfoRequest{ContainerID: containerID}, &result)
|
||||
return result, err
|
||||
}
|
||||
|
||||
// FetchContainerLogsFromAgent fetches container logs from the agent
|
||||
func (sys *System) FetchContainerLogsFromAgent(containerID string) (string, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
var result string
|
||||
err := sys.request(ctx, common.GetContainerLogs, common.ContainerLogsRequest{ContainerID: containerID}, &result)
|
||||
return result, err
|
||||
}
|
||||
|
||||
// FetchSystemdInfoFromAgent fetches detailed systemd service information from the agent
|
||||
func (sys *System) FetchSystemdInfoFromAgent(serviceName string) (systemd.ServiceDetails, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
var result systemd.ServiceDetails
|
||||
err := sys.request(ctx, common.GetSystemdInfo, common.SystemdInfoRequest{ServiceName: serviceName}, &result)
|
||||
return result, err
|
||||
}
|
||||
|
||||
// FetchSmartDataFromAgent fetches SMART data from the agent
|
||||
func (sys *System) FetchSmartDataFromAgent() (map[string]smart.SmartData, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
defer cancel()
|
||||
var result map[string]smart.SmartData
|
||||
err := sys.request(ctx, common.GetSmartData, nil, &result)
|
||||
return result, err
|
||||
}
|
||||
|
||||
func makeStableHashId(strings ...string) string {
|
||||
hash := fnv.New32a()
|
||||
for _, str := range strings {
|
||||
hash.Write([]byte(str))
|
||||
}
|
||||
return fmt.Sprintf("%x", hash.Sum32())
|
||||
}
|
||||
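makeStableHashId is what lets the systemd and SMART upserts key records by ON CONFLICT(id): the same (system, name) pair always hashes to the same short hex id. A small standalone sketch of the equivalent construction; the printed value depends entirely on the inputs:

package main

import (
    "fmt"
    "hash/fnv"
)

// stableID mirrors makeStableHashId: FNV-1a over the concatenated parts.
func stableID(parts ...string) string {
    h := fnv.New32a()
    for _, p := range parts {
        h.Write([]byte(p))
    }
    return fmt.Sprintf("%x", h.Sum32())
}

func main() {
    // deterministic across runs; typically 8 hex characters for inputs like these
    fmt.Println(stableID("sys-123", "nginx.service"))
}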
|
||||
// fetchDataViaSSH handles fetching data using SSH.
|
||||
// This function encapsulates the original SSH logic.
|
||||
// It updates sys.data directly upon successful fetch.
|
||||
@@ -532,6 +669,9 @@ func (sys *System) createSessionWithTimeout(timeout time.Duration) (*ssh.Session
|
||||
|
||||
// closeSSHConnection closes the SSH connection but keeps the system in the manager
|
||||
func (sys *System) closeSSHConnection() {
|
||||
if sys.sshTransport != nil {
|
||||
sys.sshTransport.Close()
|
||||
}
|
||||
if sys.client != nil {
|
||||
sys.client.Close()
|
||||
sys.client = nil
|
||||
|
||||
92  internal/hub/systems/system_smart.go  Normal file
@@ -0,0 +1,92 @@
|
||||
package systems
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/smart"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
)
|
||||
|
||||
// FetchAndSaveSmartDevices fetches SMART data from the agent and saves it to the database
|
||||
func (sys *System) FetchAndSaveSmartDevices() error {
|
||||
smartData, err := sys.FetchSmartDataFromAgent()
|
||||
if err != nil || len(smartData) == 0 {
|
||||
return err
|
||||
}
|
||||
return sys.saveSmartDevices(smartData)
|
||||
}
|
||||
|
||||
// saveSmartDevices saves SMART device data to the smart_devices collection
|
||||
func (sys *System) saveSmartDevices(smartData map[string]smart.SmartData) error {
|
||||
if len(smartData) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
hub := sys.manager.hub
|
||||
collection, err := hub.FindCachedCollectionByNameOrId("smart_devices")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for deviceKey, device := range smartData {
|
||||
if err := sys.upsertSmartDeviceRecord(collection, deviceKey, device); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sys *System) upsertSmartDeviceRecord(collection *core.Collection, deviceKey string, device smart.SmartData) error {
|
||||
hub := sys.manager.hub
|
||||
recordID := makeStableHashId(sys.Id, deviceKey)
|
||||
|
||||
record, err := hub.FindRecordById(collection, recordID)
|
||||
if err != nil {
|
||||
if !errors.Is(err, sql.ErrNoRows) {
|
||||
return err
|
||||
}
|
||||
record = core.NewRecord(collection)
|
||||
record.Set("id", recordID)
|
||||
}
|
||||
|
||||
name := device.DiskName
|
||||
if name == "" {
|
||||
name = deviceKey
|
||||
}
|
||||
|
||||
powerOnHours, powerCycles := extractPowerMetrics(device.Attributes)
|
||||
record.Set("system", sys.Id)
|
||||
record.Set("name", name)
|
||||
record.Set("model", device.ModelName)
|
||||
record.Set("state", device.SmartStatus)
|
||||
record.Set("capacity", device.Capacity)
|
||||
record.Set("temp", device.Temperature)
|
||||
record.Set("firmware", device.FirmwareVersion)
|
||||
record.Set("serial", device.SerialNumber)
|
||||
record.Set("type", device.DiskType)
|
||||
record.Set("hours", powerOnHours)
|
||||
record.Set("cycles", powerCycles)
|
||||
record.Set("attributes", device.Attributes)
|
||||
|
||||
return hub.SaveNoValidate(record)
|
||||
}
|
||||
|
||||
// extractPowerMetrics extracts power on hours and power cycles from SMART attributes
|
||||
func extractPowerMetrics(attributes []*smart.SmartAttribute) (powerOnHours, powerCycles uint64) {
|
||||
for _, attr := range attributes {
|
||||
nameLower := strings.ToLower(attr.Name)
|
||||
if powerOnHours == 0 && (strings.Contains(nameLower, "poweronhours") || strings.Contains(nameLower, "power_on_hours")) {
|
||||
powerOnHours = attr.RawValue
|
||||
}
|
||||
if powerCycles == 0 && ((strings.Contains(nameLower, "power") && strings.Contains(nameLower, "cycle")) || strings.Contains(nameLower, "startstopcycles")) {
|
||||
powerCycles = attr.RawValue
|
||||
}
|
||||
if powerOnHours > 0 && powerCycles > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
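A quick sketch of how extractPowerMetrics behaves for typical smartctl attribute names. The sample names and values are illustrative rather than taken from the diff, and the snippet assumes it lives in the systems package with fmt and the smart entity package imported:

// attribute names are typical smartctl names; exact names vary by drive
func ExampleExtractPowerMetrics() {
    attrs := []*smart.SmartAttribute{
        {Name: "Power_On_Hours", RawValue: 12345},
        {Name: "Power_Cycle_Count", RawValue: 87},
    }
    hours, cycles := extractPowerMetrics(attrs)
    fmt.Println(hours, cycles)
    // Output: 12345 87
}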
75  internal/hub/systems/system_systemd_test.go  Normal file
@@ -0,0 +1,75 @@
|
||||
//go:build testing
|
||||
|
||||
package systems
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGetSystemdServiceId(t *testing.T) {
|
||||
t.Run("deterministic output", func(t *testing.T) {
|
||||
systemId := "sys-123"
|
||||
serviceName := "nginx.service"
|
||||
|
||||
// Call multiple times and ensure same result
|
||||
id1 := makeStableHashId(systemId, serviceName)
|
||||
id2 := makeStableHashId(systemId, serviceName)
|
||||
id3 := makeStableHashId(systemId, serviceName)
|
||||
|
||||
assert.Equal(t, id1, id2)
|
||||
assert.Equal(t, id2, id3)
|
||||
assert.NotEmpty(t, id1)
|
||||
})
|
||||
|
||||
t.Run("different inputs produce different ids", func(t *testing.T) {
|
||||
systemId1 := "sys-123"
|
||||
systemId2 := "sys-456"
|
||||
serviceName1 := "nginx.service"
|
||||
serviceName2 := "apache.service"
|
||||
|
||||
id1 := makeStableHashId(systemId1, serviceName1)
|
||||
id2 := makeStableHashId(systemId2, serviceName1)
|
||||
id3 := makeStableHashId(systemId1, serviceName2)
|
||||
id4 := makeStableHashId(systemId2, serviceName2)
|
||||
|
||||
// All IDs should be different
|
||||
assert.NotEqual(t, id1, id2)
|
||||
assert.NotEqual(t, id1, id3)
|
||||
assert.NotEqual(t, id1, id4)
|
||||
assert.NotEqual(t, id2, id3)
|
||||
assert.NotEqual(t, id2, id4)
|
||||
assert.NotEqual(t, id3, id4)
|
||||
})
|
||||
|
||||
t.Run("consistent length", func(t *testing.T) {
|
||||
testCases := []struct {
|
||||
systemId string
|
||||
serviceName string
|
||||
}{
|
||||
{"short", "short.service"},
|
||||
{"very-long-system-id-that-might-be-used-in-practice", "very-long-service-name.service"},
|
||||
{"", "empty-system.service"},
|
||||
{"empty-service", ""},
|
||||
{"", ""},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
id := makeStableHashId(tc.systemId, tc.serviceName)
|
||||
// FNV-32 produces 8 hex characters
|
||||
assert.Len(t, id, 8, "ID should be 8 characters for systemId='%s', serviceName='%s'", tc.systemId, tc.serviceName)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("hexadecimal output", func(t *testing.T) {
|
||||
id := makeStableHashId("test-system", "test-service")
|
||||
assert.NotEmpty(t, id)
|
||||
|
||||
// Should only contain hexadecimal characters
|
||||
for _, char := range id {
|
||||
assert.True(t, (char >= '0' && char <= '9') || (char >= 'a' && char <= 'f'),
|
||||
"ID should only contain hexadecimal characters, got: %s", id)
|
||||
}
|
||||
})
|
||||
}
|
||||
10  internal/hub/systems/systems_production.go  Normal file
@@ -0,0 +1,10 @@
|
||||
//go:build !testing
|
||||
// +build !testing
|
||||
|
||||
package systems
|
||||
|
||||
// Background SMART fetching is enabled in production but disabled for tests (systems_test_helpers.go).
|
||||
//
|
||||
// The hub integration tests create/replace systems and clean up the test apps quickly.
|
||||
// Background SMART fetching can outlive teardown and crash in PocketBase internals (nil DB).
|
||||
func backgroundSmartFetchEnabled() bool { return true }
|
||||
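The production/testing split above relies on Go build tags: this file compiles by default, while its //go:build testing counterpart (included later in this diff) replaces it when the test tag is set, for example:

go test -tags testing ./internal/hub/...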
@@ -266,18 +266,20 @@ func testOld(t *testing.T, hub *tests.TestHub) {
|
||||
|
||||
// Create test system data
|
||||
testData := &system.CombinedData{
|
||||
Details: &system.Details{
|
||||
Hostname: "data-test.example.com",
|
||||
Kernel: "5.15.0-generic",
|
||||
Cores: 4,
|
||||
Threads: 8,
|
||||
CpuModel: "Test CPU",
|
||||
},
|
||||
Info: system.Info{
|
||||
Hostname: "data-test.example.com",
|
||||
KernelVersion: "5.15.0-generic",
|
||||
Cores: 4,
|
||||
Threads: 8,
|
||||
CpuModel: "Test CPU",
|
||||
Uptime: 3600,
|
||||
Cpu: 25.5,
|
||||
MemPct: 40.2,
|
||||
DiskPct: 60.0,
|
||||
Bandwidth: 100.0,
|
||||
AgentVersion: "1.0.0",
|
||||
},
|
||||
Stats: system.Stats{
|
||||
Cpu: 25.5,
|
||||
|
||||
@@ -10,6 +10,13 @@ import (
|
||||
entities "github.com/henrygd/beszel/internal/entities/system"
|
||||
)
|
||||
|
||||
// The hub integration tests create/replace systems and clean up the test apps quickly.
|
||||
// Background SMART fetching can outlive teardown and crash in PocketBase internals (nil DB).
|
||||
//
|
||||
// We keep the explicit SMART refresh endpoint / method available, but disable
|
||||
// the automatic background fetch during tests.
|
||||
func backgroundSmartFetchEnabled() bool { return false }
|
||||
|
||||
// TESTING ONLY: GetSystemCount returns the number of systems in the store
|
||||
func (sm *SystemManager) GetSystemCount() int {
|
||||
return sm.systems.Length()
|
||||
|
||||
227  internal/hub/transport/ssh.go  Normal file
@@ -0,0 +1,227 @@
|
||||
package transport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"github.com/henrygd/beszel/internal/common"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
// SSHTransport implements Transport over SSH connections.
|
||||
type SSHTransport struct {
|
||||
client *ssh.Client
|
||||
config *ssh.ClientConfig
|
||||
host string
|
||||
port string
|
||||
agentVersion semver.Version
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// SSHTransportConfig holds configuration for creating an SSH transport.
|
||||
type SSHTransportConfig struct {
|
||||
Host string
|
||||
Port string
|
||||
Config *ssh.ClientConfig
|
||||
AgentVersion semver.Version
|
||||
Timeout time.Duration
|
||||
}
|
||||
|
||||
// NewSSHTransport creates a new SSH transport with the given configuration.
|
||||
func NewSSHTransport(cfg SSHTransportConfig) *SSHTransport {
|
||||
timeout := cfg.Timeout
|
||||
if timeout == 0 {
|
||||
timeout = 4 * time.Second
|
||||
}
|
||||
return &SSHTransport{
|
||||
config: cfg.Config,
|
||||
host: cfg.Host,
|
||||
port: cfg.Port,
|
||||
agentVersion: cfg.AgentVersion,
|
||||
timeout: timeout,
|
||||
}
|
||||
}
|
||||
|
||||
// SetClient sets the SSH client for reuse across requests.
|
||||
func (t *SSHTransport) SetClient(client *ssh.Client) {
|
||||
t.client = client
|
||||
}
|
||||
|
||||
// SetAgentVersion sets the agent version (extracted from SSH handshake).
|
||||
func (t *SSHTransport) SetAgentVersion(version semver.Version) {
|
||||
t.agentVersion = version
|
||||
}
|
||||
|
||||
// GetClient returns the current SSH client (for connection management).
|
||||
func (t *SSHTransport) GetClient() *ssh.Client {
|
||||
return t.client
|
||||
}
|
||||
|
||||
// GetAgentVersion returns the agent version.
|
||||
func (t *SSHTransport) GetAgentVersion() semver.Version {
|
||||
return t.agentVersion
|
||||
}
|
||||
|
||||
// Request sends a request to the agent via SSH and unmarshals the response.
|
||||
func (t *SSHTransport) Request(ctx context.Context, action common.WebSocketAction, req any, dest any) error {
|
||||
if t.client == nil {
|
||||
if err := t.connect(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
session, err := t.createSessionWithTimeout(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer session.Close()
|
||||
|
||||
stdout, err := session.StdoutPipe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stdin, err := session.StdinPipe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := session.Shell(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Send request
|
||||
hubReq := common.HubRequest[any]{Action: action, Data: req}
|
||||
if err := cbor.NewEncoder(stdin).Encode(hubReq); err != nil {
|
||||
return fmt.Errorf("failed to encode request: %w", err)
|
||||
}
|
||||
stdin.Close()
|
||||
|
||||
// Read response
|
||||
var resp common.AgentResponse
|
||||
if err := cbor.NewDecoder(stdout).Decode(&resp); err != nil {
|
||||
return fmt.Errorf("failed to decode response: %w", err)
|
||||
}
|
||||
|
||||
if resp.Error != "" {
|
||||
return errors.New(resp.Error)
|
||||
}
|
||||
|
||||
if err := session.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return UnmarshalResponse(resp, action, dest)
|
||||
}
|
||||
|
||||
// IsConnected returns true if the SSH connection is active.
|
||||
func (t *SSHTransport) IsConnected() bool {
|
||||
return t.client != nil
|
||||
}
|
||||
|
||||
// Close terminates the SSH connection.
|
||||
func (t *SSHTransport) Close() {
|
||||
if t.client != nil {
|
||||
t.client.Close()
|
||||
t.client = nil
|
||||
}
|
||||
}
|
||||
|
||||
// connect establishes a new SSH connection.
|
||||
func (t *SSHTransport) connect() error {
|
||||
if t.config == nil {
|
||||
return errors.New("SSH config not set")
|
||||
}
|
||||
|
||||
network := "tcp"
|
||||
host := t.host
|
||||
if strings.HasPrefix(host, "/") {
|
||||
network = "unix"
|
||||
} else {
|
||||
host = net.JoinHostPort(host, t.port)
|
||||
}
|
||||
|
||||
client, err := ssh.Dial(network, host, t.config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.client = client
|
||||
|
||||
// Extract agent version from server version string
|
||||
t.agentVersion, _ = extractAgentVersion(string(client.Conn.ServerVersion()))
|
||||
return nil
|
||||
}
|
||||
|
||||
// createSessionWithTimeout creates a new SSH session with a timeout.
|
||||
func (t *SSHTransport) createSessionWithTimeout(ctx context.Context) (*ssh.Session, error) {
|
||||
if t.client == nil {
|
||||
return nil, errors.New("client not initialized")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, t.timeout)
|
||||
defer cancel()
|
||||
|
||||
sessionChan := make(chan *ssh.Session, 1)
|
||||
errChan := make(chan error, 1)
|
||||
|
||||
go func() {
|
||||
session, err := t.client.NewSession()
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
} else {
|
||||
sessionChan <- session
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case session := <-sessionChan:
|
||||
return session, nil
|
||||
case err := <-errChan:
|
||||
return nil, err
|
||||
case <-ctx.Done():
|
||||
return nil, errors.New("timeout creating session")
|
||||
}
|
||||
}
|
||||
|
||||
// extractAgentVersion extracts the beszel version from SSH server version string.
|
||||
func extractAgentVersion(versionString string) (semver.Version, error) {
|
||||
_, after, _ := strings.Cut(versionString, "_")
|
||||
return semver.Parse(after)
|
||||
}
|
||||
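An in-package sketch of extractAgentVersion; the exact banner text is an assumption, but the agent is expected to append its semver after an underscore in the SSH server version string:

// illustrative only; the banner text is an assumption
v, err := extractAgentVersion("SSH-2.0-beszel_0.12.0")
fmt.Println(v.String(), err) // "0.12.0" <nil>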
|
||||
// RequestWithRetry sends a request with automatic retry on connection failures.
|
||||
func (t *SSHTransport) RequestWithRetry(ctx context.Context, action common.WebSocketAction, req any, dest any, retries int) error {
|
||||
var lastErr error
|
||||
for attempt := 0; attempt <= retries; attempt++ {
|
||||
err := t.Request(ctx, action, req, dest)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
lastErr = err
|
||||
|
||||
// Check if it's a connection error that warrants a retry
|
||||
if isConnectionError(err) && attempt < retries {
|
||||
t.Close()
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
return lastErr
|
||||
}
|
||||
|
||||
// isConnectionError checks if an error indicates a connection problem.
|
||||
func isConnectionError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
errStr := err.Error()
|
||||
return strings.Contains(errStr, "connection") ||
|
||||
strings.Contains(errStr, "EOF") ||
|
||||
strings.Contains(errStr, "closed") ||
|
||||
errors.Is(err, io.EOF)
|
||||
}
|
||||
112  internal/hub/transport/transport.go  Normal file
@@ -0,0 +1,112 @@
|
||||
// Package transport provides a unified abstraction for hub-agent communication
|
||||
// over different transports (WebSocket, SSH).
|
||||
package transport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"github.com/henrygd/beszel/internal/common"
|
||||
"github.com/henrygd/beszel/internal/entities/smart"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||
)
|
||||
|
||||
// Transport defines the interface for hub-agent communication.
|
||||
// Both WebSocket and SSH transports implement this interface.
|
||||
type Transport interface {
|
||||
// Request sends a request to the agent and unmarshals the response into dest.
|
||||
// The dest parameter should be a pointer to the expected response type.
|
||||
Request(ctx context.Context, action common.WebSocketAction, req any, dest any) error
|
||||
// IsConnected returns true if the transport connection is active.
|
||||
IsConnected() bool
|
||||
// Close terminates the transport connection.
|
||||
Close()
|
||||
}
|
||||
|
||||
// UnmarshalResponse unmarshals an AgentResponse into the destination type.
|
||||
// It first checks the generic Data field (0.19+ agents), then falls back
|
||||
// to legacy typed fields for backward compatibility with 0.18.0 agents.
|
||||
func UnmarshalResponse(resp common.AgentResponse, action common.WebSocketAction, dest any) error {
|
||||
if dest == nil {
|
||||
return errors.New("nil destination")
|
||||
}
|
||||
// Try generic Data field first (0.19+)
|
||||
if len(resp.Data) > 0 {
|
||||
if err := cbor.Unmarshal(resp.Data, dest); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal generic response data: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Fall back to legacy typed fields for older agents/hubs.
|
||||
return unmarshalLegacyResponse(resp, action, dest)
|
||||
}
|
||||
|
||||
// unmarshalLegacyResponse handles legacy responses that use typed fields.
|
||||
func unmarshalLegacyResponse(resp common.AgentResponse, action common.WebSocketAction, dest any) error {
|
||||
switch action {
|
||||
case common.GetData:
|
||||
d, ok := dest.(*system.CombinedData)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected dest type for GetData: %T", dest)
|
||||
}
|
||||
if resp.SystemData == nil {
|
||||
return errors.New("no system data in response")
|
||||
}
|
||||
*d = *resp.SystemData
|
||||
return nil
|
||||
case common.CheckFingerprint:
|
||||
d, ok := dest.(*common.FingerprintResponse)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected dest type for CheckFingerprint: %T", dest)
|
||||
}
|
||||
if resp.Fingerprint == nil {
|
||||
return errors.New("no fingerprint in response")
|
||||
}
|
||||
*d = *resp.Fingerprint
|
||||
return nil
|
||||
case common.GetContainerLogs:
|
||||
d, ok := dest.(*string)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected dest type for GetContainerLogs: %T", dest)
|
||||
}
|
||||
if resp.String == nil {
|
||||
return errors.New("no logs in response")
|
||||
}
|
||||
*d = *resp.String
|
||||
return nil
|
||||
case common.GetContainerInfo:
|
||||
d, ok := dest.(*string)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected dest type for GetContainerInfo: %T", dest)
|
||||
}
|
||||
if resp.String == nil {
|
||||
return errors.New("no info in response")
|
||||
}
|
||||
*d = *resp.String
|
||||
return nil
|
||||
case common.GetSmartData:
|
||||
d, ok := dest.(*map[string]smart.SmartData)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected dest type for GetSmartData: %T", dest)
|
||||
}
|
||||
if resp.SmartData == nil {
|
||||
return errors.New("no SMART data in response")
|
||||
}
|
||||
*d = resp.SmartData
|
||||
return nil
|
||||
case common.GetSystemdInfo:
|
||||
d, ok := dest.(*systemd.ServiceDetails)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected dest type for GetSystemdInfo: %T", dest)
|
||||
}
|
||||
if resp.ServiceInfo == nil {
|
||||
return errors.New("no systemd info in response")
|
||||
}
|
||||
*d = resp.ServiceInfo
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unsupported action: %d", action)
|
||||
}
|
||||
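A minimal sketch of the legacy fallback path: an older agent that only sets the typed String field still decodes cleanly through UnmarshalResponse. The surrounding program is illustrative, not part of the diff:

package main

import (
    "fmt"

    "github.com/henrygd/beszel/internal/common"
    "github.com/henrygd/beszel/internal/hub/transport"
)

func main() {
    // a 0.18-style agent sets only the typed String field (no generic Data payload)
    logs := "container log lines..."
    resp := common.AgentResponse{String: &logs}

    var out string
    if err := transport.UnmarshalResponse(resp, common.GetContainerLogs, &out); err != nil {
        panic(err)
    }
    fmt.Println(out) // "container log lines..."
}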
74  internal/hub/transport/websocket.go  Normal file
@@ -0,0 +1,74 @@
|
||||
package transport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"github.com/henrygd/beszel"
|
||||
"github.com/henrygd/beszel/internal/common"
|
||||
"github.com/henrygd/beszel/internal/hub/ws"
|
||||
)
|
||||
|
||||
// ErrWebSocketNotConnected indicates a WebSocket transport is not currently connected.
|
||||
var ErrWebSocketNotConnected = errors.New("websocket not connected")
|
||||
|
||||
// WebSocketTransport implements Transport over WebSocket connections.
|
||||
type WebSocketTransport struct {
|
||||
wsConn *ws.WsConn
|
||||
}
|
||||
|
||||
// NewWebSocketTransport creates a new WebSocket transport wrapper.
|
||||
func NewWebSocketTransport(wsConn *ws.WsConn) *WebSocketTransport {
|
||||
return &WebSocketTransport{wsConn: wsConn}
|
||||
}
|
||||
|
||||
// Request sends a request to the agent via WebSocket and unmarshals the response.
|
||||
func (t *WebSocketTransport) Request(ctx context.Context, action common.WebSocketAction, req any, dest any) error {
|
||||
if !t.IsConnected() {
|
||||
return ErrWebSocketNotConnected
|
||||
}
|
||||
|
||||
pendingReq, err := t.wsConn.SendRequest(ctx, action, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for response
|
||||
select {
|
||||
case message := <-pendingReq.ResponseCh:
|
||||
defer message.Close()
|
||||
defer pendingReq.Cancel()
|
||||
|
||||
// Legacy agents (< MinVersionAgentResponse) respond with a raw payload instead of an AgentResponse wrapper.
|
||||
if t.wsConn.AgentVersion().LT(beszel.MinVersionAgentResponse) {
|
||||
return cbor.Unmarshal(message.Data.Bytes(), dest)
|
||||
}
|
||||
|
||||
var agentResponse common.AgentResponse
|
||||
if err := cbor.Unmarshal(message.Data.Bytes(), &agentResponse); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if agentResponse.Error != "" {
|
||||
return errors.New(agentResponse.Error)
|
||||
}
|
||||
|
||||
return UnmarshalResponse(agentResponse, action, dest)
|
||||
|
||||
case <-pendingReq.Context.Done():
|
||||
return pendingReq.Context.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// IsConnected returns true if the WebSocket connection is active.
|
||||
func (t *WebSocketTransport) IsConnected() bool {
|
||||
return t.wsConn != nil && t.wsConn.IsConnected()
|
||||
}
|
||||
|
||||
// Close terminates the WebSocket connection.
|
||||
func (t *WebSocketTransport) Close() {
|
||||
if t.wsConn != nil {
|
||||
t.wsConn.Close(nil)
|
||||
}
|
||||
}
|
||||
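The payoff of the Transport interface is that hub code can be written once against it and work over either connection type. A short sketch under that assumption; the package name and helper are hypothetical:

package example

import (
    "context"

    "github.com/henrygd/beszel/internal/common"
    "github.com/henrygd/beszel/internal/entities/smart"
    "github.com/henrygd/beszel/internal/hub/transport"
)

// fetchSmart works with either the WebSocket or the SSH transport.
func fetchSmart(ctx context.Context, t transport.Transport) (map[string]smart.SmartData, error) {
    var result map[string]smart.SmartData
    err := t.Request(ctx, common.GetSmartData, nil, &result)
    return result, err
}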
@@ -45,6 +45,11 @@ func Update(cmd *cobra.Command, _ []string) {
|
||||
fmt.Printf("Warning: failed to set executable permissions: %v\n", err)
|
||||
}
|
||||
|
||||
// Fix SELinux context if necessary
|
||||
if err := ghupdate.HandleSELinuxContext(exePath); err != nil {
|
||||
ghupdate.ColorPrintf(ghupdate.ColorYellow, "Warning: SELinux context handling: %v", err)
|
||||
}
|
||||
|
||||
// Try to restart the service if it's running
|
||||
restartService()
|
||||
}
|
||||
|
||||
@@ -6,12 +6,12 @@ import (
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"github.com/henrygd/beszel/internal/common"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
"github.com/lxzan/gws"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
// ResponseHandler defines interface for handling agent responses.
|
||||
// This is used by handleAgentRequest for legacy response handling.
|
||||
type ResponseHandler interface {
|
||||
Handle(agentResponse common.AgentResponse) error
|
||||
HandleLegacy(rawData []byte) error
|
||||
@@ -25,134 +25,7 @@ func (h *BaseHandler) HandleLegacy(rawData []byte) error {
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// systemDataHandler implements ResponseHandler for system data requests
|
||||
type systemDataHandler struct {
|
||||
data *system.CombinedData
|
||||
}
|
||||
|
||||
func (h *systemDataHandler) HandleLegacy(rawData []byte) error {
|
||||
return cbor.Unmarshal(rawData, h.data)
|
||||
}
|
||||
|
||||
func (h *systemDataHandler) Handle(agentResponse common.AgentResponse) error {
|
||||
if agentResponse.SystemData != nil {
|
||||
*h.data = *agentResponse.SystemData
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RequestSystemData requests system metrics from the agent and unmarshals the response.
|
||||
func (ws *WsConn) RequestSystemData(ctx context.Context, data *system.CombinedData, options common.DataRequestOptions) error {
|
||||
if !ws.IsConnected() {
|
||||
return gws.ErrConnClosed
|
||||
}
|
||||
|
||||
req, err := ws.requestManager.SendRequest(ctx, common.GetData, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
handler := &systemDataHandler{data: data}
|
||||
return ws.handleAgentRequest(req, handler)
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// stringResponseHandler is a generic handler for string responses from agents
|
||||
type stringResponseHandler struct {
|
||||
BaseHandler
|
||||
value string
|
||||
errorMsg string
|
||||
}
|
||||
|
||||
func (h *stringResponseHandler) Handle(agentResponse common.AgentResponse) error {
|
||||
if agentResponse.String == nil {
|
||||
return errors.New(h.errorMsg)
|
||||
}
|
||||
h.value = *agentResponse.String
|
||||
return nil
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// requestContainerStringViaWS is a generic function to request container-related strings via WebSocket
|
||||
func (ws *WsConn) requestContainerStringViaWS(ctx context.Context, action common.WebSocketAction, requestData any, errorMsg string) (string, error) {
|
||||
if !ws.IsConnected() {
|
||||
return "", gws.ErrConnClosed
|
||||
}
|
||||
|
||||
req, err := ws.requestManager.SendRequest(ctx, action, requestData)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
handler := &stringResponseHandler{errorMsg: errorMsg}
|
||||
if err := ws.handleAgentRequest(req, handler); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return handler.value, nil
|
||||
}
|
||||
|
||||
// RequestContainerLogs requests logs for a specific container via WebSocket.
|
||||
func (ws *WsConn) RequestContainerLogs(ctx context.Context, containerID string) (string, error) {
|
||||
return ws.requestContainerStringViaWS(ctx, common.GetContainerLogs, common.ContainerLogsRequest{ContainerID: containerID}, "no logs in response")
|
||||
}
|
||||
|
||||
// RequestContainerInfo requests information about a specific container via WebSocket.
|
||||
func (ws *WsConn) RequestContainerInfo(ctx context.Context, containerID string) (string, error) {
|
||||
return ws.requestContainerStringViaWS(ctx, common.GetContainerInfo, common.ContainerInfoRequest{ContainerID: containerID}, "no info in response")
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// RequestSmartData requests SMART data via WebSocket.
|
||||
func (ws *WsConn) RequestSmartData(ctx context.Context) (map[string]any, error) {
|
||||
if !ws.IsConnected() {
|
||||
return nil, gws.ErrConnClosed
|
||||
}
|
||||
req, err := ws.requestManager.SendRequest(ctx, common.GetSmartData, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var result map[string]any
|
||||
handler := ResponseHandler(&smartDataHandler{result: &result})
|
||||
if err := ws.handleAgentRequest(req, handler); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// smartDataHandler parses SMART data map from AgentResponse
|
||||
type smartDataHandler struct {
|
||||
BaseHandler
|
||||
result *map[string]any
|
||||
}
|
||||
|
||||
func (h *smartDataHandler) Handle(agentResponse common.AgentResponse) error {
|
||||
if agentResponse.SmartData == nil {
|
||||
return errors.New("no SMART data in response")
|
||||
}
|
||||
// convert to map[string]any for transport convenience in hub layer
|
||||
out := make(map[string]any, len(agentResponse.SmartData))
|
||||
for k, v := range agentResponse.SmartData {
|
||||
out[k] = v
|
||||
}
|
||||
*h.result = out
|
||||
return nil
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
// Fingerprint handling (used for WebSocket authentication)
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// fingerprintHandler implements ResponseHandler for fingerprint requests
|
||||
|
||||
@@ -45,7 +45,15 @@ func NewRequestManager(conn *gws.Conn) *RequestManager {
|
||||
func (rm *RequestManager) SendRequest(ctx context.Context, action common.WebSocketAction, data any) (*PendingRequest, error) {
|
||||
reqID := RequestID(rm.nextID.Add(1))
|
||||
|
||||
// Respect any caller-provided deadline. If none is set, apply a reasonable default
|
||||
// so pending requests don't live forever if the agent never responds.
|
||||
reqCtx := ctx
|
||||
var cancel context.CancelFunc
|
||||
if _, hasDeadline := ctx.Deadline(); hasDeadline {
|
||||
reqCtx, cancel = context.WithCancel(ctx)
|
||||
} else {
|
||||
reqCtx, cancel = context.WithTimeout(ctx, 5*time.Second)
|
||||
}
|
||||
|
||||
req := &PendingRequest{
|
||||
ID: reqID,
|
||||
@@ -100,6 +108,11 @@ func (rm *RequestManager) handleResponse(message *gws.Message) {
|
||||
return
|
||||
}
|
||||
|
||||
if response.Id == nil {
|
||||
rm.routeLegacyResponse(message)
|
||||
return
|
||||
}
|
||||
|
||||
reqID := RequestID(*response.Id)
|
||||
|
||||
rm.RLock()
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package ws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
"weak"
|
||||
@@ -161,3 +162,14 @@ func (ws *WsConn) handleAgentRequest(req *PendingRequest, handler ResponseHandle
|
||||
func (ws *WsConn) IsConnected() bool {
|
||||
return ws.conn != nil
|
||||
}
|
||||
|
||||
// AgentVersion returns the connected agent's version (as reported during handshake).
|
||||
func (ws *WsConn) AgentVersion() semver.Version {
|
||||
return ws.agentVersion
|
||||
}
|
||||
|
||||
// SendRequest sends a request to the agent and returns a pending request handle.
|
||||
// This is used by the transport layer to send requests.
|
||||
func (ws *WsConn) SendRequest(ctx context.Context, action common.WebSocketAction, data any) (*PendingRequest, error) {
|
||||
return ws.requestManager.SendRequest(ctx, action, data)
|
||||
}
|
||||
|
||||
@@ -184,14 +184,18 @@ func TestCommonActions(t *testing.T) {
|
||||
assert.Equal(t, common.WebSocketAction(2), common.GetContainerLogs, "GetLogs should be action 2")
|
||||
}
|
||||
|
||||
func TestFingerprintHandler(t *testing.T) {
	var result common.FingerprintResponse
	h := &fingerprintHandler{result: &result}

	resp := common.AgentResponse{Fingerprint: &common.FingerprintResponse{
		Fingerprint: "test-fingerprint",
		Hostname:    "test-host",
	}}
	err := h.Handle(resp)
	assert.NoError(t, err)
	assert.Equal(t, "test-fingerprint", result.Fingerprint)
	assert.Equal(t, "test-host", result.Hostname)
}
|
||||
|
||||
// TestHandler tests that we can create a Handler
|
||||
|
||||
@@ -75,9 +75,11 @@ func init() {
|
||||
"Disk",
|
||||
"Temperature",
|
||||
"Bandwidth",
|
||||
"GPU",
|
||||
"LoadAvg1",
|
||||
"LoadAvg5",
|
||||
"LoadAvg15"
|
||||
"LoadAvg15",
|
||||
"Battery"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -718,7 +720,9 @@ func init() {
|
||||
"type": "autodate"
|
||||
}
|
||||
],
|
||||
"indexes": [],
|
||||
"indexes": [
|
||||
"CREATE INDEX ` + "`" + `idx_systems_status` + "`" + ` ON ` + "`" + `systems` + "`" + ` (` + "`" + `status` + "`" + `)"
|
||||
],
|
||||
"system": false
|
||||
},
|
||||
{
|
||||
@@ -1005,6 +1009,682 @@ func init() {
|
||||
"CREATE INDEX ` + "`" + `idx_r3Ja0rs102` + "`" + ` ON ` + "`" + `containers` + "`" + ` (` + "`" + `system` + "`" + `)"
|
||||
],
|
||||
"system": false
|
||||
},
|
||||
{
|
||||
"createRule": null,
|
||||
"deleteRule": null,
|
||||
"fields": [
|
||||
{
|
||||
"autogeneratePattern": "[a-z0-9]{10}",
|
||||
"hidden": false,
|
||||
"id": "text3208210256",
|
||||
"max": 10,
|
||||
"min": 6,
|
||||
"name": "id",
|
||||
"pattern": "^[a-z0-9]+$",
|
||||
"presentable": false,
|
||||
"primaryKey": true,
|
||||
"required": true,
|
||||
"system": true,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"autogeneratePattern": "",
|
||||
"hidden": false,
|
||||
"id": "text1579384326",
|
||||
"max": 0,
|
||||
"min": 0,
|
||||
"name": "name",
|
||||
"pattern": "",
|
||||
"presentable": false,
|
||||
"primaryKey": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"cascadeDelete": true,
|
||||
"collectionId": "2hz5ncl8tizk5nx",
|
||||
"hidden": false,
|
||||
"id": "relation3377271179",
|
||||
"maxSelect": 1,
|
||||
"minSelect": 0,
|
||||
"name": "system",
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "relation"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number2063623452",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "state",
|
||||
"onlyInt": true,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number1476559580",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "sub",
|
||||
"onlyInt": true,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number3128971310",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "cpu",
|
||||
"onlyInt": false,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number1052053287",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "cpuPeak",
|
||||
"onlyInt": false,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number3933025333",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "memory",
|
||||
"onlyInt": false,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number1828797201",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "memPeak",
|
||||
"onlyInt": false,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number3332085495",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "updated",
|
||||
"onlyInt": false,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
}
|
||||
],
|
||||
"id": "pbc_3494996990",
|
||||
"indexes": [
|
||||
"CREATE INDEX ` + "`" + `idx_4Z7LuLNdQb` + "`" + ` ON ` + "`" + `systemd_services` + "`" + ` (` + "`" + `system` + "`" + `)",
|
||||
"CREATE INDEX ` + "`" + `idx_pBp1fF837e` + "`" + ` ON ` + "`" + `systemd_services` + "`" + ` (` + "`" + `updated` + "`" + `)"
|
||||
],
|
||||
"listRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id",
|
||||
"name": "systemd_services",
|
||||
"system": false,
|
||||
"type": "base",
|
||||
"updateRule": null,
|
||||
"viewRule": null
|
||||
},
|
||||
{
|
||||
"createRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
||||
"deleteRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
||||
"fields": [
|
||||
{
|
||||
"autogeneratePattern": "[a-z0-9]{10}",
|
||||
"hidden": false,
|
||||
"id": "text3208210256",
|
||||
"max": 10,
|
||||
"min": 10,
|
||||
"name": "id",
|
||||
"pattern": "^[a-z0-9]+$",
|
||||
"presentable": false,
|
||||
"primaryKey": true,
|
||||
"required": true,
|
||||
"system": true,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"cascadeDelete": true,
|
||||
"collectionId": "_pb_users_auth_",
|
||||
"hidden": false,
|
||||
"id": "relation2375276105",
|
||||
"maxSelect": 1,
|
||||
"minSelect": 0,
|
||||
"name": "user",
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "relation"
|
||||
},
|
||||
{
|
||||
"cascadeDelete": true,
|
||||
"collectionId": "2hz5ncl8tizk5nx",
|
||||
"hidden": false,
|
||||
"id": "relation3377271179",
|
||||
"maxSelect": 1,
|
||||
"minSelect": 0,
|
||||
"name": "system",
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "relation"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "select2844932856",
|
||||
"maxSelect": 1,
|
||||
"name": "type",
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "select",
|
||||
"values": [
|
||||
"one-time",
|
||||
"daily"
|
||||
]
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "date2675529103",
|
||||
"max": "",
|
||||
"min": "",
|
||||
"name": "start",
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "date"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "date16528305",
|
||||
"max": "",
|
||||
"min": "",
|
||||
"name": "end",
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "date"
|
||||
}
|
||||
],
|
||||
"id": "pbc_451525641",
|
||||
"indexes": [
|
||||
"CREATE INDEX ` + "`" + `idx_q0iKnRP9v8` + "`" + ` ON ` + "`" + `quiet_hours` + "`" + ` (\n ` + "`" + `user` + "`" + `,\n ` + "`" + `system` + "`" + `\n)",
|
||||
"CREATE INDEX ` + "`" + `idx_6T7ljT7FJd` + "`" + ` ON ` + "`" + `quiet_hours` + "`" + ` (\n ` + "`" + `type` + "`" + `,\n ` + "`" + `end` + "`" + `\n)"
|
||||
],
|
||||
"listRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
||||
"name": "quiet_hours",
|
||||
"system": false,
|
||||
"type": "base",
|
||||
"updateRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
||||
"viewRule": "@request.auth.id != \"\" && user.id = @request.auth.id"
|
||||
},
|
||||
{
|
||||
"createRule": null,
|
||||
"deleteRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id",
|
||||
"fields": [
|
||||
{
|
||||
"autogeneratePattern": "[a-z0-9]{10}",
|
||||
"hidden": false,
|
||||
"id": "text3208210256",
|
||||
"max": 10,
|
||||
"min": 10,
|
||||
"name": "id",
|
||||
"pattern": "^[a-z0-9]+$",
|
||||
"presentable": false,
|
||||
"primaryKey": true,
|
||||
"required": true,
|
||||
"system": true,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"cascadeDelete": true,
|
||||
"collectionId": "2hz5ncl8tizk5nx",
|
||||
"hidden": false,
|
||||
"id": "relation3377271179",
|
||||
"maxSelect": 1,
|
||||
"minSelect": 0,
|
||||
"name": "system",
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "relation"
|
||||
},
|
||||
{
|
||||
"autogeneratePattern": "",
|
||||
"hidden": false,
|
||||
"id": "text1579384326",
|
||||
"max": 0,
|
||||
"min": 0,
|
||||
"name": "name",
|
||||
"pattern": "",
|
||||
"presentable": false,
|
||||
"primaryKey": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"autogeneratePattern": "",
|
||||
"hidden": false,
|
||||
"id": "text3616895705",
|
||||
"max": 0,
|
||||
"min": 0,
|
||||
"name": "model",
|
||||
"pattern": "",
|
||||
"presentable": false,
|
||||
"primaryKey": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"autogeneratePattern": "",
|
||||
"hidden": false,
|
||||
"id": "text2744374011",
|
||||
"max": 0,
|
||||
"min": 0,
|
||||
"name": "state",
|
||||
"pattern": "",
|
||||
"presentable": false,
|
||||
"primaryKey": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number3051925876",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "capacity",
|
||||
"onlyInt": false,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number190023114",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "temp",
|
||||
"onlyInt": false,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"autogeneratePattern": "",
|
||||
"hidden": false,
|
||||
"id": "text3589068740",
|
||||
"max": 0,
|
||||
"min": 0,
|
||||
"name": "firmware",
|
||||
"pattern": "",
|
||||
"presentable": false,
|
||||
"primaryKey": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"autogeneratePattern": "",
|
||||
"hidden": false,
|
||||
"id": "text3547646428",
|
||||
"max": 0,
|
||||
"min": 0,
|
||||
"name": "serial",
|
||||
"pattern": "",
|
||||
"presentable": false,
|
||||
"primaryKey": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"autogeneratePattern": "",
|
||||
"hidden": false,
|
||||
"id": "text2363381545",
|
||||
"max": 0,
|
||||
"min": 0,
|
||||
"name": "type",
|
||||
"pattern": "",
|
||||
"presentable": false,
|
||||
"primaryKey": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number1234567890",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "hours",
|
||||
"onlyInt": true,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number0987654321",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "cycles",
|
||||
"onlyInt": true,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "json832282224",
|
||||
"maxSize": 0,
|
||||
"name": "attributes",
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "json"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "autodate3332085495",
|
||||
"name": "updated",
|
||||
"onCreate": true,
|
||||
"onUpdate": true,
|
||||
"presentable": false,
|
||||
"system": false,
|
||||
"type": "autodate"
|
||||
}
|
||||
],
|
||||
"id": "pbc_2571630677",
|
||||
"indexes": [
|
||||
"CREATE INDEX ` + "`" + `idx_DZ9yhvgl44` + "`" + ` ON ` + "`" + `smart_devices` + "`" + ` (` + "`" + `system` + "`" + `)"
|
||||
],
|
||||
"listRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id",
|
||||
"name": "smart_devices",
|
||||
"system": false,
|
||||
"type": "base",
|
||||
"updateRule": null,
|
||||
"viewRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id"
|
||||
},
|
||||
{
|
||||
"createRule": "",
|
||||
"deleteRule": "",
|
||||
"fields": [
|
||||
{
|
||||
"autogeneratePattern": "[a-z0-9]{15}",
|
||||
"hidden": false,
|
||||
"id": "text3208210256",
|
||||
"max": 15,
|
||||
"min": 15,
|
||||
"name": "id",
|
||||
"pattern": "^[a-z0-9]+$",
|
||||
"presentable": false,
|
||||
"primaryKey": true,
|
||||
"required": true,
|
||||
"system": true,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"cascadeDelete": true,
|
||||
"collectionId": "2hz5ncl8tizk5nx",
|
||||
"hidden": false,
|
||||
"id": "relation3377271179",
|
||||
"maxSelect": 1,
|
||||
"minSelect": 0,
|
||||
"name": "system",
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "relation"
|
||||
},
|
||||
{
|
||||
"autogeneratePattern": "",
|
||||
"hidden": false,
|
||||
"id": "text3847340049",
|
||||
"max": 0,
|
||||
"min": 0,
|
||||
"name": "hostname",
|
||||
"pattern": "",
|
||||
"presentable": false,
|
||||
"primaryKey": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number1789936913",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "os",
|
||||
"onlyInt": false,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"autogeneratePattern": "",
|
||||
"hidden": false,
|
||||
"id": "text2818598173",
|
||||
"max": 0,
|
||||
"min": 0,
|
||||
"name": "os_name",
|
||||
"pattern": "",
|
||||
"presentable": false,
|
||||
"primaryKey": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"autogeneratePattern": "",
|
||||
"hidden": false,
|
||||
"id": "text1574083243",
|
||||
"max": 0,
|
||||
"min": 0,
|
||||
"name": "kernel",
|
||||
"pattern": "",
|
||||
"presentable": false,
|
||||
"primaryKey": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"autogeneratePattern": "",
|
||||
"hidden": false,
|
||||
"id": "text3128971310",
|
||||
"max": 0,
|
||||
"min": 0,
|
||||
"name": "cpu",
|
||||
"pattern": "",
|
||||
"presentable": false,
|
||||
"primaryKey": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"autogeneratePattern": "",
|
||||
"hidden": false,
|
||||
"id": "text4161937994",
|
||||
"max": 0,
|
||||
"min": 0,
|
||||
"name": "arch",
|
||||
"pattern": "",
|
||||
"presentable": false,
|
||||
"primaryKey": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number4245036687",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "cores",
|
||||
"onlyInt": false,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number1871592925",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "threads",
|
||||
"onlyInt": false,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number3933025333",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "memory",
|
||||
"onlyInt": false,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "bool2200265312",
|
||||
"name": "podman",
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "bool"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "autodate3332085495",
|
||||
"name": "updated",
|
||||
"onCreate": true,
|
||||
"onUpdate": true,
|
||||
"presentable": false,
|
||||
"system": false,
|
||||
"type": "autodate"
|
||||
}
|
||||
],
|
||||
"id": "pbc_3116237454",
|
||||
"indexes": [],
|
||||
"listRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id",
|
||||
"name": "system_details",
|
||||
"system": false,
|
||||
"type": "base",
|
||||
"updateRule": "",
|
||||
"viewRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id"
|
||||
},
|
||||
{
|
||||
"createRule": null,
|
||||
"deleteRule": null,
|
||||
"fields": [
|
||||
{
|
||||
"autogeneratePattern": "[a-z0-9]{10}",
|
||||
"hidden": false,
|
||||
"id": "text3208210256",
|
||||
"max": 10,
|
||||
"min": 10,
|
||||
"name": "id",
|
||||
"pattern": "^[a-z0-9]+$",
|
||||
"presentable": false,
|
||||
"primaryKey": true,
|
||||
"required": true,
|
||||
"system": true,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"cascadeDelete": true,
|
||||
"collectionId": "_pb_users_auth_",
|
||||
"hidden": false,
|
||||
"id": "relation2375276105",
|
||||
"maxSelect": 1,
|
||||
"minSelect": 0,
|
||||
"name": "user",
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "relation"
|
||||
},
|
||||
{
|
||||
"autogeneratePattern": "",
|
||||
"hidden": false,
|
||||
"id": "text1597481275",
|
||||
"max": 0,
|
||||
"min": 0,
|
||||
"name": "token",
|
||||
"pattern": "",
|
||||
"presentable": false,
|
||||
"primaryKey": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "autodate2990389176",
|
||||
"name": "created",
|
||||
"onCreate": true,
|
||||
"onUpdate": false,
|
||||
"presentable": false,
|
||||
"system": false,
|
||||
"type": "autodate"
|
||||
}
|
||||
],
|
||||
"id": "pbc_3383022248",
|
||||
"indexes": [
|
||||
"CREATE INDEX ` + "`" + `idx_iaD9Y2Lgbl` + "`" + ` ON ` + "`" + `universal_tokens` + "`" + ` (` + "`" + `token` + "`" + `)",
|
||||
"CREATE UNIQUE INDEX ` + "`" + `idx_wdR0A4PbRG` + "`" + ` ON ` + "`" + `universal_tokens` + "`" + ` (` + "`" + `user` + "`" + `)"
|
||||
],
|
||||
"listRule": null,
|
||||
"name": "universal_tokens",
|
||||
"system": false,
|
||||
"type": "base",
|
||||
"updateRule": null,
|
||||
"viewRule": null
|
||||
}
|
||||
]`
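The quiet_hours collection above pairs a one-time/daily type with start and end dates and is scoped to a single user. Purely as an illustration (none of the names below come from this diff), a hub-side check for an active one-time window could look like the following sketch, reusing the same dbx named-parameter style as the cleanup code later in this diff; daily windows would need extra time-of-day handling and are ignored here.

package sketch // hypothetical placement, not a file in this repository

import (
	"time"

	"github.com/pocketbase/dbx"
	"github.com/pocketbase/pocketbase/core"
)

// inQuietHours reports whether the user currently has an active one-time
// quiet_hours window. Helper name and query are illustrative assumptions.
func inQuietHours(app core.App, userID string) (bool, error) {
	now := time.Now().UTC()
	var count int
	err := app.DB().
		NewQuery("SELECT COUNT(*) FROM quiet_hours WHERE user = {:user} AND type = 'one-time' AND start <= {:now} AND end >= {:now}").
		Bind(dbx.Params{"user": userID, "now": now}).
		Row(&count)
	return count > 0, err
}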
@@ -1,50 +0,0 @@
package migrations

import (
"github.com/henrygd/beszel/internal/entities/system"
"github.com/pocketbase/pocketbase/core"
m "github.com/pocketbase/pocketbase/migrations"
)

// This can be deleted after Nov 2025 or so

func init() {
m.Register(func(app core.App) error {
app.RunInTransaction(func(txApp core.App) error {
var systemIds []string
txApp.DB().NewQuery("SELECT id FROM systems").Column(&systemIds)

for _, systemId := range systemIds {
var statRecordIds []string
txApp.DB().NewQuery("SELECT id FROM system_stats WHERE system = {:system} AND created > {:created}").Bind(map[string]any{"system": systemId, "created": "2025-09-21"}).Column(&statRecordIds)

for _, statRecordId := range statRecordIds {
statRecord, err := txApp.FindRecordById("system_stats", statRecordId)
if err != nil {
return err
}
var systemStats system.Stats
err = statRecord.UnmarshalJSONField("stats", &systemStats)
if err != nil {
return err
}
// if mem buff cache is less than total mem, we don't need to fix it
if systemStats.MemBuffCache < systemStats.Mem {
continue
}
systemStats.MemBuffCache = 0
statRecord.Set("stats", systemStats)
err = txApp.SaveNoValidate(statRecord)
if err != nil {
return err
}
}
}

return nil
})
return nil
}, func(app core.App) error {
return nil
})
}
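For reference, the file removed above follows the usual PocketBase data-migration shape: an up and a down function registered with m.Register, with the fix-up running inside a transaction. A minimal sketch of that skeleton is shown below (illustrative only); unlike the removed file, it propagates the RunInTransaction error instead of discarding it.

package migrations

import (
	"github.com/pocketbase/pocketbase/core"
	m "github.com/pocketbase/pocketbase/migrations"
)

func init() {
	m.Register(func(app core.App) error {
		// up: run the one-off data fix-up atomically and surface any failure
		return app.RunInTransaction(func(txApp core.App) error {
			// ... record fix-up, e.g. the MemBuffCache reset shown above ...
			return nil
		})
	}, func(app core.App) error {
		// down: nothing to undo for a one-off data fix-up
		return nil
	})
}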
@@ -177,6 +177,10 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
stats := &tempStats
// necessary because uint8 is not big enough for the sum
batterySum := 0
// accumulate per-core usage across records
var cpuCoresSums []uint64
// accumulate cpu breakdown [user, system, iowait, steal, idle]
var cpuBreakdownSums []float64

count := float64(len(records))
tempCount := float64(0)
@@ -186,6 +190,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
id := record.Id
// clear global statsRecord for reuse
statsRecord.Stats = statsRecord.Stats[:0]
// reset tempStats each iteration to avoid omitzero fields retaining stale values
*stats = system.Stats{}

queryParams["id"] = id
db.NewQuery("SELECT stats FROM system_stats WHERE id = {:id}").Bind(queryParams).One(&statsRecord)
@@ -194,6 +200,15 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
}

sum.Cpu += stats.Cpu
// accumulate cpu time breakdowns if present
if stats.CpuBreakdown != nil {
if len(cpuBreakdownSums) < len(stats.CpuBreakdown) {
cpuBreakdownSums = append(cpuBreakdownSums, make([]float64, len(stats.CpuBreakdown)-len(cpuBreakdownSums))...)
}
for i, v := range stats.CpuBreakdown {
cpuBreakdownSums[i] += v
}
}
sum.Mem += stats.Mem
sum.MemUsed += stats.MemUsed
sum.MemPct += stats.MemPct
@@ -217,6 +232,17 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
sum.DiskIO[1] += stats.DiskIO[1]
batterySum += int(stats.Battery[0])
sum.Battery[1] = stats.Battery[1]

// accumulate per-core usage if present
if stats.CpuCoresUsage != nil {
if len(cpuCoresSums) < len(stats.CpuCoresUsage) {
// extend slices to accommodate core count
cpuCoresSums = append(cpuCoresSums, make([]uint64, len(stats.CpuCoresUsage)-len(cpuCoresSums))...)
}
for i, v := range stats.CpuCoresUsage {
cpuCoresSums[i] += uint64(v)
}
}
// Set peak values
sum.MaxCpu = max(sum.MaxCpu, stats.MaxCpu, stats.Cpu)
sum.MaxMem = max(sum.MaxMem, stats.MaxMem, stats.MemUsed)
@@ -269,6 +295,10 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
fs.DiskReadPs += value.DiskReadPs
fs.MaxDiskReadPS = max(fs.MaxDiskReadPS, value.MaxDiskReadPS, value.DiskReadPs)
fs.MaxDiskWritePS = max(fs.MaxDiskWritePS, value.MaxDiskWritePS, value.DiskWritePs)
fs.DiskReadBytes += value.DiskReadBytes
fs.DiskWriteBytes += value.DiskWriteBytes
fs.MaxDiskReadBytes = max(fs.MaxDiskReadBytes, value.MaxDiskReadBytes, value.DiskReadBytes)
fs.MaxDiskWriteBytes = max(fs.MaxDiskWriteBytes, value.MaxDiskWriteBytes, value.DiskWriteBytes)
}
}

@@ -356,6 +386,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
fs.DiskUsed = twoDecimals(fs.DiskUsed / count)
fs.DiskWritePs = twoDecimals(fs.DiskWritePs / count)
fs.DiskReadPs = twoDecimals(fs.DiskReadPs / count)
fs.DiskReadBytes = fs.DiskReadBytes / uint64(count)
fs.DiskWriteBytes = fs.DiskWriteBytes / uint64(count)
}
}

@@ -379,6 +411,25 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
sum.GPUData[id] = gpu
}
}

// Average per-core usage
if len(cpuCoresSums) > 0 {
avg := make(system.Uint8Slice, len(cpuCoresSums))
for i := range cpuCoresSums {
v := math.Round(float64(cpuCoresSums[i]) / count)
avg[i] = uint8(v)
}
sum.CpuCoresUsage = avg
}

// Average CPU breakdown
if len(cpuBreakdownSums) > 0 {
avg := make([]float64, len(cpuBreakdownSums))
for i := range cpuBreakdownSums {
avg[i] = twoDecimals(cpuBreakdownSums[i] / count)
}
sum.CpuBreakdown = avg
}
}

return sum
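The two averaging blocks above grow cpuCoresSums and cpuBreakdownSums on demand, so records that report different core counts (or no breakdown at all) can still be accumulated, and the totals are divided by the overall record count at the end. The same pattern in isolation, as a sketch (function name is illustrative, not from the repo):

// assumes: import "math"

// averageRagged sums variable-length samples, growing the accumulator as
// longer samples appear, then averages by the total sample count and rounds
// to two decimals, mirroring twoDecimals in the code above.
func averageRagged(samples [][]float64) []float64 {
	var sums []float64
	for _, s := range samples {
		if len(sums) < len(s) {
			sums = append(sums, make([]float64, len(s)-len(sums))...)
		}
		for i, v := range s {
			sums[i] += v
		}
	}
	if len(samples) == 0 {
		return nil
	}
	count := float64(len(samples))
	avg := make([]float64, len(sums))
	for i := range sums {
		avg[i] = math.Round(sums[i]/count*100) / 100
	}
	return avg
}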
@@ -395,9 +446,11 @@ func (rm *RecordManager) AverageContainerStats(db dbx.Builder, records RecordIds

for i := range records {
id := records[i].Id
// clear global statsRecord and containerStats for reuse
// clear global statsRecord for reuse
statsRecord.Stats = statsRecord.Stats[:0]
containerStats = containerStats[:0]
// must set to nil (not [:0]) to avoid json.Unmarshal reusing backing array
// which causes omitzero fields to inherit stale values from previous iterations
containerStats = nil

queryParams["id"] = id
db.NewQuery("SELECT stats FROM container_stats WHERE id = {:id}").Bind(queryParams).One(&statsRecord)
@@ -412,19 +465,24 @@ func (rm *RecordManager) AverageContainerStats(db dbx.Builder, records RecordIds
}
sums[stat.Name].Cpu += stat.Cpu
sums[stat.Name].Mem += stat.Mem
sums[stat.Name].NetworkSent += stat.NetworkSent
sums[stat.Name].NetworkRecv += stat.NetworkRecv
sentBytes := stat.Bandwidth[0]
recvBytes := stat.Bandwidth[1]
if sentBytes == 0 && recvBytes == 0 && (stat.NetworkSent != 0 || stat.NetworkRecv != 0) {
sentBytes = uint64(stat.NetworkSent * 1024 * 1024)
recvBytes = uint64(stat.NetworkRecv * 1024 * 1024)
}
sums[stat.Name].Bandwidth[0] += sentBytes
sums[stat.Name].Bandwidth[1] += recvBytes
}
}

result := make([]container.Stats, 0, len(sums))
for _, value := range sums {
result = append(result, container.Stats{
Name: value.Name,
Cpu: twoDecimals(value.Cpu / count),
Mem: twoDecimals(value.Mem / count),
NetworkSent: twoDecimals(value.NetworkSent / count),
NetworkRecv: twoDecimals(value.NetworkRecv / count),
Name: value.Name,
Cpu: twoDecimals(value.Cpu / count),
Mem: twoDecimals(value.Mem / count),
Bandwidth: [2]uint64{uint64(float64(value.Bandwidth[0]) / count), uint64(float64(value.Bandwidth[1]) / count)},
})
}
return result
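The fallback above covers records from older agents that only report NetworkSent/NetworkRecv in MB/s: when both of the newer Bandwidth byte counters are zero but a legacy field is not, the legacy values are converted to bytes before being summed. The conversion on its own, as a sketch (helper name is illustrative):

// legacyToBytes falls back to the older MB/s network fields, converted to
// bytes, when the newer bandwidth counters are absent (both zero).
func legacyToBytes(sentBytes, recvBytes uint64, networkSentMB, networkRecvMB float64) (uint64, uint64) {
	if sentBytes == 0 && recvBytes == 0 && (networkSentMB != 0 || networkRecvMB != 0) {
		sentBytes = uint64(networkSentMB * 1024 * 1024)
		recvBytes = uint64(networkRecvMB * 1024 * 1024)
	}
	return sentBytes, recvBytes
}

For example, legacyToBytes(0, 0, 1.5, 0) yields (1572864, 0), while records that already carry byte counters pass through unchanged.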
@@ -441,10 +499,18 @@ func (rm *RecordManager) DeleteOldRecords() {
if err != nil {
return err
}
err = deleteOldSystemdServiceRecords(txApp)
if err != nil {
return err
}
err = deleteOldAlertsHistory(txApp, 200, 250)
if err != nil {
return err
}
err = deleteOldQuietHours(txApp)
if err != nil {
return err
}
return nil
})
}
@@ -510,6 +576,20 @@ func deleteOldSystemStats(app core.App) error {
return nil
}

// Deletes systemd service records that haven't been updated in the last 20 minutes
func deleteOldSystemdServiceRecords(app core.App) error {
now := time.Now().UTC()
twentyMinutesAgo := now.Add(-20 * time.Minute)

// Delete systemd service records where updated < twentyMinutesAgo
_, err := app.DB().NewQuery("DELETE FROM systemd_services WHERE updated < {:updated}").Bind(dbx.Params{"updated": twentyMinutesAgo.UnixMilli()}).Execute()
if err != nil {
return fmt.Errorf("failed to delete old systemd service records: %v", err)
}

return nil
}

// Deletes container records that haven't been updated in the last 10 minutes
func deleteOldContainerRecords(app core.App) error {
now := time.Now().UTC()
@@ -524,6 +604,17 @@ func deleteOldContainerRecords(app core.App) error {
return nil
}

// Deletes old quiet hours records where end date has passed
func deleteOldQuietHours(app core.App) error {
now := time.Now().UTC()
_, err := app.DB().NewQuery("DELETE FROM quiet_hours WHERE type = 'one-time' AND end < {:now}").Bind(dbx.Params{"now": now}).Execute()
if err != nil {
return err
}

return nil
}

/* Round float to two decimals */
func twoDecimals(value float64) float64 {
return math.Round(value*100) / 100
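One unit detail in the cleanup helpers above: per the collection snapshot earlier in this diff, systemd_services.updated is a plain number field, so deleteOldSystemdServiceRecords binds a Unix-millisecond cutoff, while deleteOldQuietHours compares the quiet_hours date field against a bound time.Time. A small sketch of computing such a cutoff (helper name is illustrative; assumes import "time"):

// cutoffUnixMilli returns the Unix-millisecond timestamp for "age" ago,
// matching the value bound to {:updated} in the systemd_services cleanup.
func cutoffUnixMilli(age time.Duration) int64 {
	return time.Now().UTC().Add(-age).UnixMilli()
}

For the 20-minute retention above, cutoffUnixMilli(20*time.Minute) produces the comparison value.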
@@ -351,6 +351,83 @@ func TestDeleteOldAlertsHistoryEdgeCases(t *testing.T) {
})
}

// TestDeleteOldSystemdServiceRecords tests systemd service cleanup via DeleteOldRecords
func TestDeleteOldSystemdServiceRecords(t *testing.T) {
hub, err := tests.NewTestHub(t.TempDir())
require.NoError(t, err)
defer hub.Cleanup()

rm := records.NewRecordManager(hub)

// Create test user and system
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
require.NoError(t, err)

system, err := tests.CreateRecord(hub, "systems", map[string]any{
"name": "test-system",
"host": "localhost",
"port": "45876",
"status": "up",
"users": []string{user.Id},
})
require.NoError(t, err)

now := time.Now().UTC()

// Create old systemd service records that should be deleted (older than 20 minutes)
oldRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
"system": system.Id,
"name": "nginx.service",
"state": 0, // Active
"sub": 1, // Running
"cpu": 5.0,
"cpuPeak": 10.0,
"memory": 1024000,
"memPeak": 2048000,
})
require.NoError(t, err)
// Set updated time to 25 minutes ago (should be deleted)
oldRecord.SetRaw("updated", now.Add(-25*time.Minute).UnixMilli())
err = hub.SaveNoValidate(oldRecord)
require.NoError(t, err)

// Create recent systemd service record that should be kept (within 20 minutes)
recentRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
"system": system.Id,
"name": "apache.service",
"state": 1, // Inactive
"sub": 0, // Dead
"cpu": 2.0,
"cpuPeak": 3.0,
"memory": 512000,
"memPeak": 1024000,
})
require.NoError(t, err)
// Set updated time to 10 minutes ago (should be kept)
recentRecord.SetRaw("updated", now.Add(-10*time.Minute).UnixMilli())
err = hub.SaveNoValidate(recentRecord)
require.NoError(t, err)

// Count records before deletion
countBefore, err := hub.CountRecords("systemd_services")
require.NoError(t, err)
assert.Equal(t, int64(2), countBefore, "Should have 2 systemd service records initially")

// Run deletion via RecordManager
rm.DeleteOldRecords()

// Count records after deletion
countAfter, err := hub.CountRecords("systemd_services")
require.NoError(t, err)
assert.Equal(t, int64(1), countAfter, "Should have 1 systemd service record after deletion")

// Verify the correct record was kept
remainingRecords, err := hub.FindRecordsByFilter("systemd_services", "", "", 10, 0, nil)
require.NoError(t, err)
assert.Len(t, remainingRecords, 1, "Should have exactly 1 record remaining")
assert.Equal(t, "apache.service", remainingRecords[0].Get("name"), "The recent record should be kept")
}

// TestRecordManagerCreation tests RecordManager creation
func TestRecordManagerCreation(t *testing.T) {
hub, err := tests.NewTestHub(t.TempDir())

@@ -17,6 +17,9 @@
"enabled": true,
"rules": {
"recommended": true,
"a11y": {
"useButtonType": "off"
},
"complexity": {
"noUselessStringConcat": "error",
"noUselessUndefinedInitialization": "error",
@@ -30,13 +33,14 @@
"noUnusedFunctionParameters": "error",
"noUnusedPrivateClassMembers": "error",
"useExhaustiveDependencies": {
"level": "error",
"options": {
"reportUnnecessaryDependencies": false
}
"level": "off"
},
"useUniqueElementIds": "off",
"noUnusedVariables": "error"
},
"security": {
"noDangerouslySetInnerHtml": "warn"
},
"style": {
"noParameterProperties": "error",
"noYodaExpression": "error",
@@ -47,7 +51,8 @@
},
"suspicious": {
"useAwait": "error",
"noEvolvingTypes": "error"
"noEvolvingTypes": "error",
"noArrayIndexKey": "off"
}
}
},
Some files were not shown because too many files have changed in this diff.