Mirror of https://github.com/henrygd/beszel.git (synced 2025-12-10 13:25:43 +00:00)

Compare commits (6 commits)
- ca58ff66ba
- 133d229361
- 960cac4060
- d83865cb4f
- 4b43d68da6
- c790d76211
@@ -258,6 +258,7 @@ func (gm *GPUManager) GetCurrentData() map[string]system.GPUData {
gpuAvg.Engines[name] = twoDecimals(engine / count)
maxEngineUsage = max(maxEngineUsage, engine/count)
}
gpuAvg.PowerPkg = twoDecimals(gpu.PowerPkg / count)
gpuAvg.Usage = twoDecimals(maxEngineUsage)
} else {
gpuAvg.Usage = twoDecimals(gpu.Usage / count)
@@ -266,7 +267,7 @@ func (gm *GPUManager) GetCurrentData() map[string]system.GPUData {
}

// reset accumulators in the original gpu data for next collection
gpu.Usage, gpu.Power, gpu.Count = gpuAvg.Usage, gpuAvg.Power, 1
gpu.Usage, gpu.Power, gpu.PowerPkg, gpu.Count = gpuAvg.Usage, gpuAvg.Power, gpuAvg.PowerPkg, 1
gpu.Engines = gpuAvg.Engines

// append id to the name if there are multiple GPUs with the same name
@@ -358,6 +359,9 @@ func (gm *GPUManager) startCollector(command string) {

// NewGPUManager creates and initializes a new GPUManager
func NewGPUManager() (*GPUManager, error) {
if skipGPU, _ := GetEnv("SKIP_GPU"); skipGPU == "true" {
return nil, nil
}
var gm GPUManager
if err := gm.detectGPUs(); err != nil {
return nil, err
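
The change above accumulates PowerPkg across samples, divides by the sample count when a snapshot is requested, and then resets the accumulators so the next collection interval starts fresh. A minimal sketch of that accumulate/average/reset cycle, using simplified stand-in types rather than the repository's GPUData and helpers:

```go
package main

import (
	"fmt"
	"math"
)

// gpuAccum is a simplified stand-in for the accumulator fields used above.
type gpuAccum struct {
	PowerPkg float64 // running sum of package power samples
	Count    float64 // number of samples accumulated
}

func twoDecimals(v float64) float64 { return math.Round(v*100) / 100 }

// snapshot averages the accumulated value and resets the accumulator to a
// single-sample baseline, mirroring the reset step in GetCurrentData.
func (g *gpuAccum) snapshot() float64 {
	avg := twoDecimals(g.PowerPkg / g.Count)
	g.PowerPkg, g.Count = avg, 1
	return avg
}

func main() {
	g := &gpuAccum{}
	for _, sample := range []float64{2.69, 2.45, 3.12} {
		g.PowerPkg += sample
		g.Count++
	}
	fmt.Println(g.snapshot()) // prints 2.75 (8.26 / 3, rounded to two decimals)
}
```
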
@@ -17,6 +17,7 @@ const (

type intelGpuStats struct {
PowerGPU float64
PowerPkg float64
Engines map[string]float64
}

@@ -33,6 +34,7 @@ func (gm *GPUManager) updateIntelFromStats(sample *intelGpuStats) bool {
}

gpuData.Power += sample.PowerGPU
gpuData.PowerPkg += sample.PowerPkg

if gpuData.Engines == nil {
gpuData.Engines = make(map[string]float64, len(sample.Engines))
@@ -46,7 +48,7 @@ func (gm *GPUManager) updateIntelFromStats(sample *intelGpuStats) bool {
}

// collectIntelStats executes intel_gpu_top in text mode (-l) and parses the output
func (gm *GPUManager) collectIntelStats() error {
func (gm *GPUManager) collectIntelStats() (err error) {
cmd := exec.Command(intelGpuStatsCmd, "-s", intelGpuStatsInterval, "-l")
// Avoid blocking if intel_gpu_top writes to stderr
cmd.Stderr = io.Discard
@@ -58,23 +60,28 @@ func (gm *GPUManager) collectIntelStats() error {
return err
}

// Ensure we always reap the child to avoid zombies on any return path.
// Ensure we always reap the child to avoid zombies on any return path and
// propagate a non-zero exit code if no other error was set.
defer func() {
// Best-effort close of the pipe (unblock the child if it writes)
_ = stdout.Close()
if cmd.ProcessState == nil || !cmd.ProcessState.Exited() {
_ = cmd.Process.Kill()
}
_ = cmd.Wait()
if waitErr := cmd.Wait(); err == nil && waitErr != nil {
err = waitErr
}
}()

scanner := bufio.NewScanner(stdout)
var header1 string
var header2 string
var engineNames []string
var friendlyNames []string
var preEngineCols int
var powerIndex int
var hadDataRow bool
// skip first data row because it sometimes has erroneous data
var skippedFirstDataRow bool

for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
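
The deferred block above is the heart of the change: the child is always reaped, and its exit status is propagated through the new named return when nothing else has failed, which appears to be part of the high-CPU-usage fix noted in the changelog. A minimal, self-contained sketch of the same reap-and-propagate pattern, using a generic shell child rather than intel_gpu_top:

```go
package main

import (
	"bufio"
	"fmt"
	"os/exec"
)

func runAndReap() (err error) {
	cmd := exec.Command("sh", "-c", "echo one; echo two; exit 3") // hypothetical child
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return err
	}
	defer func() {
		_ = stdout.Close() // unblock the child if it is still writing
		if cmd.ProcessState == nil || !cmd.ProcessState.Exited() {
			_ = cmd.Process.Kill()
		}
		// Always Wait so the child is reaped; keep its error only if nothing
		// else has already failed.
		if waitErr := cmd.Wait(); err == nil && waitErr != nil {
			err = waitErr
		}
	}()
	scanner := bufio.NewScanner(stdout)
	for scanner.Scan() {
		fmt.Println("line:", scanner.Text())
	}
	return scanner.Err()
}

func main() {
	fmt.Println("err:", runAndReap()) // err: exit status 3
}
```
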
@@ -83,24 +90,34 @@ func (gm *GPUManager) collectIntelStats() error {
}

// first header line
if header1 == "" {
if strings.HasPrefix(line, "Freq") {
header1 = line
continue
}

// second header line
if header2 == "" {
if strings.HasPrefix(line, "req") {
engineNames, friendlyNames, powerIndex, preEngineCols = gm.parseIntelHeaders(header1, line)
header1, header2 = "x", "x" // don't need these anymore
continue
}

// Data row
sample := gm.parseIntelData(line, engineNames, friendlyNames, powerIndex, preEngineCols)
if !skippedFirstDataRow {
skippedFirstDataRow = true
continue
}
sample, err := gm.parseIntelData(line, engineNames, friendlyNames, powerIndex, preEngineCols)
if err != nil {
return err
}
hadDataRow = true
gm.updateIntelFromStats(&sample)
}
if err := scanner.Err(); err != nil {
return err
if scanErr := scanner.Err(); scanErr != nil {
return scanErr
}
if !hadDataRow {
return errNoValidData
}
return nil
}
@@ -145,19 +162,22 @@ func (gm *GPUManager) parseIntelHeaders(header1 string, header2 string) (engineN
return engineNames, friendlyNames, powerIndex, preEngineCols
}

func (gm *GPUManager) parseIntelData(line string, engineNames []string, friendlyNames []string, powerIndex int, preEngineCols int) (sample intelGpuStats) {
func (gm *GPUManager) parseIntelData(line string, engineNames []string, friendlyNames []string, powerIndex int, preEngineCols int) (sample intelGpuStats, err error) {
fields := strings.Fields(line)
if len(fields) == 0 {
return sample
return sample, errNoValidData
}
// Make sure row has enough columns for engines
if need := preEngineCols + 3*len(engineNames); len(fields) < need {
return sample
return sample, errNoValidData
}
if powerIndex >= 0 && powerIndex < len(fields) {
if v, perr := strconv.ParseFloat(fields[powerIndex], 64); perr == nil {
sample.PowerGPU = v
}
if v, perr := strconv.ParseFloat(fields[powerIndex+1], 64); perr == nil {
sample.PowerPkg = v
}
}
if len(engineNames) > 0 {
sample.Engines = make(map[string]float64, len(engineNames))
@@ -175,5 +195,5 @@ func (gm *GPUManager) parseIntelData(line string, engineNames []string, friendly
}
}
}
return sample
return sample, nil
}

@@ -849,13 +849,15 @@ func TestIntelCollectorStreaming(t *testing.T) {
dir := t.TempDir()
os.Setenv("PATH", dir)

// Create a fake intel_gpu_top that prints -l format with two samples and exits
// Create a fake intel_gpu_top that prints -l format with four samples (first will be skipped) and exits
scriptPath := filepath.Join(dir, "intel_gpu_top")
script := `#!/bin/sh
echo "Freq MHz IRQ RC6 Power W IMC MiB/s RCS BCS VCS"
echo " req act /s % gpu pkg rd wr % se wa % se wa % se wa"
echo "373 373 224 45 1.50 4.13 2554 714 12.34 0 0 0.00 0 0 5.00 0 0"
echo "226 223 338 58 2.00 2.69 1820 965 0.00 0 0 0.00 0 0 0.00 0 0"`
echo "226 223 338 58 2.00 2.69 1820 965 0.00 0 0 0.00 0 0 0.00 0 0"
echo "189 187 412 67 1.80 2.45 1950 823 8.50 2 1 15.00 1 0 22.00 0 1"
echo "298 295 278 51 2.20 3.12 1675 942 5.75 1 2 9.50 3 1 12.00 1 0"`
if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil {
t.Fatal(err)
}
@@ -864,21 +866,22 @@ echo "226 223 338 58 2.00 2.69 1820 965 0.00 0 0 0.00
GpuDataMap: make(map[string]*system.GPUData),
}

// Run the collector once; it should read two samples and return
// Run the collector once; it should read four samples but skip the first and return
if err := gm.collectIntelStats(); err != nil {
t.Fatalf("collectIntelStats error: %v", err)
}

gpu := gm.GpuDataMap["0"]
require.NotNil(t, gpu)
// Power should be sum of samples: 1.5 + 2.0 = 3.5
assert.EqualValues(t, 3.5, gpu.Power)
// Engines aggregated
assert.EqualValues(t, 12.34, gpu.Engines["Render/3D"])
assert.EqualValues(t, 5.0, gpu.Engines["Video"])
assert.EqualValues(t, 0.0, gpu.Engines["Blitter"]) // BCS is zero in both samples
// Count should be 2 samples
assert.Equal(t, float64(2), gpu.Count)
// Power should be sum of samples 2-4 (first is skipped): 2.0 + 1.8 + 2.2 = 6.0
assert.EqualValues(t, 6.0, gpu.Power)
assert.InDelta(t, 8.26, gpu.PowerPkg, 0.01) // Allow small floating point differences
// Engines aggregated from samples 2-4
assert.EqualValues(t, 14.25, gpu.Engines["Render/3D"]) // 0.00 + 8.50 + 5.75
assert.EqualValues(t, 34.0, gpu.Engines["Video"]) // 0.00 + 22.00 + 12.00
assert.EqualValues(t, 24.5, gpu.Engines["Blitter"]) // 0.00 + 15.00 + 9.50
// Count should be 3 samples (first is skipped)
assert.Equal(t, float64(3), gpu.Count)
}

func TestParseIntelHeaders(t *testing.T) {
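
For reference, the expected PowerPkg of 8.26 in the streaming test above appears to come from summing the pkg column of samples 2-4 (2.69 + 2.45 + 3.12 = 8.26); the first sample's 4.13 is excluded because the first data row is skipped.
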
@@ -970,6 +973,7 @@ func TestParseIntelData(t *testing.T) {
preEngineCols int
wantPowerGPU float64
wantEngines map[string]float64
wantErr error
}{
{
name: "basic data with power and engines",
@@ -1022,6 +1026,7 @@ func TestParseIntelData(t *testing.T) {
preEngineCols: 8,
wantPowerGPU: 0.0,
wantEngines: nil, // empty sample returned
wantErr: errNoValidData,
},
{
name: "empty line",
@@ -1032,6 +1037,7 @@ func TestParseIntelData(t *testing.T) {
preEngineCols: 8,
wantPowerGPU: 0.0,
wantEngines: nil,
wantErr: errNoValidData,
},
{
name: "data with invalid power value",
@@ -1076,7 +1082,8 @@ func TestParseIntelData(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gm := &GPUManager{}
sample := gm.parseIntelData(tt.line, tt.engineNames, tt.friendlyNames, tt.powerIndex, tt.preEngineCols)
sample, err := gm.parseIntelData(tt.line, tt.engineNames, tt.friendlyNames, tt.powerIndex, tt.preEngineCols)
assert.Equal(t, tt.wantErr, err)

assert.Equal(t, tt.wantPowerGPU, sample.PowerGPU)
assert.Equal(t, tt.wantEngines, sample.Engines)

@@ -102,6 +102,9 @@ func (a *Agent) getSystemStats() system.Stats {
// cache + buffers value for default mem calculation
// note: gopsutil automatically adds SReclaimable to v.Cached
cacheBuff := v.Cached + v.Buffers - v.Shared
if cacheBuff <= 0 {
cacheBuff = max(v.Total-v.Free-v.Used, 0)
}
// htop memory calculation overrides (likely outdated as of mid 2025)
if a.memCalc == "htop" {
// cacheBuff = v.Cached + v.Buffers - v.Shared

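
The new fallback above kicks in when Cached + Buffers - Shared is not positive: the cache/buffers figure then becomes whatever memory is neither free nor used, clamped at zero. A simplified sketch with made-up numbers (the agent's actual field types come from gopsutil's VirtualMemoryStat and may differ):

```go
package main

import "fmt"

// cacheBuff mirrors the fallback logic above with plain integers.
func cacheBuff(total, free, used, cached, buffers, shared int64) int64 {
	cb := cached + buffers - shared
	if cb <= 0 {
		cb = total - free - used
		if cb < 0 {
			cb = 0
		}
	}
	return cb
}

func main() {
	// Hypothetical system where shared memory exceeds cached+buffers,
	// so the primary formula would go negative.
	fmt.Println(cacheBuff(16e9, 4e9, 9e9, 1e9, 2e8, 2e9)) // prints 3000000000
}
```
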
@@ -30,11 +30,11 @@ func (s *systemdRestarter) Restart() error {
type openRCRestarter struct{ cmd string }

func (o *openRCRestarter) Restart() error {
if err := exec.Command(o.cmd, "status", "beszel-agent").Run(); err != nil {
if err := exec.Command(o.cmd, "beszel-agent", "status").Run(); err != nil {
return nil
}
ghupdate.ColorPrint(ghupdate.ColorYellow, "Restarting beszel-agent via OpenRC…")
return exec.Command(o.cmd, "restart", "beszel-agent").Run()
return exec.Command(o.cmd, "beszel-agent", "restart").Run()
}

type openWRTRestarter struct{ cmd string }

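
The OpenRC fix swaps the argument order: rc-service expects the service name before the action (rc-service beszel-agent restart), so the previous status/restart calls passed the arguments backwards. A hypothetical test sketch (not part of the diff) showing how the same fake-binary-on-PATH technique used for intel_gpu_top above could pin down the argument order:

```go
package restarter_test

import (
	"os"
	"os/exec"
	"path/filepath"
	"testing"
)

// TestOpenRCArgumentOrder is illustrative only: a stub rc-service records its
// arguments so the expected "<service> <action>" order can be asserted.
func TestOpenRCArgumentOrder(t *testing.T) {
	dir := t.TempDir()
	logFile := filepath.Join(dir, "args.log")
	stub := "#!/bin/sh\necho \"$@\" > " + logFile + "\n"
	if err := os.WriteFile(filepath.Join(dir, "rc-service"), []byte(stub), 0755); err != nil {
		t.Fatal(err)
	}
	t.Setenv("PATH", dir)

	// Same invocation shape as the corrected openRCRestarter call.
	if err := exec.Command("rc-service", "beszel-agent", "restart").Run(); err != nil {
		t.Fatal(err)
	}
	got, err := os.ReadFile(logFile)
	if err != nil {
		t.Fatal(err)
	}
	if string(got) != "beszel-agent restart\n" {
		t.Fatalf("unexpected arguments: %q", got)
	}
}
```
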
@@ -6,7 +6,7 @@ import "github.com/blang/semver"

const (
// Version is the current version of the application.
Version = "0.12.11"
Version = "0.12.12"
// AppName is the name of the application.
AppName = "beszel"
)

@@ -53,6 +53,7 @@ type GPUData struct {
Power float64 `json:"p,omitempty" cbor:"4,keyasint,omitempty"`
Count float64 `json:"-"`
Engines map[string]float64 `json:"e,omitempty" cbor:"5,keyasint,omitempty"`
PowerPkg float64 `json:"pp,omitempty" cbor:"6,keyasint,omitempty"`
}

type FsStats struct {

internal/site/package-lock.json (generated)
@@ -1,12 +1,12 @@
{
"name": "beszel",
"version": "0.12.11",
"version": "0.12.12",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "beszel",
"version": "0.12.11",
"version": "0.12.12",
"dependencies": {
"@henrygd/queue": "^1.0.7",
"@henrygd/semaphore": "^0.0.2",

@@ -1,7 +1,7 @@
{
"name": "beszel",
"private": true,
"version": "0.12.11",
"version": "0.12.12",
"type": "module",
"scripts": {
"dev": "vite --host",

@@ -9,46 +9,58 @@ import {
xAxis,
} from "@/components/ui/chart"
import { chartMargin, cn, decimalString, formatShortDate, toFixedFloat } from "@/lib/utils"
import type { ChartData } from "@/types"
import type { ChartData, GPUData } from "@/types"
import { useYAxisWidth } from "./hooks"
import type { DataPoint } from "./line-chart"

export default memo(function GpuPowerChart({ chartData }: { chartData: ChartData }) {
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
const packageKey = " package"

const { gpuData, dataPoints } = useMemo(() => {
const dataPoints = [] as DataPoint[]
const gpuData = [] as Record<string, GPUData | string>[]
const addedKeys = new Map<string, number>()

const addKey = (key: string, value: number) => {
addedKeys.set(key, (addedKeys.get(key) ?? 0) + value)
}

for (const stats of chartData.systemStats) {
const gpus = stats.stats?.g ?? {}
const data = { created: stats.created } as Record<string, GPUData | string>
for (const id in gpus) {
const gpu = gpus[id] as GPUData
data[gpu.n] = gpu
addKey(gpu.n, gpu.p ?? 0)
if (gpu.pp) {
data[`${gpu.n}${packageKey}`] = gpu
addKey(`${gpu.n}${packageKey}`, gpu.pp ?? 0)
}
}
gpuData.push(data)
}
const sortedKeys = Array.from(addedKeys.entries())
.sort(([, a], [, b]) => b - a)
.map(([key]) => key)

for (let i = 0; i < sortedKeys.length; i++) {
const id = sortedKeys[i]
dataPoints.push({
label: id,
dataKey: (gpuData: Record<string, GPUData>) => {
return id.endsWith(packageKey) ? (gpuData[id]?.pp ?? 0) : (gpuData[id]?.p ?? 0)
},
color: `hsl(${226 + (((i * 360) / addedKeys.size) % 360)}, 65%, 52%)`,
})
}
return { gpuData, dataPoints }
}, [chartData])

if (chartData.systemStats.length === 0) {
return null
}

/** Format temperature data for chart and assign colors */
const newChartData = useMemo(() => {
const newChartData = { data: [], colors: {} } as {
data: Record<string, number | string>[]
colors: Record<string, string>
}
const powerSums = {} as Record<string, number>
for (const data of chartData.systemStats) {
const newData = { created: data.created } as Record<string, number | string>

for (const gpu of Object.values(data.stats?.g ?? {})) {
if (gpu.p) {
const name = gpu.n
newData[name] = gpu.p
powerSums[name] = (powerSums[name] ?? 0) + newData[name]
}
}
newChartData.data.push(newData)
}
const keys = Object.keys(powerSums).sort((a, b) => powerSums[b] - powerSums[a])
for (const key of keys) {
newChartData.colors[key] = `hsl(${(226 + (keys.indexOf(key) * 360) / keys.length) % 360}, 60%, 55%)`
}
return newChartData
}, [chartData])

const colors = Object.keys(newChartData.colors)

// console.log('rendered at', new Date())

return (
<div>
<ChartContainer
@@ -56,7 +68,7 @@ export default memo(function GpuPowerChart({ chartData }: { chartData: ChartData
"opacity-100": yAxisWidth,
})}
>
<LineChart accessibilityLayer data={newChartData.data} margin={chartMargin}>
<LineChart accessibilityLayer data={gpuData} margin={chartMargin}>
<CartesianGrid vertical={false} />
<YAxis
direction="ltr"
@@ -85,19 +97,19 @@ export default memo(function GpuPowerChart({ chartData }: { chartData: ChartData
/>
}
/>
{colors.map((key) => (
{dataPoints.map((dataPoint) => (
<Line
key={key}
dataKey={key}
name={key}
key={dataPoint.label}
dataKey={dataPoint.dataKey}
name={dataPoint.label}
type="monotoneX"
dot={false}
strokeWidth={1.5}
stroke={newChartData.colors[key]}
stroke={dataPoint.color as string}
isAnimationActive={false}
/>
))}
{colors.length > 1 && <ChartLegend content={<ChartLegendContent />} />}
{dataPoints.length > 1 && <ChartLegend content={<ChartLegendContent />} />}
</LineChart>
</ChartContainer>
</div>

@@ -398,7 +398,7 @@ export default memo(function SystemDetail({ name }: { name: string }) {
const dataEmpty = !chartLoading && chartData.systemStats.length === 0
const lastGpuVals = Object.values(systemStats.at(-1)?.stats.g ?? {})
const hasGpuData = lastGpuVals.length > 0
const hasGpuPowerData = lastGpuVals.some((gpu) => gpu.p !== undefined)
const hasGpuPowerData = lastGpuVals.some((gpu) => gpu.p !== undefined || gpu.pp !== undefined)
const hasGpuEnginesData = lastGpuVals.some((gpu) => gpu.e !== undefined)

let translatedStatus: string = system.status

internal/site/src/types.d.ts (vendored)
@@ -158,6 +158,8 @@ export interface GPUData {
u: number
/** power (w) */
p?: number
/** power package (w) */
pp?: number
/** engines */
e?: Record<string, number>
}

@@ -1,3 +1,13 @@
## 0.12.12

- Fix high CPU usage when `intel_gpu_top` returns an error. (#1203)

- Add `SKIP_GPU` environment variable to skip GPU data collection. (#1203)

- Add fallback cache/buff memory calculation when cache/buff isn't available ([#1198](https://github.com/henrygd/beszel/issues/1198))

- Fix automatic agent update / restart on OpenRC. (#1199)

## 0.12.11

- Adjust calculation of cached memory (fixes #1187, #1196)

@@ -15,8 +15,6 @@ StateDirectory=beszel-agent
# Security/sandboxing settings
KeyringMode=private
LockPersonality=yes
NoNewPrivileges=yes
PrivateTmp=yes
ProtectClock=yes
ProtectHome=read-only
ProtectHostname=yes
@@ -24,7 +22,6 @@ ProtectKernelLogs=yes
ProtectSystem=strict
RemoveIPC=yes
RestrictSUIDSGID=true
SystemCallArchitectures=native

[Install]
WantedBy=multi-user.target

@@ -920,7 +920,6 @@ StateDirectory=beszel-agent
# Security/sandboxing settings
KeyringMode=private
LockPersonality=yes
NoNewPrivileges=yes
ProtectClock=yes
ProtectHome=read-only
ProtectHostname=yes
