fix: limit frame and total sizes when reading docker logs (#1322)

- Add per-frame size limit (1MB) to prevent single massive log entries
- Add total log size limit (5MB) for network transfer and browser rendering
- Gracefully truncate logs that exceed limits instead of consuming unbounded memory
Author: henrygd
Date: 2025-10-26 11:02:18 -04:00
parent 97394e775f
commit 7cf123a99e
2 changed files with 199 additions and 0 deletions
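For context: Docker multiplexes container logs in the stdcopy framing, where each frame begins with an 8-byte header. Byte 0 is the stream type (1 = stdout, 2 = stderr), bytes 1-3 are padding, and bytes 4-7 hold the payload length as a big-endian uint32. A minimal standalone sketch of parsing one header (illustrative only; the real decoding is in decodeDockerLogStream below):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// stdcopy frame header: [0] stream type, [1:4] padding,
	// [4:8] big-endian payload length (here 11 bytes of stdout).
	header := []byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0B}
	streamType := header[0]
	frameLen := binary.BigEndian.Uint32(header[4:8])
	fmt.Printf("stream=%d len=%d\n", streamType, frameLen) // stream=1 len=11
}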


@@ -32,6 +32,12 @@ const (
	maxMemoryUsage uint64 = 100 * 1024 * 1024 * 1024 * 1024
	// Number of log lines to request when fetching container logs
	dockerLogsTail = 200
	// Maximum size of a single log frame (1MB) to prevent memory exhaustion
	// A single log line larger than 1MB is likely an error or misconfiguration
	maxLogFrameSize = 1024 * 1024
	// Maximum total log content size (5MB) to prevent memory exhaustion
	// This provides a reasonable limit for network transfer and browser rendering
	maxTotalLogSize = 5 * 1024 * 1024
)

type dockerManager struct {
@@ -657,6 +663,7 @@ func decodeDockerLogStream(reader io.Reader, builder *strings.Builder) error {
	const headerSize = 8
	var header [headerSize]byte
	buf := make([]byte, 0, dockerLogsTail*200)
	totalBytesRead := 0

	for {
		if _, err := io.ReadFull(reader, header[:]); err != nil {
@@ -671,6 +678,19 @@ func decodeDockerLogStream(reader io.Reader, builder *strings.Builder) error {
			continue
		}

		// Prevent memory exhaustion from excessively large frames
		if frameLen > maxLogFrameSize {
			return fmt.Errorf("log frame size (%d) exceeds maximum (%d)", frameLen, maxLogFrameSize)
		}

		// Check if reading this frame would exceed total log size limit
		if totalBytesRead+int(frameLen) > maxTotalLogSize {
			// Read and discard remaining data to avoid blocking
			_, _ = io.Copy(io.Discard, io.LimitReader(reader, int64(frameLen)))
			slog.Debug("Truncating logs: limit reached", "read", totalBytesRead, "limit", maxTotalLogSize)
			return nil
		}

		buf = allocateBuffer(buf, int(frameLen))
		if _, err := io.ReadFull(reader, buf[:frameLen]); err != nil {
			if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
@@ -682,6 +702,7 @@ func decodeDockerLogStream(reader io.Reader, builder *strings.Builder) error {
			return err
		}
		builder.Write(buf[:frameLen])
		totalBytesRead += int(frameLen)
	}
}
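One detail worth noting in the truncation branch above: io.Copy(io.Discard, io.LimitReader(reader, n)) consumes exactly the frame's payload without buffering it, which keeps the reader aligned and avoids stalling on unread data. A standalone sketch of that pattern:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// drain consumes exactly n bytes from r without retaining them,
// leaving r positioned at the next byte after the skipped span.
func drain(r io.Reader, n int64) error {
	_, err := io.Copy(io.Discard, io.LimitReader(r, n))
	return err
}

func main() {
	r := bytes.NewReader([]byte("skipped|kept"))
	if err := drain(r, 8); err != nil {
		fmt.Println("drain failed:", err)
		return
	}
	rest, _ := io.ReadAll(r)
	fmt.Printf("%s\n", rest) // kept
}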


@@ -4,8 +4,10 @@
package agent

import (
	"bytes"
	"encoding/json"
	"os"
	"strings"
	"testing"
	"time"
@@ -911,6 +913,8 @@ func TestConstantsAndUtilityFunctions(t *testing.T) {
	assert.Equal(t, uint16(60000), defaultCacheTimeMs)
	assert.Equal(t, uint64(5e9), maxNetworkSpeedBps)
	assert.Equal(t, 2100, dockerTimeoutMs)
	assert.Equal(t, uint32(1024*1024), uint32(maxLogFrameSize)) // 1MB
	assert.Equal(t, 5*1024*1024, maxTotalLogSize)               // 5MB

	// Test utility functions
	assert.Equal(t, 1.5, twoDecimals(1.499))
@@ -921,3 +925,177 @@ func TestConstantsAndUtilityFunctions(t *testing.T) {
	assert.Equal(t, 0.5, bytesToMegabytes(524288)) // 512 KB
	assert.Equal(t, 0.0, bytesToMegabytes(0))
}

func TestDecodeDockerLogStream(t *testing.T) {
	tests := []struct {
		name        string
		input       []byte
		expected    string
		expectError bool
	}{
		{
			name: "simple log entry",
			input: []byte{
				// Frame 1: stdout, 11 bytes
				0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0B,
				'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd',
			},
			expected:    "Hello World",
			expectError: false,
		},
		{
			name: "multiple frames",
			input: []byte{
				// Frame 1: stdout, 5 bytes
				0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
				'H', 'e', 'l', 'l', 'o',
				// Frame 2: stdout, 5 bytes
				0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
				'W', 'o', 'r', 'l', 'd',
			},
			expected:    "HelloWorld",
			expectError: false,
		},
		{
			name: "zero length frame",
			input: []byte{
				// Frame 1: stdout, 0 bytes
				0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				// Frame 2: stdout, 5 bytes
				0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
				'H', 'e', 'l', 'l', 'o',
			},
			expected:    "Hello",
			expectError: false,
		},
		{
			name:        "empty input",
			input:       []byte{},
			expected:    "",
			expectError: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			reader := bytes.NewReader(tt.input)
			var builder strings.Builder
			err := decodeDockerLogStream(reader, &builder)
			if tt.expectError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tt.expected, builder.String())
			}
		})
	}
}

func TestDecodeDockerLogStreamMemoryProtection(t *testing.T) {
	t.Run("excessively large frame should error", func(t *testing.T) {
		// Create a frame with size exceeding maxLogFrameSize
		excessiveSize := uint32(maxLogFrameSize + 1)
		input := []byte{
			// Frame header with excessive size
			0x01, 0x00, 0x00, 0x00,
			byte(excessiveSize >> 24), byte(excessiveSize >> 16), byte(excessiveSize >> 8), byte(excessiveSize),
		}
		reader := bytes.NewReader(input)
		var builder strings.Builder
		err := decodeDockerLogStream(reader, &builder)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "log frame size")
		assert.Contains(t, err.Error(), "exceeds maximum")
	})

	t.Run("total size limit should truncate", func(t *testing.T) {
		// Create frames that exceed maxTotalLogSize (5MB)
		// Use frames within maxLogFrameSize (1MB) to avoid single-frame rejection
		frameSize := uint32(800 * 1024) // 800KB per frame
		var input []byte

		// Frames 1-6: 800KB each (total 4.8MB - within 5MB limit)
		for i := 0; i < 6; i++ {
			char := byte('A' + i)
			frameHeader := []byte{
				0x01, 0x00, 0x00, 0x00,
				byte(frameSize >> 24), byte(frameSize >> 16), byte(frameSize >> 8), byte(frameSize),
			}
			input = append(input, frameHeader...)
			input = append(input, bytes.Repeat([]byte{char}, int(frameSize))...)
		}

		// Frame 7: 800KB (would bring total to 5.6MB, exceeding 5MB limit - should be truncated)
		frame7Header := []byte{
			0x01, 0x00, 0x00, 0x00,
			byte(frameSize >> 24), byte(frameSize >> 16), byte(frameSize >> 8), byte(frameSize),
		}
		input = append(input, frame7Header...)
		input = append(input, bytes.Repeat([]byte{'Z'}, int(frameSize))...)

		reader := bytes.NewReader(input)
		var builder strings.Builder
		err := decodeDockerLogStream(reader, &builder)

		// Should complete without error (graceful truncation)
		assert.NoError(t, err)

		// Should have read 6 frames (4.8MB total, stopping before 7th would exceed 5MB limit)
		expectedSize := int(frameSize) * 6
		assert.Equal(t, expectedSize, builder.Len())

		// Should contain A-F but not Z
		result := builder.String()
		assert.Contains(t, result, "A")
		assert.Contains(t, result, "F")
		assert.NotContains(t, result, "Z")
	})
}
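
// Aside, not part of this diff: a hypothetical helper that would build
// the same 8-byte frame header as the manual shifts above, using
// encoding/binary (which the test file would then need to import).
func makeHeader(streamType byte, frameLen uint32) []byte {
	header := make([]byte, 8)
	header[0] = streamType
	binary.BigEndian.PutUint32(header[4:8], frameLen)
	return header
}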

func TestAllocateBuffer(t *testing.T) {
	tests := []struct {
		name          string
		currentCap    int
		needed        int
		expectedCap   int
		shouldRealloc bool
	}{
		{
			name:          "buffer has enough capacity",
			currentCap:    1024,
			needed:        512,
			expectedCap:   1024,
			shouldRealloc: false,
		},
		{
			name:          "buffer needs reallocation",
			currentCap:    512,
			needed:        1024,
			expectedCap:   1024,
			shouldRealloc: true,
		},
		{
			name:          "buffer needs exact size",
			currentCap:    1024,
			needed:        1024,
			expectedCap:   1024,
			shouldRealloc: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			current := make([]byte, 0, tt.currentCap)
			result := allocateBuffer(current, tt.needed)
			assert.Equal(t, tt.needed, len(result))
			assert.GreaterOrEqual(t, cap(result), tt.expectedCap)
			if tt.shouldRealloc {
				// If reallocation was needed, capacity should be at least the needed size
				assert.GreaterOrEqual(t, cap(result), tt.needed)
			}
		})
	}
}
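
allocateBuffer itself is not touched by this commit, so its body does not appear in the diff. A minimal sketch consistent with the assertions above (reuse existing capacity when it suffices, reallocate otherwise); the actual implementation may differ:

// Sketch only; assumes the behavior implied by TestAllocateBuffer.
func allocateBuffer(buf []byte, needed int) []byte {
	if cap(buf) >= needed {
		return buf[:needed] // reslice: existing capacity is enough
	}
	return make([]byte, needed) // grow: allocate a fresh buffer
}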