agent: remove unused log filter and unneeded library. (#24873)

The Nomad agent used a log filter to ensure logs were written at
the expected level. Since the adoption of hclog this is no longer
required, as hclog acts as the gatekeeper and filter for logging.
All log writers accept messages from hclog, which has already
performed the filtering.
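
As a rough illustration of that reasoning (a minimal sketch, not the agent's actual wiring: the "agent" logger name and WARN level are placeholders), the snippet below builds an hclog logger over a plain io.MultiWriter. The level gate is applied once inside hclog, so a writer sitting behind the MultiWriter never receives entries below the configured level and needs no logutils.LevelFilter of its own.

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"

	"github.com/hashicorp/go-hclog"
)

func main() {
	// Stand-in for the agent's file/syslog writers.
	var fileBuf bytes.Buffer

	// One MultiWriter feeds every sink, loosely mirroring the agent's logOutput.
	output := io.MultiWriter(os.Stderr, &fileBuf)

	// The level gate lives here, in hclog, and nowhere else.
	logger := hclog.New(&hclog.LoggerOptions{
		Name:   "agent",                       // placeholder name
		Level:  hclog.LevelFromString("WARN"), // placeholder level
		Output: output,
	})

	logger.Warn("kept: at or above the configured level")
	logger.Info("dropped: hclog discards this before any writer sees it")

	// The buffer only ever receives the WARN entry, without a per-writer filter.
	fmt.Printf("buffer saw the info entry: %v\n",
		bytes.Contains(fileBuf.Bytes(), []byte("dropped")))
}

This is the same shape the diffs below move toward: setupLoggers hands hclog a gated writer plus optional syslog and file writers, and the per-writer LevelFilter plumbing goes away.
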
James Rasell
2025-01-17 08:51:27 +01:00
committed by GitHub
parent b4cc5d88e7
commit 753f752cdd
8 changed files with 219 additions and 193 deletions

View File

@@ -28,7 +28,6 @@ import (
discover "github.com/hashicorp/go-discover"
hclog "github.com/hashicorp/go-hclog"
gsyslog "github.com/hashicorp/go-syslog"
"github.com/hashicorp/logutils"
"github.com/hashicorp/nomad/helper"
flaghelper "github.com/hashicorp/nomad/helper/flags"
gatedwriter "github.com/hashicorp/nomad/helper/gated-writer"
@@ -55,8 +54,6 @@ type Command struct {
args []string
agent *Agent
httpServers []*HTTPServer
logFilter *logutils.LevelFilter
logOutput io.Writer
retryJoinErrCh chan struct{}
}
@@ -552,28 +549,31 @@ func (c *Command) IsValidConfig(config, cmdConfig *Config) bool {
return true
}
// SetupLoggers is used to set up the logGate, and our logOutput
func SetupLoggers(ui cli.Ui, config *Config) (*logutils.LevelFilter, *gatedwriter.Writer, io.Writer) {
// Setup logging. First create the gated log writer, which will
// store logs until we're ready to show them. Then create the level
// filter, filtering logs of the specified level.
// setupLoggers is used to set up the logGate and our logOutput.
func setupLoggers(ui cli.Ui, config *Config) (*gatedwriter.Writer, io.Writer) {
// Pull the log level from the configuration, ensure it is titled and then
// perform validation. Do this before the gated writer, as this can
// generate an error, whereas the writer does not.
logLevel := strings.ToUpper(config.LogLevel)
if !isLogLevelValid(logLevel) {
ui.Error(fmt.Sprintf(
"Invalid log level: %s. Valid log levels are: %v",
logLevel, validLogLevels.Slice()))
return nil, nil
}
// Create a gated log writer, which will store logs until we're ready to
// output them.
logGate := &gatedwriter.Writer{
Writer: &cli.UiWriter{Ui: ui},
}
logFilter := LevelFilter()
logFilter.MinLevel = logutils.LogLevel(strings.ToUpper(config.LogLevel))
logFilter.Writer = logGate
if !ValidateLevelFilter(logFilter.MinLevel, logFilter) {
ui.Error(fmt.Sprintf(
"Invalid log level: %s. Valid log levels are: %v",
logFilter.MinLevel, logFilter.Levels))
return nil, nil, nil
}
// Initialize our array of log writers with the gated writer. Additional
// log writers will be appended if/when configured.
writers := []io.Writer{logGate}
// Create a log writer, and wrap a logOutput around it
writers := []io.Writer{logFilter}
logLevel := strings.ToUpper(config.LogLevel)
if logLevel == "OFF" {
config.EnableSyslog = false
}
@@ -583,7 +583,7 @@ func SetupLoggers(ui cli.Ui, config *Config) (*logutils.LevelFilter, *gatedwrite
l, err := gsyslog.NewLogger(getSysLogPriority(logLevel), config.SyslogFacility, "nomad")
if err != nil {
ui.Error(fmt.Sprintf("Syslog setup failed: %v", err))
return nil, nil, nil
return nil, nil
}
writers = append(writers, newSyslogWriter(l, config.LogJson))
}
@@ -603,7 +603,7 @@ func SetupLoggers(ui cli.Ui, config *Config) (*logutils.LevelFilter, *gatedwrite
duration, err := time.ParseDuration(config.LogRotateDuration)
if err != nil {
ui.Error(fmt.Sprintf("Failed to parse log rotation duration: %v", err))
return nil, nil, nil
return nil, nil
}
logRotateDuration = duration
} else {
@@ -612,19 +612,18 @@ func SetupLoggers(ui cli.Ui, config *Config) (*logutils.LevelFilter, *gatedwrite
}
logFile := &logFile{
logFilter: logFilter,
fileName: fileName,
logPath: dir,
duration: logRotateDuration,
MaxBytes: config.LogRotateBytes,
MaxFiles: config.LogRotateMaxFiles,
fileName: fileName,
logPath: dir,
duration: logRotateDuration,
MaxBytes: config.LogRotateBytes,
MaxFiles: config.LogRotateMaxFiles,
}
writers = append(writers, logFile)
}
logOutput := io.MultiWriter(writers...)
return logFilter, logGate, logOutput
return logGate, logOutput
}
// setupAgent is used to start the agent and various interfaces
@@ -801,10 +800,8 @@ func (c *Command) Run(args []string) int {
}
}
// Setup the log outputs
logFilter, logGate, logOutput := SetupLoggers(c.Ui, config)
c.logFilter = logFilter
c.logOutput = logOutput
// Set up the log outputs.
logGate, logOutput := setupLoggers(c.Ui, config)
if logGate == nil {
return 1
}
@@ -1116,13 +1113,12 @@ func (c *Command) handleReload() {
}
// Change the log level
minLevel := logutils.LogLevel(strings.ToUpper(newConf.LogLevel))
if ValidateLevelFilter(minLevel, c.logFilter) {
c.logFilter.SetMinLevel(minLevel)
} else {
minLevel := strings.ToUpper(newConf.LogLevel)
if !isLogLevelValid(minLevel) {
c.Ui.Error(fmt.Sprintf(
"Invalid log level: %s. Valid log levels are: %v",
minLevel, c.logFilter.Levels))
minLevel, validLogLevels.Slice()))
// Keep the current log level
newConf.LogLevel = c.agent.GetConfig().LogLevel

View File

@@ -12,15 +12,15 @@ import (
"testing"
"github.com/hashicorp/cli"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper/pointer"
"github.com/shoenig/test/must"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/nomad/structs/config"
"github.com/hashicorp/nomad/version"
"github.com/shoenig/test/must"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestCommand_Implements(t *testing.T) {
@@ -625,3 +625,59 @@ vault {
})
}
}
func Test_setupLoggers_logFile(t *testing.T) {
// Generate a mock UI and temporary log file location to write to.
mockUI := cli.NewMockUi()
logFile := filepath.Join(t.TempDir(), "nomad.log")
// The initial configuration contains an invalid log level parameter.
cfg := &Config{
LogFile: logFile,
LogLevel: "warning",
}
// Generate the loggers and ensure the correct error is generated.
gatedWriter, writer := setupLoggers(mockUI, cfg)
must.Nil(t, gatedWriter)
must.Nil(t, writer)
must.StrContains(t, mockUI.ErrorWriter.String(), "Invalid log level: WARNING")
mockUI.ErrorWriter.Reset()
mockUI.OutputWriter.Reset()
// Update the log level, so that it is a valid option and set up the
// loggers again.
cfg.LogLevel = "warn"
gatedWriter, writer = setupLoggers(mockUI, cfg)
must.NotNil(t, gatedWriter)
must.NotNil(t, writer)
// Build the logger as the command does.
testLogger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
Name: "agent",
Level: hclog.LevelFromString(cfg.LogLevel),
Output: writer,
})
// Flush the log gate and write messages at all levels.
gatedWriter.Flush()
testLogger.Error("error log entry")
testLogger.Warn("warn log entry")
testLogger.Info("info log entry")
testLogger.Debug("debug log entry")
testLogger.Trace("trace log entry")
// Read the file out and ensure it only contains log entries which match
// our desired level.
fileContents, err := os.ReadFile(logFile)
must.NoError(t, err)
fileContentsStr := string(fileContents)
must.StrContains(t, fileContentsStr, "error log entry")
must.StrContains(t, fileContentsStr, "warn log entry")
must.StrNotContains(t, fileContentsStr, "info log entry")
must.StrNotContains(t, fileContentsStr, "debug log entry")
must.StrNotContains(t, fileContentsStr, "trace log entry")
}

View File

@@ -12,8 +12,6 @@ import (
"strings"
"sync"
"time"
"github.com/hashicorp/logutils"
)
var (
@@ -22,8 +20,6 @@ var (
// logFile is used to setup a file based logger that also performs log rotation
type logFile struct {
// Log level Filter to filter out logs that do not matcch LogLevel criteria
logFilter *logutils.LevelFilter
//Name of the log file
fileName string
@@ -121,7 +117,7 @@ func (l *logFile) pruneFiles() error {
return err
}
// Stort the strings as filepath.Glob does not publicly guarantee that files
// Sort the strings as filepath.Glob does not publicly guarantee that files
// are sorted, so here we add an extra defensive sort.
sort.Strings(matches)
@@ -135,15 +131,14 @@ func (l *logFile) pruneFiles() error {
return nil
}
// Write is used to implement io.Writer
// Write is used to implement io.Writer.
//
// Nomad's log file capability is fed by go-hclog which is responsible for
// performing the log level filtering. It is not needed here.
func (l *logFile) Write(b []byte) (int, error) {
// Filter out log entries that do not match log level criteria
if !l.logFilter.Check(b) {
return 0, nil
}
l.acquire.Lock()
defer l.acquire.Unlock()
//Create a new file if we have no file to write to
if l.FileInfo == nil {
if err := l.openNew(); err != nil {

View File

@@ -9,9 +9,8 @@ import (
"testing"
"time"
"github.com/hashicorp/logutils"
"github.com/hashicorp/nomad/ci"
"github.com/stretchr/testify/require"
"github.com/shoenig/test/must"
)
const (
@@ -25,158 +24,125 @@ func TestLogFile_timeRotation(t *testing.T) {
tempDir := t.TempDir()
filt := LevelFilter()
logFile := logFile{
logFilter: filt,
fileName: testFileName,
logPath: tempDir,
duration: testDuration,
fileName: testFileName,
logPath: tempDir,
duration: testDuration,
}
logFile.Write([]byte("Hello World"))
_, err := logFile.Write([]byte("Hello World"))
must.NoError(t, err)
time.Sleep(2 * time.Second)
logFile.Write([]byte("Second File"))
want := 2
if got, _ := os.ReadDir(tempDir); len(got) != want {
t.Errorf("Expected %d files, got %v file(s)", want, len(got))
}
_, err = logFile.Write([]byte("Second File"))
must.NoError(t, err)
numEntries, err := os.ReadDir(tempDir)
must.NoError(t, err)
must.Len(t, 2, numEntries)
}
func TestLogFile_openNew(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
tempDir := t.TempDir()
filt := LevelFilter()
filt.MinLevel = logutils.LogLevel("INFO")
logFile := logFile{
logFilter: filt,
fileName: testFileName,
logPath: tempDir,
MaxBytes: testBytes,
duration: 24 * time.Hour,
fileName: testFileName,
logPath: tempDir,
MaxBytes: testBytes,
duration: 24 * time.Hour,
}
require.NoError(logFile.openNew())
must.NoError(t, logFile.openNew())
_, err := os.ReadFile(logFile.FileInfo.Name())
require.NoError(err)
must.NoError(t, err)
require.Equal(logFile.FileInfo.Name(), filepath.Join(tempDir, testFileName))
must.Eq(t, logFile.FileInfo.Name(), filepath.Join(tempDir, testFileName))
// Check if create time and bytes written are kept when opening the active
// log file again.
bytesWritten, err := logFile.Write([]byte("test"))
require.NoError(err)
must.NoError(t, err)
time.Sleep(2 * time.Second)
require.NoError(logFile.openNew())
must.NoError(t, err)
timeDelta := time.Now().Sub(logFile.LastCreated)
require.GreaterOrEqual(timeDelta, 2*time.Second)
require.Equal(logFile.BytesWritten, int64(bytesWritten))
must.Greater(t, 2*time.Second, timeDelta)
must.Eq(t, logFile.BytesWritten, int64(bytesWritten))
}
func TestLogFile_byteRotation(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
tempDir := t.TempDir()
filt := LevelFilter()
filt.MinLevel = logutils.LogLevel("INFO")
logFile := logFile{
logFilter: filt,
fileName: testFileName,
logPath: tempDir,
MaxBytes: testBytes,
duration: 24 * time.Hour,
fileName: testFileName,
logPath: tempDir,
MaxBytes: testBytes,
duration: 24 * time.Hour,
}
logFile.Write([]byte("Hello World"))
logFile.Write([]byte("Second File"))
want := 2
tempFiles, _ := os.ReadDir(tempDir)
require.Equal(want, len(tempFiles))
}
_, err := logFile.Write([]byte("Hello World"))
must.NoError(t, err)
_, err = logFile.Write([]byte("Second File"))
must.NoError(t, err)
func TestLogFile_logLevelFiltering(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
tempDir := t.TempDir()
filt := LevelFilter()
logFile := logFile{
logFilter: filt,
fileName: testFileName,
logPath: tempDir,
MaxBytes: testBytes,
duration: testDuration,
}
logFile.Write([]byte("[INFO] This is an info message"))
logFile.Write([]byte("[DEBUG] This is a debug message"))
logFile.Write([]byte("[ERR] This is an error message"))
want := 2
tempFiles, _ := os.ReadDir(tempDir)
require.Equal(want, len(tempFiles))
tempFiles, err := os.ReadDir(tempDir)
must.NoError(t, err)
must.Len(t, 2, tempFiles)
}
func TestLogFile_deleteArchives(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
tempDir := t.TempDir()
filt := LevelFilter()
filt.MinLevel = logutils.LogLevel("INFO")
logFile := logFile{
logFilter: filt,
fileName: testFileName,
logPath: tempDir,
MaxBytes: testBytes,
duration: 24 * time.Hour,
MaxFiles: 1,
fileName: testFileName,
logPath: tempDir,
MaxBytes: testBytes,
duration: 24 * time.Hour,
MaxFiles: 1,
}
logFile.Write([]byte("[INFO] Hello World"))
logFile.Write([]byte("[INFO] Second File"))
logFile.Write([]byte("[INFO] Third File"))
want := 2
tempFiles, _ := os.ReadDir(tempDir)
_, err := logFile.Write([]byte("[INFO] Hello World"))
must.NoError(t, err)
_, err = logFile.Write([]byte("[INFO] Second File"))
must.NoError(t, err)
_, err = logFile.Write([]byte("[INFO] Third File"))
must.NoError(t, err)
require.Equal(want, len(tempFiles))
tempFiles, err := os.ReadDir(tempDir)
must.NoError(t, err)
must.Len(t, 2, tempFiles)
for _, tempFile := range tempFiles {
var bytes []byte
var err error
path := filepath.Join(tempDir, tempFile.Name())
if bytes, err = os.ReadFile(path); err != nil {
t.Errorf(err.Error())
return
}
contents := string(bytes)
require.NotEqual("[INFO] Hello World", contents, "oldest log should have been deleted")
bytes, err := os.ReadFile(filepath.Join(tempDir, tempFile.Name()))
must.NoError(t, err)
must.StrNotEqFold(t, "[INFO] Hello World", string(bytes))
}
}
func TestLogFile_deleteArchivesDisabled(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
tempDir := t.TempDir()
filt := LevelFilter()
filt.MinLevel = logutils.LogLevel("INFO")
logFile := logFile{
logFilter: filt,
fileName: testFileName,
logPath: tempDir,
MaxBytes: testBytes,
duration: 24 * time.Hour,
MaxFiles: 0,
fileName: testFileName,
logPath: tempDir,
MaxBytes: testBytes,
duration: 24 * time.Hour,
MaxFiles: 0,
}
logFile.Write([]byte("[INFO] Hello World"))
logFile.Write([]byte("[INFO] Second File"))
logFile.Write([]byte("[INFO] Third File"))
want := 3
tempFiles, _ := os.ReadDir(tempDir)
require.Equal(want, len(tempFiles))
_, err := logFile.Write([]byte("[INFO] Hello World"))
must.NoError(t, err)
_, err = logFile.Write([]byte("[INFO] Second File"))
must.NoError(t, err)
_, err = logFile.Write([]byte("[INFO] Third File"))
must.NoError(t, err)
tempFiles, err := os.ReadDir(tempDir)
must.NoError(t, err)
must.Len(t, 3, tempFiles)
}

View File

@@ -4,28 +4,12 @@
package agent
import (
"io"
"github.com/hashicorp/logutils"
"github.com/hashicorp/go-set/v3"
)
// LevelFilter returns a LevelFilter that is configured with the log
// levels that we use.
func LevelFilter() *logutils.LevelFilter {
return &logutils.LevelFilter{
Levels: []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR", "OFF"},
MinLevel: "INFO",
Writer: io.Discard,
}
}
// validLogLevels is the set of log level values that are valid for a Nomad
// agent.
var validLogLevels = set.From([]string{"TRACE", "DEBUG", "INFO", "WARN", "ERROR", "OFF"})
// ValidateLevelFilter verifies that the log levels within the filter
// are valid.
func ValidateLevelFilter(minLevel logutils.LogLevel, filter *logutils.LevelFilter) bool {
for _, level := range filter.Levels {
if level == minLevel {
return true
}
}
return false
}
// isLogLevelValid returns whether the passed agent log level is valid.
func isLogLevelValid(level string) bool { return validLogLevels.Contains(level) }

View File

@@ -6,26 +6,58 @@ package agent
import (
"testing"
"github.com/hashicorp/logutils"
"github.com/hashicorp/nomad/ci"
"github.com/shoenig/test/must"
)
func TestLevelFilter(t *testing.T) {
func Test_isLogLevelValid(t *testing.T) {
ci.Parallel(t)
filt := LevelFilter()
filt.Levels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERR"}
level := logutils.LogLevel("INFO")
// LevelFilter regards INFO as valid level
if !ValidateLevelFilter(level, filt) {
t.Fatalf("expected valid LogLevel, %s was invalid", level)
testCases := []struct {
name string
inputLevel string
expectedOutput bool
}{
{
name: "trace",
inputLevel: "TRACE",
expectedOutput: true,
},
{
name: "debug",
inputLevel: "DEBUG",
expectedOutput: true,
},
{
name: "info",
inputLevel: "INFO",
expectedOutput: true,
},
{
name: "warn",
inputLevel: "WARN",
expectedOutput: true,
},
{
name: "error",
inputLevel: "ERROR",
expectedOutput: true,
},
{
name: "off",
inputLevel: "OFF",
expectedOutput: true,
},
{
name: "invalid",
inputLevel: "INVALID",
expectedOutput: false,
},
}
level = logutils.LogLevel("FOO")
// LevelFilter regards FOO as invalid level
if ValidateLevelFilter(level, filt) {
t.Fatalf("expected invalid LogLevel, %s was valid", level)
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
must.Eq(t, tc.expectedOutput, isLogLevelValid(tc.inputLevel))
})
}
}

1
go.mod
View File

@@ -83,7 +83,6 @@ require (
github.com/hashicorp/hcl v1.0.1-vault-3
github.com/hashicorp/hcl/v2 v2.20.2-0.20240517235513-55d9c02d147d
github.com/hashicorp/hil v0.0.0-20210521165536-27a72121fd40
github.com/hashicorp/logutils v1.0.0
github.com/hashicorp/memberlist v0.5.1
github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0
github.com/hashicorp/nomad/api v0.0.0-20230103221135-ce00d683f9be

2
go.sum
View File

@@ -769,8 +769,6 @@ github.com/hashicorp/hcl/v2 v2.20.2-0.20240517235513-55d9c02d147d h1:7abftkc86B+
github.com/hashicorp/hcl/v2 v2.20.2-0.20240517235513-55d9c02d147d/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA=
github.com/hashicorp/hil v0.0.0-20210521165536-27a72121fd40 h1:ExwaL+hUy1ys2AWDbsbh/lxQS2EVCYxuj0LoyLTdB3Y=
github.com/hashicorp/hil v0.0.0-20210521165536-27a72121fd40/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE=
github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
github.com/hashicorp/mdns v1.0.4 h1:sY0CMhFmjIPDMlTB+HfymFHCaYLhgifZ0QhjaYKD/UQ=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=