Audit config, seams for enterprise audit features

allow oss to parse sink duration

clean up audit sink parsing

ent eventer config reload

fix typo

Add SetEnabled to eventer interface

client acl test

rm dead code

fix failing test
This commit is contained in:
Drew Bailey
2020-03-22 12:17:33 -04:00
parent f0505a24ad
commit ae5777c4ea
21 changed files with 779 additions and 23 deletions

View File

@@ -74,6 +74,11 @@ func (c *Client) ResolveToken(secretID string) (*acl.ACL, error) {
return a, err
}
// ResolveSecretToken resolves an ACL secret ID to its full ACLToken object.
// Resolution is delegated to resolveTokenAndACL, which fast-paths when ACLs
// are disabled; the ACL object produced alongside the token is discarded.
func (c *Client) ResolveSecretToken(secretID string) (*structs.ACLToken, error) {
	_, t, err := c.resolveTokenAndACL(secretID)
	return t, err
}
func (c *Client) resolveTokenAndACL(secretID string) (*acl.ACL, *structs.ACLToken, error) {
// Fast-path if ACLs are disabled
if !c.config.ACLEnabled {

View File

@@ -165,3 +165,29 @@ func TestClient_ACL_ResolveToken(t *testing.T) {
assert.Equal(t, structs.ErrTokenNotFound, err)
assert.Nil(t, out4)
}
// TestClient_ACL_ResolveSecretToken verifies that a client can resolve a
// known secret ID to a full ACL token via its server.
func TestClient_ACL_ResolveSecretToken(t *testing.T) {
	t.Parallel()

	srv, _, _, cleanupSrv := testACLServer(t, nil)
	defer cleanupSrv()
	testutil.WaitForLeader(t, srv.RPC)

	client, cleanupClient := TestClient(t, func(c *config.Config) {
		c.RPCHandler = srv
		c.ACLEnabled = true
	})
	defer cleanupClient()

	// Seed the server with a token, then resolve it through the client.
	token := mock.ACLToken()
	err := srv.State().UpsertACLTokens(110, []*structs.ACLToken{token})
	assert.Nil(t, err)

	respToken, err := client.ResolveSecretToken(token.SecretID)
	assert.Nil(t, err)
	if assert.NotNil(t, respToken) {
		assert.NotEmpty(t, respToken.AccessorID)
	}
}

View File

@@ -22,6 +22,7 @@ import (
clientconfig "github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/state"
"github.com/hashicorp/nomad/command/agent/consul"
"github.com/hashicorp/nomad/command/agent/event"
"github.com/hashicorp/nomad/helper/pluginutils/loader"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad"
@@ -53,6 +54,7 @@ type Agent struct {
configLock sync.Mutex
logger log.InterceptLogger
eventer event.Eventer
httpLogger log.Logger
logOutput io.Writer
@@ -118,6 +120,9 @@ func NewAgent(config *Config, logger log.InterceptLogger, logOutput io.Writer, i
if err := a.setupClient(); err != nil {
return nil, err
}
if err := a.setupEnterpriseAgent(logger); err != nil {
return nil, err
}
if a.client == nil && a.server == nil {
return nil, fmt.Errorf("must have at least client or server mode enabled")
}
@@ -998,6 +1003,13 @@ func (a *Agent) Reload(newConfig *Config) error {
a.logger.SetLevel(log.LevelFromString(newConfig.LogLevel))
}
// Update eventer config
if newConfig.Audit != nil {
if err := a.entReloadEventer(a.config.Audit); err != nil {
return err
}
}
fullUpdateTLSConfig := func() {
// Completely reload the agent's TLS configuration (moving from non-TLS to
// TLS, or vice versa)

View File

@@ -0,0 +1,41 @@
// +build !ent
package agent
import (
"context"
hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/nomad/command/agent/event"
"github.com/hashicorp/nomad/nomad/structs/config"
)
// noOpEventer is the open-source stand-in for the enterprise audit eventer.
// Every operation is a no-op and the eventer always reports disabled.
type noOpEventer struct{}

// Ensure noOpEventer is an Eventer
var _ event.Eventer = &noOpEventer{}

// Event discards the event and always succeeds.
func (e *noOpEventer) Event(ctx context.Context, eventType string, payload interface{}) error {
	return nil
}

// Enabled always reports false: audit logging is enterprise-only.
func (e *noOpEventer) Enabled() bool {
	return false
}

// Reopen is a no-op; there are no files to reopen.
func (e *noOpEventer) Reopen() error {
	return nil
}

// SetEnabled is a no-op; the OSS eventer cannot be enabled.
func (e *noOpEventer) SetEnabled(enabled bool) {}

// setupEnterpriseAgent installs the no-op eventer on the agent. The
// enterprise build replaces this seam with a real audit eventer.
func (a *Agent) setupEnterpriseAgent(log hclog.Logger) error {
	// configure eventer
	a.eventer = &noOpEventer{}
	return nil
}

// entReloadEventer is a no-op in OSS; the enterprise build reloads the
// eventer with the new audit configuration.
func (a *Agent) entReloadEventer(cfg *config.AuditConfig) error {
	return nil
}

View File

@@ -72,6 +72,7 @@ func (c *Command) readConfig() *Config {
},
Vault: &config.VaultConfig{},
ACL: &ACLConfig{},
Audit: &config.AuditConfig{},
}
flags := flag.NewFlagSet("agent", flag.ContinueOnError)

View File

@@ -169,6 +169,9 @@ type Config struct {
// Limits contains the configuration for timeouts.
Limits config.Limits `hcl:"limits"`
// Audit contains the configuration for audit logging.
Audit *config.AuditConfig `hcl:"audit"`
// ExtraKeysHCL is used by hcl to surface unexpected keys
ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"`
}
@@ -865,6 +868,7 @@ func DefaultConfig() *Config {
Sentinel: &config.SentinelConfig{},
Version: version.GetVersion(),
Autopilot: config.DefaultAutopilotConfig(),
Audit: &config.AuditConfig{},
DisableUpdateCheck: helper.BoolToPtr(false),
Limits: config.DefaultLimits(),
}
@@ -996,6 +1000,14 @@ func (c *Config) Merge(b *Config) *Config {
result.ACL = result.ACL.Merge(b.ACL)
}
// Apply the Audit config
if result.Audit == nil && b.Audit != nil {
audit := *b.Audit
result.Audit = &audit
} else if b.ACL != nil {
result.Audit = result.Audit.Merge(b.Audit)
}
// Apply the ports config
if result.Ports == nil && b.Ports != nil {
ports := *b.Ports

View File

@@ -35,6 +35,7 @@ func ParseConfigFile(path string) (*Config, error) {
c := &Config{
Client: &ClientConfig{ServerJoin: &ServerJoin{}},
ACL: &ACLConfig{},
Audit: &config.AuditConfig{},
Server: &ServerConfig{ServerJoin: &ServerJoin{}},
Consul: &config.ConsulConfig{},
Autopilot: &config.AutopilotConfig{},
@@ -48,7 +49,7 @@ func ParseConfigFile(path string) (*Config, error) {
}
// convert strings to time.Durations
err = durations([]td{
tds := []td{
{"gc_interval", &c.Client.GCInterval, &c.Client.GCIntervalHCL},
{"acl.token_ttl", &c.ACL.TokenTTL, &c.ACL.TokenTTLHCL},
{"acl.policy_ttl", &c.ACL.PolicyTTL, &c.ACL.PolicyTTLHCL},
@@ -61,7 +62,17 @@ func ParseConfigFile(path string) (*Config, error) {
{"autopilot.server_stabilization_time", &c.Autopilot.ServerStabilizationTime, &c.Autopilot.ServerStabilizationTimeHCL},
{"autopilot.last_contact_threshold", &c.Autopilot.LastContactThreshold, &c.Autopilot.LastContactThresholdHCL},
{"telemetry.collection_interval", &c.Telemetry.collectionInterval, &c.Telemetry.CollectionInterval},
})
}
// Add enterprise audit sinks for time.Duration parsing
for i, sink := range c.Audit.Sinks {
tds = append(tds, td{
fmt.Sprintf("audit.sink.%d", i), &sink.RotateDuration, &sink.RotateDurationHCL,
})
}
// convert strings to time.Durations
err = durations(tds)
if err != nil {
return nil, err
}
@@ -144,6 +155,17 @@ func extraKeys(c *Config) error {
removeEqualFold(&c.Client.ExtraKeysHCL, "host_volume")
}
// Remove AuditConfig extra keys
for _, f := range c.Audit.Filters {
removeEqualFold(&c.Audit.ExtraKeysHCL, f.Name)
removeEqualFold(&c.Audit.ExtraKeysHCL, "filter")
}
for _, s := range c.Audit.Sinks {
removeEqualFold(&c.Audit.ExtraKeysHCL, s.Name)
removeEqualFold(&c.Audit.ExtraKeysHCL, "sink")
}
for _, k := range []string{"enabled_schedulers", "start_join", "retry_join", "server_join"} {
removeEqualFold(&c.ExtraKeysHCL, k)
removeEqualFold(&c.ExtraKeysHCL, "server")

View File

@@ -134,6 +134,31 @@ var basicConfig = &Config{
PolicyTTLHCL: "60s",
ReplicationToken: "foobar",
},
Audit: &config.AuditConfig{
Enabled: helper.BoolToPtr(true),
Sinks: []*config.AuditSink{
{
DeliveryGuarantee: "enforced",
Name: "file",
Type: "file",
Format: "json",
Path: "/opt/nomad/audit.log",
RotateDuration: 24 * time.Hour,
RotateDurationHCL: "24h",
RotateBytes: 100,
RotateMaxFiles: 10,
},
},
Filters: []*config.AuditFilter{
{
Name: "default",
Type: "HTTPEvent",
Endpoints: []string{"/ui/", "/v1/agent/health"},
Stages: []string{"*"},
Operations: []string{"*"},
},
},
},
Telemetry: &Telemetry{
StatsiteAddr: "127.0.0.1:1234",
StatsdAddr: "127.0.0.1:2345",
@@ -389,6 +414,7 @@ func TestConfig_ParseMerge(t *testing.T) {
Autopilot: config.DefaultAutopilotConfig(),
Client: &ClientConfig{},
Server: &ServerConfig{},
Audit: &config.AuditConfig{},
}
merged := oldDefault.Merge(actual)
require.Equal(t, basicConfig.Client, merged.Client)
@@ -480,6 +506,9 @@ func (c *Config) addDefaults() {
if c.ACL == nil {
c.ACL = &ACLConfig{}
}
if c.Audit == nil {
c.Audit = &config.AuditConfig{}
}
if c.Consul == nil {
c.Consul = config.DefaultConsulConfig()
}
@@ -575,6 +604,31 @@ var sample0 = &Config{
ACL: &ACLConfig{
Enabled: true,
},
Audit: &config.AuditConfig{
Enabled: helper.BoolToPtr(true),
Sinks: []*config.AuditSink{
{
DeliveryGuarantee: "enforced",
Name: "file",
Type: "file",
Format: "json",
Path: "/opt/nomad/audit.log",
RotateDuration: 24 * time.Hour,
RotateDurationHCL: "24h",
RotateBytes: 100,
RotateMaxFiles: 10,
},
},
Filters: []*config.AuditFilter{
{
Name: "default",
Type: "HTTPEvent",
Endpoints: []string{"/ui/", "/v1/agent/health"},
Stages: []string{"*"},
Operations: []string{"*"},
},
},
},
Telemetry: &Telemetry{
PrometheusMetrics: true,
DisableHostname: true,
@@ -638,6 +692,31 @@ var sample1 = &Config{
ACL: &ACLConfig{
Enabled: true,
},
Audit: &config.AuditConfig{
Enabled: helper.BoolToPtr(true),
Sinks: []*config.AuditSink{
{
Name: "file",
Type: "file",
DeliveryGuarantee: "enforced",
Format: "json",
Path: "/opt/nomad/audit.log",
RotateDuration: 24 * time.Hour,
RotateDurationHCL: "24h",
RotateBytes: 100,
RotateMaxFiles: 10,
},
},
Filters: []*config.AuditFilter{
{
Name: "default",
Type: "HTTPEvent",
Endpoints: []string{"/ui/", "/v1/agent/health"},
Stages: []string{"*"},
Operations: []string{"*"},
},
},
},
Telemetry: &Telemetry{
PrometheusMetrics: true,
DisableHostname: true,

View File

@@ -34,6 +34,7 @@ func TestConfig_Merge(t *testing.T) {
Client: &ClientConfig{},
Server: &ServerConfig{},
ACL: &ACLConfig{},
Audit: &config.AuditConfig{},
Ports: &Ports{},
Addresses: &Addresses{},
AdvertiseAddrs: &AdvertiseAddrs{},
@@ -83,6 +84,22 @@ func TestConfig_Merge(t *testing.T) {
CirconusBrokerSelectTag: "dc:dc1",
PrefixFilter: []string{"filter1", "filter2"},
},
Audit: &config.AuditConfig{
Enabled: helper.BoolToPtr(true),
Sinks: []*config.AuditSink{
{
DeliveryGuarantee: "enforced",
Name: "file",
Type: "file",
Format: "json",
Path: "/opt/nomad/audit.log",
RotateDuration: 24 * time.Hour,
RotateDurationHCL: "24h",
RotateBytes: 100,
RotateMaxFiles: 10,
},
},
},
Client: &ClientConfig{
Enabled: false,
StateDir: "/tmp/state1",
@@ -213,6 +230,22 @@ func TestConfig_Merge(t *testing.T) {
DisableUpdateCheck: helper.BoolToPtr(true),
DisableAnonymousSignature: true,
BindAddr: "127.0.0.2",
Audit: &config.AuditConfig{
Enabled: helper.BoolToPtr(true),
Sinks: []*config.AuditSink{
{
DeliveryGuarantee: "enforced",
Name: "file",
Type: "file",
Format: "json",
Path: "/opt/nomad/audit.log",
RotateDuration: 24 * time.Hour,
RotateDurationHCL: "24h",
RotateBytes: 100,
RotateMaxFiles: 10,
},
},
},
Telemetry: &Telemetry{
StatsiteAddr: "127.0.0.2:8125",
StatsdAddr: "127.0.0.2:8125",

View File

@@ -0,0 +1,19 @@
package event
import (
"context"
)
// Eventer describes the interface that must be implemented by an eventer.
type Eventer interface {
	// Event emits an event of the given type with the given payload. The
	// context controls cancellation of any delivery the implementation does.
	Event(ctx context.Context, eventType string, payload interface{}) error
	// Enabled reports whether the eventer is currently enabled.
	Enabled() bool
	// Reopen signals to eventer to reopen any files they have open.
	Reopen() error
	// SetEnabled sets the eventer to enabled or disabled.
	SetEnabled(enabled bool)
}

View File

@@ -31,6 +31,13 @@ const (
// ErrEntOnly is the error returned if accessing an enterprise only
// endpoint
ErrEntOnly = "Nomad Enterprise only endpoint"
// ContextKeyReqID is a unique ID for a given request
ContextKeyReqID = "requestID"
// MissingRequestID is a placeholder if we cannot retrieve a request
// UUID from context
MissingRequestID = "<missing request id>"
)
var (
@@ -49,6 +56,9 @@ var (
})
)
type handlerFn func(resp http.ResponseWriter, req *http.Request) (interface{}, error)
type handlerByteFn func(resp http.ResponseWriter, req *http.Request) ([]byte, error)
// HTTPServer is used to wrap an Agent and expose it over an HTTP interface
type HTTPServer struct {
agent *Agent
@@ -380,6 +390,32 @@ func handleRootFallthrough() http.Handler {
})
}
// errCodeFromHandler maps a handler error to an HTTP status code and the
// message to write in the response body. A nil error yields (0, "").
func errCodeFromHandler(err error) (int, string) {
	if err == nil {
		return 0, ""
	}

	code := 500
	errMsg := err.Error()
	// Renamed the type-asserted value from `http` to `coded`: the original
	// name shadowed the imported net/http package inside this branch.
	if coded, ok := err.(HTTPCodedError); ok {
		code = coded.Code()
	} else if ecode, emsg, ok := structs.CodeFromRPCCodedErr(err); ok {
		code = ecode
		errMsg = emsg
	} else {
		// RPC errors get wrapped, so manually unwrap by only looking at their suffix
		if strings.HasSuffix(errMsg, structs.ErrPermissionDenied.Error()) {
			errMsg = structs.ErrPermissionDenied.Error()
			code = 403
		} else if strings.HasSuffix(errMsg, structs.ErrTokenNotFound.Error()) {
			errMsg = structs.ErrTokenNotFound.Error()
			code = 403
		}
	}

	return code, errMsg
}
// wrap is used to wrap functions to make them more convenient
func (s *HTTPServer) wrap(handler func(resp http.ResponseWriter, req *http.Request) (interface{}, error)) func(resp http.ResponseWriter, req *http.Request) {
f := func(resp http.ResponseWriter, req *http.Request) {
@@ -390,7 +426,7 @@ func (s *HTTPServer) wrap(handler func(resp http.ResponseWriter, req *http.Reque
defer func() {
s.logger.Debug("request complete", "method", req.Method, "path", reqURL, "duration", time.Now().Sub(start))
}()
obj, err := handler(resp, req)
obj, err := s.auditHandler(handler)(resp, req)
// Check for an error
HAS_ERR:
@@ -462,28 +498,11 @@ func (s *HTTPServer) wrapNonJSON(handler func(resp http.ResponseWriter, req *htt
defer func() {
s.logger.Debug("request complete", "method", req.Method, "path", reqURL, "duration", time.Now().Sub(start))
}()
obj, err := handler(resp, req)
obj, err := s.auditByteHandler(handler)(resp, req)
// Check for an error
if err != nil {
code := 500
errMsg := err.Error()
if http, ok := err.(HTTPCodedError); ok {
code = http.Code()
} else if ecode, emsg, ok := structs.CodeFromRPCCodedErr(err); ok {
code = ecode
errMsg = emsg
} else {
// RPC errors get wrapped, so manually unwrap by only looking at their suffix
if strings.HasSuffix(errMsg, structs.ErrPermissionDenied.Error()) {
errMsg = structs.ErrPermissionDenied.Error()
code = 403
} else if strings.HasSuffix(errMsg, structs.ErrTokenNotFound.Error()) {
errMsg = structs.ErrTokenNotFound.Error()
code = 403
}
}
code, errMsg := errCodeFromHandler(err)
resp.WriteHeader(code)
resp.Write([]byte(errMsg))
s.logger.Error("request failed", "method", req.Method, "path", reqURL, "error", err, "code", code)

View File

@@ -22,3 +22,15 @@ func (s *HTTPServer) registerEnterpriseHandlers() {
func (s *HTTPServer) entOnly(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
return nil, CodedError(501, ErrEntOnly)
}
// auditHandler wraps h with audit logging. Audit logging is enterprise-only,
// so the OSS build passes the handler through unchanged. Uses a pointer
// receiver for consistency with the other HTTPServer methods (the original
// value receiver was an accidental inconsistency).
func (s *HTTPServer) auditHandler(h handlerFn) handlerFn {
	return h
}

// auditByteHandler is the byte-handler analogue of auditHandler (no-op in OSS).
func (s *HTTPServer) auditByteHandler(h handlerByteFn) handlerByteFn {
	return h
}

// auditHTTPHandler is the http.Handler analogue of auditHandler (no-op in OSS).
func (s *HTTPServer) auditHTTPHandler(h http.Handler) http.Handler {
	return h
}

View File

@@ -142,6 +142,27 @@ acl {
replication_token = "foobar"
}
audit {
enabled = true
sink "file" {
type = "file"
delivery_guarantee = "enforced"
format = "json"
path = "/opt/nomad/audit.log"
rotate_bytes = 100
rotate_duration = "24h"
rotate_max_files = 10
}
filter "default" {
type = "HTTPEvent"
endpoints = ["/ui/", "/v1/agent/health"]
stages = ["*"]
operations = ["*"]
}
}
telemetry {
statsite_address = "127.0.0.1:1234"
statsd_address = "127.0.0.1:2345"

View File

@@ -7,6 +7,34 @@
"token_ttl": "60s"
}
],
"audit": {
"enabled": true,
"sink": [
{
"file": {
"type": "file",
"format": "json",
"delivery_guarantee": "enforced",
"path": "/opt/nomad/audit.log",
"rotate_bytes": 100,
"rotate_duration": "24h",
"rotate_max_files": 10
}
}
],
"filter": [
{
"default": [
{
"endpoints": ["/ui/", "/v1/agent/health"],
"operations": ["*"],
"stages": ["*"],
"type": "HTTPEvent"
}
]
}
]
},
"addresses": [
{
"http": "127.0.0.1",

View File

@@ -5,6 +5,34 @@
"acl": {
"enabled": true
},
"audit": {
"enabled": true,
"sink": [
{
"file": {
"type": "file",
"format": "json",
"delivery_guarantee": "enforced",
"path": "/opt/nomad/audit.log",
"rotate_bytes": 100,
"rotate_duration": "24h",
"rotate_max_files": 10
}
}
],
"filter": [
{
"default": [
{
"endpoints": ["/ui/", "/v1/agent/health"],
"operations": ["*"],
"stages": ["*"],
"type": "HTTPEvent"
}
]
}
]
},
"advertise": {
"http": "host.example.com",
"rpc": "host.example.com",

View File

@@ -17,3 +17,24 @@
vault = {
enabled = true
}
audit {
enabled = true
sink "file" {
type = "file"
format = "json"
delivery_guarantee = "enforced"
path = "/opt/nomad/audit.log"
rotate_bytes = 100
rotate_duration = "24h"
rotate_max_files = 10
}
filter "default" {
type = "HTTPEvent"
endpoints = ["/ui/", "/v1/agent/health"]
stages = ["*"]
operations = ["*"]
}
}

View File

@@ -293,7 +293,7 @@ job "binstore-storagelocker" {
driver = "docker"
lifecycle {
hook = "prestart"
hook = "prestart"
sidecar = true
}

View File

@@ -84,3 +84,37 @@ func resolveTokenFromSnapshotCache(snap *state.StateSnapshot, cache *lru.TwoQueu
}
return aclObj, nil
}
// ResolveSecretToken is used to translate an ACL Token Secret ID into
// an ACLToken object, nil if ACLs are disabled, or an error.
func (s *Server) ResolveSecretToken(secretID string) (*structs.ACLToken, error) {
	// TODO(Drew) Look into using ACLObject cache or create a separate cache

	// Fast-path if ACLs are disabled
	if !s.config.ACLEnabled {
		return nil, nil
	}
	defer metrics.MeasureSince([]string{"nomad", "acl", "resolveSecretToken"}, time.Now())

	snap, err := s.fsm.State().Snapshot()
	if err != nil {
		return nil, err
	}

	// Anonymous requests resolve to the shared anonymous token.
	if secretID == "" {
		return structs.AnonymousACLToken, nil
	}

	// Look the token up in the state snapshot by its secret ID.
	found, err := snap.ACLTokenBySecretID(nil, secretID)
	if err != nil {
		return nil, err
	}
	if found == nil {
		return nil, structs.ErrTokenNotFound
	}
	return found, nil
}

View File

@@ -107,3 +107,27 @@ func TestResolveACLToken_LeaderToken(t *testing.T) {
assert.True(token.IsManagement())
}
}
// TestResolveSecretToken verifies that a server resolves a known secret ID
// to its full ACLToken object.
func TestResolveSecretToken(t *testing.T) {
	t.Parallel()
	s1, _, cleanupS1 := TestACLServer(t, nil)
	defer cleanupS1()
	testutil.WaitForLeader(t, s1.RPC)
	state := s1.State()
	// Sanity-check that establishing leadership produced a leader ACL token.
	leaderToken := s1.getLeaderAcl()
	assert.NotEmpty(t, leaderToken)
	// Insert a token, then resolve it by its secret ID.
	token := mock.ACLToken()
	err := state.UpsertACLTokens(110, []*structs.ACLToken{token})
	assert.Nil(t, err)
	respToken, err := s1.ResolveSecretToken(token.SecretID)
	assert.Nil(t, err)
	if assert.NotNil(t, respToken) {
		assert.NotEmpty(t, respToken.AccessorID)
	}
}

View File

@@ -0,0 +1,213 @@
package config
import (
"time"
"github.com/hashicorp/nomad/helper"
)
// AuditConfig is the configuration specific to Audit Logging
type AuditConfig struct {
	// Enabled controls the Audit Logging mode
	Enabled *bool `hcl:"enabled"`
	// Sinks configure output sinks for audit logs
	Sinks []*AuditSink `hcl:"sink"`
	// Filters configure audit event filters to filter out certain events
	// from being written to a sink.
	Filters []*AuditFilter `hcl:"filter"`
	// ExtraKeysHCL is used by hcl to surface unexpected keys
	ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"`
}
// AuditSink describes a single output destination for audit logs.
type AuditSink struct {
	// Name is a unique name given to the sink (the HCL block label)
	Name string `hcl:",key"`
	// DeliveryGuarantee is the level at which delivery of logs must
	// be met in order to successfully make requests
	DeliveryGuarantee string `hcl:"delivery_guarantee"`
	// Type is the sink type to configure. (file)
	Type string `hcl:"type"`
	// Format is the sink output format. (json)
	Format string `hcl:"format"`
	// FileName is the name that the audit log should follow.
	// If rotation is enabled the pattern will be name-timestamp.log
	Path string `hcl:"path"`
	// RotateDuration is the time period that logs should be rotated in.
	// It has no hcl tag on purpose: it is populated by parsing
	// RotateDurationHCL after decoding (see the config file parser).
	RotateDuration time.Duration
	RotateDurationHCL string `hcl:"rotate_duration" json:"-"`
	// RotateBytes is the max number of bytes that should be written to a file
	RotateBytes int `hcl:"rotate_bytes"`
	// RotateMaxFiles is the max number of log files to keep
	RotateMaxFiles int `hcl:"rotate_max_files"`
}
// AuditFilter is the configuration for a Audit Log Filter
type AuditFilter struct {
	// Name is a unique name given to the filter (the HCL block label)
	Name string `hcl:",key"`
	// Type of auditing event to filter, such as HTTPEvent
	Type string `hcl:"type"`
	// Endpoints is the list of endpoints to include in the filter
	Endpoints []string `hcl:"endpoints"`
	// Stages is the list of auditing request lifecycle stages to filter
	Stages []string `hcl:"stages"`
	// Operations is the type of operation to filter, such as GET, DELETE
	Operations []string `hcl:"operations"`
}
// Copy returns a deep copy of the AuditConfig, or nil for a nil receiver.
func (a *AuditConfig) Copy() *AuditConfig {
	if a == nil {
		return nil
	}

	out := *a

	// Duplicate the enabled flag so the copies don't share the pointer.
	if a.Enabled != nil {
		out.Enabled = helper.BoolToPtr(*a.Enabled)
	}

	// Deep-copy the sink and filter slices.
	out.Sinks = copySliceAuditSink(a.Sinks)
	out.Filters = copySliceAuditFilter(a.Filters)

	return &out
}
// Merge is used to merge two Audit Configs together. Settings from the input
// take precedence. Either operand may be nil: a nil receiver yields a copy of
// the input, and a nil input yields a copy of the receiver (the original
// dereferenced b.Enabled unconditionally and would panic on a nil input).
func (a *AuditConfig) Merge(b *AuditConfig) *AuditConfig {
	if a == nil {
		return b.Copy()
	}
	result := a.Copy()
	if b == nil {
		return result
	}

	if b.Enabled != nil {
		result.Enabled = helper.BoolToPtr(*b.Enabled)
	}

	// Merge Sinks
	if len(a.Sinks) == 0 && len(b.Sinks) != 0 {
		result.Sinks = copySliceAuditSink(b.Sinks)
	} else if len(b.Sinks) != 0 {
		result.Sinks = auditSinkSliceMerge(a.Sinks, b.Sinks)
	}

	// Merge Filters
	if len(a.Filters) == 0 && len(b.Filters) != 0 {
		result.Filters = copySliceAuditFilter(b.Filters)
	} else if len(b.Filters) != 0 {
		result.Filters = auditFilterSliceMerge(a.Filters, b.Filters)
	}

	return result
}
// Copy returns a copy of the AuditSink, or nil for a nil receiver.
// Every field is a value type, so a shallow copy is a full copy.
func (a *AuditSink) Copy() *AuditSink {
	if a == nil {
		return nil
	}
	out := *a
	return &out
}
// Copy returns a deep copy of the AuditFilter, or nil for a nil receiver.
func (a *AuditFilter) Copy() *AuditFilter {
	if a == nil {
		return nil
	}

	out := *a

	// The string slices must be duplicated so the copy doesn't alias them.
	out.Endpoints = helper.CopySliceString(a.Endpoints)
	out.Stages = helper.CopySliceString(a.Stages)
	out.Operations = helper.CopySliceString(a.Operations)

	return &out
}
// copySliceAuditFilter deep-copies a slice of filters.
// An empty or nil input yields nil.
func copySliceAuditFilter(a []*AuditFilter) []*AuditFilter {
	if len(a) == 0 {
		return nil
	}

	out := make([]*AuditFilter, len(a))
	for i := range a {
		out[i] = a[i].Copy()
	}
	return out
}
// auditFilterSliceMerge merges b into a by filter name: entries from b
// replace same-named entries from a, and names not seen in a are appended
// in b's order. The result holds copies; neither input is mutated.
func auditFilterSliceMerge(a, b []*AuditFilter) []*AuditFilter {
	merged := make([]*AuditFilter, len(a))
	indexByName := make(map[string]int, len(a))

	for i, f := range a {
		merged[i] = f.Copy()
		indexByName[f.Name] = i
	}

	for _, f := range b {
		if i, ok := indexByName[f.Name]; ok {
			merged[i] = f.Copy()
		} else {
			merged = append(merged, f.Copy())
		}
	}

	return merged
}
// copySliceAuditSink deep-copies a slice of sinks.
// An empty or nil input yields nil.
func copySliceAuditSink(a []*AuditSink) []*AuditSink {
	if len(a) == 0 {
		return nil
	}

	out := make([]*AuditSink, len(a))
	for i := range a {
		out[i] = a[i].Copy()
	}
	return out
}
// auditSinkSliceMerge merges b into a by sink name: entries from b replace
// same-named entries from a, and names not seen in a are appended in b's
// order. The result holds copies; neither input is mutated.
func auditSinkSliceMerge(a, b []*AuditSink) []*AuditSink {
	merged := make([]*AuditSink, len(a))
	indexByName := make(map[string]int, len(a))

	for i, s := range a {
		merged[i] = s.Copy()
		indexByName[s.Name] = i
	}

	for _, s := range b {
		if i, ok := indexByName[s.Name]; ok {
			merged[i] = s.Copy()
		} else {
			merged = append(merged, s.Copy())
		}
	}

	return merged
}

View File

@@ -0,0 +1,106 @@
package config
import (
"testing"
"time"
"github.com/hashicorp/nomad/helper"
"github.com/stretchr/testify/require"
)
// TestAuditConfig_Merge verifies AuditConfig.Merge semantics: the input's
// sink replaces the same-named sink wholesale, the same-named filter is
// replaced, new filters are appended, and the receiver's Enabled flag is
// kept when the input leaves it unset.
func TestAuditConfig_Merge(t *testing.T) {
	// Base config: one sink, one filter, explicitly enabled.
	c1 := &AuditConfig{
		Enabled: helper.BoolToPtr(true),
		Sinks: []*AuditSink{
			{
				DeliveryGuarantee: "enforced",
				Name:              "file",
				Type:              "file",
				Format:            "json",
				Path:              "/opt/nomad/audit.log",
				RotateDuration:    24 * time.Hour,
				RotateDurationHCL: "24h",
				RotateBytes:       100,
				RotateMaxFiles:    10,
			},
		},
		Filters: []*AuditFilter{
			{
				Name:       "one",
				Type:       "HTTPEvent",
				Endpoints:  []string{"/ui/", "/v1/agent/health"},
				Stages:     []string{"*"},
				Operations: []string{"*"},
			},
		},
	}
	// Input config: same sink/filter names with different settings, plus a
	// brand-new filter "two". Enabled is deliberately unset.
	c2 := &AuditConfig{
		Sinks: []*AuditSink{
			{
				DeliveryGuarantee: "best-effort",
				Name:              "file",
				Type:              "file",
				Format:            "json",
				Path:              "/opt/nomad/audit.log",
				RotateDuration:    48 * time.Hour,
				RotateDurationHCL: "48h",
				RotateBytes:       20,
				RotateMaxFiles:    2,
			},
		},
		Filters: []*AuditFilter{
			{
				Name:       "one",
				Type:       "HTTPEvent",
				Endpoints:  []string{"/ui/", "/v1/agent/health"},
				Stages:     []string{"OperationReceived"},
				Operations: []string{"GET"},
			},
			{
				Name:       "two",
				Type:       "HTTPEvent",
				Endpoints:  []string{"*"},
				Stages:     []string{"OperationReceived"},
				Operations: []string{"OPTIONS"},
			},
		},
	}
	// Expected: c2's settings win everywhere it set them; Enabled survives
	// from c1; filter "two" is appended.
	e := &AuditConfig{
		Enabled: helper.BoolToPtr(true),
		Sinks: []*AuditSink{
			{
				DeliveryGuarantee: "best-effort",
				Name:              "file",
				Type:              "file",
				Format:            "json",
				Path:              "/opt/nomad/audit.log",
				RotateDuration:    48 * time.Hour,
				RotateDurationHCL: "48h",
				RotateBytes:       20,
				RotateMaxFiles:    2,
			},
		},
		Filters: []*AuditFilter{
			{
				Name:       "one",
				Type:       "HTTPEvent",
				Endpoints:  []string{"/ui/", "/v1/agent/health"},
				Stages:     []string{"OperationReceived"},
				Operations: []string{"GET"},
			},
			{
				Name:       "two",
				Type:       "HTTPEvent",
				Endpoints:  []string{"*"},
				Stages:     []string{"OperationReceived"},
				Operations: []string{"OPTIONS"},
			},
		},
	}
	result := c1.Merge(c2)
	require.Equal(t, e, result)
}