From 5e03fec048b22ee2e4e09b1ace487ced371e0fe2 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Mon, 7 Oct 2019 15:48:01 -0400 Subject: [PATCH 01/34] Update gziphandler to latest version This version of gziphandler includes a fix that fixes GzipResponseWriter to implement CloseNotifier https://github.com/nytimes/gziphandler/pull/63 --- .../github.com/NYTimes/gziphandler/README.md | 8 +- vendor/github.com/NYTimes/gziphandler/go.mod | 5 + vendor/github.com/NYTimes/gziphandler/go.sum | 7 + vendor/github.com/NYTimes/gziphandler/gzip.go | 227 ++++++++++++++---- vendor/vendor.json | 2 +- 5 files changed, 195 insertions(+), 54 deletions(-) create mode 100644 vendor/github.com/NYTimes/gziphandler/go.mod create mode 100644 vendor/github.com/NYTimes/gziphandler/go.sum diff --git a/vendor/github.com/NYTimes/gziphandler/README.md b/vendor/github.com/NYTimes/gziphandler/README.md index 6d7246070..6259acaca 100644 --- a/vendor/github.com/NYTimes/gziphandler/README.md +++ b/vendor/github.com/NYTimes/gziphandler/README.md @@ -6,6 +6,10 @@ response body, for clients which support it. Although it's usually simpler to leave that to a reverse proxy (like nginx or Varnish), this package is useful when that's undesirable. +## Install +```bash +go get -u github.com/NYTimes/gziphandler +``` ## Usage @@ -48,5 +52,5 @@ The docs can be found at [godoc.org][docs], as usual. -[docs]: https://godoc.org/github.com/nytimes/gziphandler -[license]: https://github.com/nytimes/gziphandler/blob/master/LICENSE.md +[docs]: https://godoc.org/github.com/NYTimes/gziphandler +[license]: https://github.com/NYTimes/gziphandler/blob/master/LICENSE diff --git a/vendor/github.com/NYTimes/gziphandler/go.mod b/vendor/github.com/NYTimes/gziphandler/go.mod new file mode 100644 index 000000000..801901274 --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/go.mod @@ -0,0 +1,5 @@ +module github.com/NYTimes/gziphandler + +go 1.11 + +require github.com/stretchr/testify v1.3.0 diff --git a/vendor/github.com/NYTimes/gziphandler/go.sum b/vendor/github.com/NYTimes/gziphandler/go.sum new file mode 100644 index 000000000..4347755af --- /dev/null +++ b/vendor/github.com/NYTimes/gziphandler/go.sum @@ -0,0 +1,7 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/NYTimes/gziphandler/gzip.go b/vendor/github.com/NYTimes/gziphandler/gzip.go index b6af9115a..c112bbdf8 100644 --- a/vendor/github.com/NYTimes/gziphandler/gzip.go +++ b/vendor/github.com/NYTimes/gziphandler/gzip.go @@ -1,10 +1,11 @@ -package gziphandler +package gziphandler // import "github.com/NYTimes/gziphandler" import ( "bufio" "compress/gzip" "fmt" "io" + "mime" "net" "net/http" "strconv" @@ -28,9 +29,11 @@ const ( // The examples seem to indicate that it is. DefaultQValue = 1.0 - // DefaultMinSize defines the minimum size to reach to enable compression. - // It's 512 bytes. 
- DefaultMinSize = 512 + // DefaultMinSize is the default minimum size until we enable gzip compression. + // 1500 bytes is the MTU size for the internet since that is the largest size allowed at the network layer. + // If you take a file that is 1300 bytes and compress it to 800 bytes, it’s still transmitted in that same 1500 byte packet regardless, so you’ve gained nothing. + // That being the case, you should restrict the gzip compression to files with a size greater than a single packet, 1400 bytes (1.4KB) is a safe value. + DefaultMinSize = 1400 ) // gzipWriterPools stores a sync.Pool for each compression level for reuse of @@ -80,44 +83,71 @@ type GzipResponseWriter struct { minSize int // Specifed the minimum response size to gzip. If the response length is bigger than this value, it is compressed. buf []byte // Holds the first part of the write before reaching the minSize or the end of the write. + ignore bool // If true, then we immediately passthru writes to the underlying ResponseWriter. - contentTypes []string // Only compress if the response is one of these content-types. All are accepted if empty. + contentTypes []parsedContentType // Only compress if the response is one of these content-types. All are accepted if empty. +} + +type GzipResponseWriterWithCloseNotify struct { + *GzipResponseWriter +} + +func (w GzipResponseWriterWithCloseNotify) CloseNotify() <-chan bool { + return w.ResponseWriter.(http.CloseNotifier).CloseNotify() } // Write appends data to the gzip writer. func (w *GzipResponseWriter) Write(b []byte) (int, error) { - // If content type is not set. - if _, ok := w.Header()[contentType]; !ok { - // It infer it from the uncompressed body. - w.Header().Set(contentType, http.DetectContentType(b)) - } - // GZIP responseWriter is initialized. Use the GZIP responseWriter. if w.gw != nil { - n, err := w.gw.Write(b) - return n, err + return w.gw.Write(b) + } + + // If we have already decided not to use GZIP, immediately passthrough. + if w.ignore { + return w.ResponseWriter.Write(b) } // Save the write into a buffer for later use in GZIP responseWriter (if content is long enough) or at close with regular responseWriter. // On the first write, w.buf changes from nil to a valid slice w.buf = append(w.buf, b...) - // If the global writes are bigger than the minSize and we're about to write - // a response containing a content type we want to handle, enable - // compression. - if len(w.buf) >= w.minSize && handleContentType(w.contentTypes, w) && w.Header().Get(contentEncoding) == "" { - err := w.startGzip() - if err != nil { - return 0, err + var ( + cl, _ = strconv.Atoi(w.Header().Get(contentLength)) + ct = w.Header().Get(contentType) + ce = w.Header().Get(contentEncoding) + ) + // Only continue if they didn't already choose an encoding or a known unhandled content length or type. + if ce == "" && (cl == 0 || cl >= w.minSize) && (ct == "" || handleContentType(w.contentTypes, ct)) { + // If the current buffer is less than minSize and a Content-Length isn't set, then wait until we have more data. + if len(w.buf) < w.minSize && cl == 0 { + return len(b), nil + } + // If the Content-Length is larger than minSize or the current buffer is larger than minSize, then continue. + if cl >= w.minSize || len(w.buf) >= w.minSize { + // If a Content-Type wasn't specified, infer it from the current buffer. + if ct == "" { + ct = http.DetectContentType(w.buf) + w.Header().Set(contentType, ct) + } + // If the Content-Type is acceptable to GZIP, initialize the GZIP writer. 
+ if handleContentType(w.contentTypes, ct) { + if err := w.startGzip(); err != nil { + return 0, err + } + return len(b), nil + } } } - + // If we got here, we should not GZIP this response. + if err := w.startPlain(); err != nil { + return 0, err + } return len(b), nil } -// startGzip initialize any GZIP specific informations. +// startGzip initializes a GZIP writer and writes the buffer. func (w *GzipResponseWriter) startGzip() error { - // Set the GZIP header. w.Header().Set(contentEncoding, "gzip") @@ -129,28 +159,57 @@ func (w *GzipResponseWriter) startGzip() error { // Write the header to gzip response. if w.code != 0 { w.ResponseWriter.WriteHeader(w.code) + // Ensure that no other WriteHeader's happen + w.code = 0 } - // Initialize the GZIP response. - w.init() + // Initialize and flush the buffer into the gzip response if there are any bytes. + // If there aren't any, we shouldn't initialize it yet because on Close it will + // write the gzip header even if nothing was ever written. + if len(w.buf) > 0 { + // Initialize the GZIP response. + w.init() + n, err := w.gw.Write(w.buf) - // Flush the buffer into the gzip response. - n, err := w.gw.Write(w.buf) + // This should never happen (per io.Writer docs), but if the write didn't + // accept the entire buffer but returned no specific error, we have no clue + // what's going on, so abort just to be safe. + if err == nil && n < len(w.buf) { + err = io.ErrShortWrite + } + return err + } + return nil +} +// startPlain writes to sent bytes and buffer the underlying ResponseWriter without gzip. +func (w *GzipResponseWriter) startPlain() error { + if w.code != 0 { + w.ResponseWriter.WriteHeader(w.code) + // Ensure that no other WriteHeader's happen + w.code = 0 + } + w.ignore = true + // If Write was never called then don't call Write on the underlying ResponseWriter. + if w.buf == nil { + return nil + } + n, err := w.ResponseWriter.Write(w.buf) + w.buf = nil // This should never happen (per io.Writer docs), but if the write didn't // accept the entire buffer but returned no specific error, we have no clue // what's going on, so abort just to be safe. if err == nil && n < len(w.buf) { - return io.ErrShortWrite + err = io.ErrShortWrite } - - w.buf = nil return err } // WriteHeader just saves the response code until close or GZIP effective writes. func (w *GzipResponseWriter) WriteHeader(code int) { - w.code = code + if w.code == 0 { + w.code = code + } } // init graps a new gzip writer from the gzipWriterPool and writes the correct @@ -165,21 +224,20 @@ func (w *GzipResponseWriter) init() { // Close will close the gzip.Writer and will put it back in the gzipWriterPool. func (w *GzipResponseWriter) Close() error { - if w.gw == nil { - // Gzip not trigged yet, write out regular response. - if w.code != 0 { - w.ResponseWriter.WriteHeader(w.code) - } - if w.buf != nil { - _, writeErr := w.ResponseWriter.Write(w.buf) - // Returns the error if any at write. - if writeErr != nil { - return fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", writeErr.Error()) - } - } + if w.ignore { return nil } + if w.gw == nil { + // GZIP not triggered yet, write out regular response. + err := w.startPlain() + // Returns the error if any at write. 
+ if err != nil { + err = fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", err.Error()) + } + return err + } + err := w.gw.Close() gzipWriterPools[w.index].Put(w.gw) w.gw = nil @@ -190,6 +248,14 @@ func (w *GzipResponseWriter) Close() error { // http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter // an http.Flusher. func (w *GzipResponseWriter) Flush() { + if w.gw == nil && !w.ignore { + // Only flush once startGzip or startPlain has been called. + // + // Flush is thus a no-op until we're certain whether a plain + // or gzipped response will be served. + return + } + if w.gw != nil { w.gw.Flush() } @@ -256,7 +322,6 @@ func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Add(vary, acceptEncoding) - if acceptsGzip(r) { gw := &GzipResponseWriter{ ResponseWriter: w, @@ -266,7 +331,13 @@ func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error } defer gw.Close() - h.ServeHTTP(gw, r) + if _, ok := w.(http.CloseNotifier); ok { + gwcn := GzipResponseWriterWithCloseNotify{gw} + h.ServeHTTP(gwcn, r) + } else { + h.ServeHTTP(gw, r) + } + } else { h.ServeHTTP(w, r) } @@ -274,11 +345,40 @@ func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error }, nil } +// Parsed representation of one of the inputs to ContentTypes. +// See https://golang.org/pkg/mime/#ParseMediaType +type parsedContentType struct { + mediaType string + params map[string]string +} + +// equals returns whether this content type matches another content type. +func (pct parsedContentType) equals(mediaType string, params map[string]string) bool { + if pct.mediaType != mediaType { + return false + } + // if pct has no params, don't care about other's params + if len(pct.params) == 0 { + return true + } + + // if pct has any params, they must be identical to other's. + if len(pct.params) != len(params) { + return false + } + for k, v := range pct.params { + if w, ok := params[k]; !ok || v != w { + return false + } + } + return true +} + // Used for functional configuration. type config struct { minSize int level int - contentTypes []string + contentTypes []parsedContentType } func (c *config) validate() error { @@ -307,11 +407,32 @@ func CompressionLevel(level int) option { } } +// ContentTypes specifies a list of content types to compare +// the Content-Type header to before compressing. If none +// match, the response will be returned as-is. +// +// Content types are compared in a case-insensitive, whitespace-ignored +// manner. +// +// A MIME type without any other directive will match a content type +// that has the same MIME type, regardless of that content type's other +// directives. I.e., "text/html" will match both "text/html" and +// "text/html; charset=utf-8". +// +// A MIME type with any other directive will only match a content type +// that has the same MIME type and other directives. I.e., +// "text/html; charset=utf-8" will only match "text/html; charset=utf-8". +// +// By default, responses are gzipped regardless of +// Content-Type. 
func ContentTypes(types []string) option { return func(c *config) { - c.contentTypes = []string{} + c.contentTypes = []parsedContentType{} for _, v := range types { - c.contentTypes = append(c.contentTypes, strings.ToLower(v)) + mediaType, params, err := mime.ParseMediaType(v) + if err == nil { + c.contentTypes = append(c.contentTypes, parsedContentType{mediaType, params}) + } } } } @@ -332,15 +453,19 @@ func acceptsGzip(r *http.Request) bool { } // returns true if we've been configured to compress the specific content type. -func handleContentType(contentTypes []string, w http.ResponseWriter) bool { +func handleContentType(contentTypes []parsedContentType, ct string) bool { // If contentTypes is empty we handle all content types. if len(contentTypes) == 0 { return true } - ct := strings.ToLower(w.Header().Get(contentType)) + mediaType, params, err := mime.ParseMediaType(ct) + if err != nil { + return false + } + for _, c := range contentTypes { - if c == ct { + if c.equals(mediaType, params) { return true } } diff --git a/vendor/vendor.json b/vendor/vendor.json index fe000cbfc..31de2a9af 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -20,7 +20,7 @@ {"path":"github.com/Microsoft/go-winio/pkg/guid","checksumSHA1":"/ykkyb7gmtZC68n7T24xwbmlCBc=","origin":"github.com/endocrimes/go-winio/pkg/guid","revision":"fb47a8b419480a700368c176bc1d5d7e3393b98d","revisionTime":"2019-06-20T17:03:19Z","version":"dani/safe-relisten","versionExact":"dani/safe-relisten"}, {"path":"github.com/NVIDIA/gpu-monitoring-tools","checksumSHA1":"kF1vk+8Xvb3nGBiw9+qbUc0SZ4M=","revision":"86f2a9fac6c5b597dc494420005144b8ef7ec9fb","revisionTime":"2018-08-29T22:20:09Z"}, {"path":"github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml","checksumSHA1":"P8FATSSgpe5A17FyPrGpsX95Xw8=","revision":"86f2a9fac6c5b597dc494420005144b8ef7ec9fb","revisionTime":"2018-08-29T22:20:09Z"}, - {"path":"github.com/NYTimes/gziphandler","checksumSHA1":"jktW57+vJsziNVPeXMCoujTzdW4=","revision":"97ae7fbaf81620fe97840685304a78a306a39c64","revisionTime":"2017-09-16T00:36:49Z"}, + {"path":"github.com/NYTimes/gziphandler","checksumSHA1":"jktW57+vJsziNVPeXMCoujTzdW4=","revision":"dd0439581c7657cb652dfe5c71d7d48baf39541d","revisionTime":"2017-09-16T00:36:49Z"}, {"path":"github.com/Nvveen/Gotty","checksumSHA1":"Aqy8/FoAIidY/DeQ5oTYSZ4YFVc=","revision":"cd527374f1e5bff4938207604a14f2e38a9cf512","revisionTime":"2012-06-04T00:48:16Z"}, {"path":"github.com/StackExchange/wmi","checksumSHA1":"qtjd74+bErubh+qyv3s+lWmn9wc=","revision":"ea383cf3ba6ec950874b8486cd72356d007c768f","revisionTime":"2017-04-10T19:29:09Z"}, {"path":"github.com/agext/levenshtein","checksumSHA1":"jQh1fnoKPKMURvKkpdRjN695nAQ=","revision":"5f10fee965225ac1eecdc234c09daf5cd9e7f7b6","revisionTime":"2017-02-17T06:30:20Z"}, From 91c01847733f48a701f738951538b39d4bca846b Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Mon, 7 Oct 2019 15:52:26 -0400 Subject: [PATCH 02/34] Adds AgentMonitor Endpoint AgentMonitor is an endpoint to stream logs for a given agent. It allows callers to pass in a supplied log level, which may be different than the agents config allowing for temporary debugging with lower log levels. 
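As a rough sketch of how a caller might consume the endpoint described above (the /v1/agent/monitor path and the loglevel query parameter come from the handler and route added below; the agent address and the chosen level are placeholders):

```go
package main

import (
	"bufio"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Ask the agent to stream at a lower level than its configured one.
	resp, err := http.Get("http://127.0.0.1:4646/v1/agent/monitor?loglevel=debug")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The response body is a stream of newline-delimited log lines.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
}
```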
Pass in logWriter when setting up Agent --- command/agent/agent.go | 4 +- command/agent/agent_endpoint.go | 87 +++++++++++++++++++++++++++++++++ command/agent/command.go | 8 +-- command/agent/testagent.go | 2 +- vendor/vendor.json | 2 +- 5 files changed, 96 insertions(+), 7 deletions(-) diff --git a/command/agent/agent.go b/command/agent/agent.go index 1dce4d976..0fca145c7 100644 --- a/command/agent/agent.go +++ b/command/agent/agent.go @@ -56,6 +56,7 @@ type Agent struct { logger log.Logger httpLogger log.Logger logOutput io.Writer + logWriter *logWriter // consulService is Nomad's custom Consul client for managing services // and checks. @@ -87,10 +88,11 @@ type Agent struct { } // NewAgent is used to create a new agent with the given configuration -func NewAgent(config *Config, logger log.Logger, logOutput io.Writer, inmem *metrics.InmemSink) (*Agent, error) { +func NewAgent(config *Config, logger log.Logger, logOutput io.Writer, logWriter *logWriter, inmem *metrics.InmemSink) (*Agent, error) { a := &Agent{ config: config, logOutput: logOutput, + logWriter: logWriter, shutdownCh: make(chan struct{}), InmemSink: inmem, } diff --git a/command/agent/agent_endpoint.go b/command/agent/agent_endpoint.go index 7fe9eae6f..cdf7a50ad 100644 --- a/command/agent/agent_endpoint.go +++ b/command/agent/agent_endpoint.go @@ -2,11 +2,14 @@ package agent import ( "encoding/json" + "fmt" "net" "net/http" "sort" "strings" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/logutils" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/serf/serf" @@ -145,6 +148,90 @@ func (s *HTTPServer) AgentMembersRequest(resp http.ResponseWriter, req *http.Req return out, nil } +func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + var secret string + s.parseToken(req, &secret) + + // Check agent read permissions + if aclObj, err := s.agent.Server().ResolveToken(secret); err != nil { + return nil, err + } else if aclObj != nil && !aclObj.AllowAgentRead() { + return nil, structs.ErrPermissionDenied + } + + // Get the provided loglevel. + logLevel := req.URL.Query().Get("loglevel") + if logLevel == "" { + logLevel = "INFO" + } + + // Create a level filter and flusher. 
+ filter := LevelFilter() + filter.MinLevel = logutils.LogLevel(strings.ToUpper(logLevel)) + + if !ValidateLevelFilter(filter.MinLevel, filter) { + return nil, CodedError(400, fmt.Sprintf("Unknown log level: %s", filter.MinLevel)) + } + + flusher, ok := resp.(http.Flusher) + if !ok { + return nil, CodedError(400, "Streaming not supported") + } + + handler := &httpLogHandler{ + filter: filter, + logCh: make(chan string, 512), + logger: s.agent.logger, + } + s.agent.logWriter.RegisterHandler(handler) + defer s.agent.logWriter.DeregisterHandler(handler) + notify := resp.(http.CloseNotifier).CloseNotify() + + // Send header so client can start streaming body + resp.WriteHeader(http.StatusOK) + + // 0 byte write is needed before the Flush call so that if we are using + // a gzip stream it will go ahead and write out the HTTP response header + resp.Write([]byte("")) + flusher.Flush() + + for { + select { + case <-notify: + s.agent.logWriter.DeregisterHandler(handler) + if handler.droppedCount > 0 { + s.agent.logger.Warn(fmt.Sprintf("agent: Dropped %d logs during monitor request", handler.droppedCount)) + } + return nil, nil + case log := <-handler.logCh: + fmt.Fprintln(resp, log) + flusher.Flush() + } + } +} + +type httpLogHandler struct { + filter *logutils.LevelFilter + logCh chan string + logger log.Logger + droppedCount int +} + +func (h *httpLogHandler) HandleLog(log string) { + // Check the log level + if !h.filter.Check([]byte(log)) { + return + } + + // Do a non-blocking send + select { + case h.logCh <- log: + default: + // Just increment a counter for dropped logs to this handler; we can't log now + // because the lock is already held by the LogWriter invoking this + h.droppedCount++ + } +} func (s *HTTPServer) AgentForceLeaveRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { if req.Method != "PUT" && req.Method != "POST" { return nil, CodedError(405, ErrInvalidMethod) diff --git a/command/agent/command.go b/command/agent/command.go index 49aa738a1..03115156b 100644 --- a/command/agent/command.go +++ b/command/agent/command.go @@ -447,9 +447,9 @@ func (c *Command) setupLoggers(config *Config) (*gatedwriter.Writer, *logWriter, } // setupAgent is used to start the agent and various interfaces -func (c *Command) setupAgent(config *Config, logger hclog.Logger, logOutput io.Writer, inmem *metrics.InmemSink) error { +func (c *Command) setupAgent(config *Config, logger hclog.Logger, logOutput io.Writer, logWriter *logWriter, inmem *metrics.InmemSink) error { c.Ui.Output("Starting Nomad agent...") - agent, err := NewAgent(config, logger, logOutput, inmem) + agent, err := NewAgent(config, logger, logOutput, logWriter, inmem) if err != nil { c.Ui.Error(fmt.Sprintf("Error starting agent: %s", err)) return err @@ -596,7 +596,7 @@ func (c *Command) Run(args []string) int { } // Setup the log outputs - logGate, _, logOutput := c.setupLoggers(config) + logGate, logWriter, logOutput := c.setupLoggers(config) if logGate == nil { return 1 } @@ -629,7 +629,7 @@ func (c *Command) Run(args []string) int { } // Create the agent - if err := c.setupAgent(config, logger, logOutput, inmem); err != nil { + if err := c.setupAgent(config, logger, logOutput, logWriter, inmem); err != nil { logGate.Flush() return 1 } diff --git a/command/agent/testagent.go b/command/agent/testagent.go index 59d8eaeb8..93c41be5b 100644 --- a/command/agent/testagent.go +++ b/command/agent/testagent.go @@ -223,7 +223,7 @@ func (a *TestAgent) start() (*Agent, error) { JSONFormat: a.Config.LogJson, }) - agent, 
err := NewAgent(a.Config, logger, a.LogOutput, inm) + agent, err := NewAgent(a.Config, logger, a.LogOutput, NewLogWriter(512), inm) if err != nil { return nil, err } diff --git a/vendor/vendor.json b/vendor/vendor.json index 31de2a9af..4bd094abf 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -20,7 +20,7 @@ {"path":"github.com/Microsoft/go-winio/pkg/guid","checksumSHA1":"/ykkyb7gmtZC68n7T24xwbmlCBc=","origin":"github.com/endocrimes/go-winio/pkg/guid","revision":"fb47a8b419480a700368c176bc1d5d7e3393b98d","revisionTime":"2019-06-20T17:03:19Z","version":"dani/safe-relisten","versionExact":"dani/safe-relisten"}, {"path":"github.com/NVIDIA/gpu-monitoring-tools","checksumSHA1":"kF1vk+8Xvb3nGBiw9+qbUc0SZ4M=","revision":"86f2a9fac6c5b597dc494420005144b8ef7ec9fb","revisionTime":"2018-08-29T22:20:09Z"}, {"path":"github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml","checksumSHA1":"P8FATSSgpe5A17FyPrGpsX95Xw8=","revision":"86f2a9fac6c5b597dc494420005144b8ef7ec9fb","revisionTime":"2018-08-29T22:20:09Z"}, - {"path":"github.com/NYTimes/gziphandler","checksumSHA1":"jktW57+vJsziNVPeXMCoujTzdW4=","revision":"dd0439581c7657cb652dfe5c71d7d48baf39541d","revisionTime":"2017-09-16T00:36:49Z"}, + {"path":"github.com/NYTimes/gziphandler","checksumSHA1":"Ylaw7hBEShLk8L5U89e7l6OKWKo=","revision":"dd0439581c7657cb652dfe5c71d7d48baf39541d","revisionTime":"2019-02-21T23:16:47Z"}, {"path":"github.com/Nvveen/Gotty","checksumSHA1":"Aqy8/FoAIidY/DeQ5oTYSZ4YFVc=","revision":"cd527374f1e5bff4938207604a14f2e38a9cf512","revisionTime":"2012-06-04T00:48:16Z"}, {"path":"github.com/StackExchange/wmi","checksumSHA1":"qtjd74+bErubh+qyv3s+lWmn9wc=","revision":"ea383cf3ba6ec950874b8486cd72356d007c768f","revisionTime":"2017-04-10T19:29:09Z"}, {"path":"github.com/agext/levenshtein","checksumSHA1":"jQh1fnoKPKMURvKkpdRjN695nAQ=","revision":"5f10fee965225ac1eecdc234c09daf5cd9e7f7b6","revisionTime":"2017-02-17T06:30:20Z"}, From dc3286481ac21dc33e8118ee00288cb3642a8924 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Mon, 7 Oct 2019 16:19:32 -0400 Subject: [PATCH 03/34] Add Agent Monitor to receive streaming logs Queries /v1/agent/monitor and receives streaming logs from client --- api/agent.go | 50 +++++++++++++++++++++++++++++++++++++++++++ api/agent_test.go | 26 ++++++++++++++++++++++ command/agent/http.go | 4 +++- 3 files changed, 79 insertions(+), 1 deletion(-) diff --git a/api/agent.go b/api/agent.go index 46979403c..6df8482bd 100644 --- a/api/agent.go +++ b/api/agent.go @@ -1,6 +1,7 @@ package api import ( + "bufio" "encoding/json" "fmt" "net/url" @@ -237,6 +238,55 @@ func (a *Agent) Health() (*AgentHealthResponse, error) { return nil, fmt.Errorf("unable to unmarshal response with status %d: %v", resp.StatusCode, err) } +// Monitor returns a channel which will receive streaming logs from the agent +// Providing a non-nil stopCh can be used to close the connection and stop log streaming +func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { + r, err := a.client.newRequest("GET", "/v1/agent/monitor") + if err != nil { + return nil, err + } + + r.setQueryOptions(q) + if loglevel != "" { + r.params.Add("loglevel", loglevel) + } + + _, resp, err := requireOK(a.client.doRequest(r)) + if err != nil { + return nil, err + } + + logCh := make(chan string, 64) + go func() { + defer resp.Body.Close() + + scanner := bufio.NewScanner(resp.Body) + for { + select { + case <-stopCh: + close(logCh) + return + default: + } + if scanner.Scan() 
{ + // An empty string signals to the caller that + // the scan is done, so make sure we only emit + // that when the scanner says it's done, not if + // we happen to ingest an empty line. + if text := scanner.Text(); text != "" { + logCh <- text + } else { + logCh <- " " + } + } else { + logCh <- "" + } + } + }() + + return logCh, nil +} + // joinResponse is used to decode the response we get while // sending a member join request. type joinResponse struct { diff --git a/api/agent_test.go b/api/agent_test.go index b8658ae08..b80e3ac5a 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -3,7 +3,9 @@ package api import ( "reflect" "sort" + "strings" "testing" + "time" "github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/assert" @@ -257,3 +259,27 @@ func TestAgent_Health(t *testing.T) { assert.Nil(err) assert.True(health.Server.Ok) } + +func TestAgent_Monitor(t *testing.T) { + t.Parallel() + c, s := makeClient(t, nil, nil) + defer s.Stop() + + agent := c.Agent() + + logCh, err := agent.Monitor("info", nil, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Wait for the first log message and validate it + select { + case log := <-logCh: + // TODO: checkout why stub_asset.go help text returns here + if !strings.Contains(log, "[INFO ] nomad: raft: Initial configuration") { + t.Fatalf("bad: %q", log) + } + case <-time.After(1000 * time.Second): + t.Fatalf("failed to get a log message") + } +} diff --git a/command/agent/http.go b/command/agent/http.go index 1bb673a2a..6ea45dcff 100644 --- a/command/agent/http.go +++ b/command/agent/http.go @@ -106,7 +106,9 @@ func NewHTTPServer(agent *Agent, config *Config) (*HTTPServer, error) { srv.registerHandlers(config.EnableDebug) // Handle requests with gzip compression - gzip, err := gziphandler.GzipHandlerWithOpts(gziphandler.MinSize(0)) + // Use MinSize of 1 to allow a zero byte flush to return + // response header used for streaming + gzip, err := gziphandler.GzipHandlerWithOpts(gziphandler.MinSize(1)) if err != nil { return nil, err } From 74cfdf55bb252a8b74ece90837755353d5cfa8ee Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Mon, 7 Oct 2019 16:41:52 -0400 Subject: [PATCH 04/34] Adds nomad monitor command Adds nomad monitor command. Like consul monitor, this command allows you to stream logs from a nomad agent in real time with a a specified log level add endpoint tests Upgrade go-hclog to latest version The current version of go-hclog pads log prefixes to equal lengths so info becomes [INFO ] and debug becomes [DEBUG]. This breaks hashicorp/logutils/level.go Check function. 
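A minimal sketch of why the padding matters, assuming hashicorp/logutils' exported LevelFilter API: Check extracts whatever sits between the first pair of brackets, so a padded level such as "INFO " is never recognized and slips past the filter.

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/hashicorp/logutils"
)

func main() {
	// A filter that should drop everything below WARN.
	filter := &logutils.LevelFilter{
		Levels:   []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"},
		MinLevel: logutils.LogLevel("WARN"),
		Writer:   ioutil.Discard,
	}

	// Padded prefix emitted by the old go-hclog: the extracted level is
	// "INFO " (trailing space), which is unknown, so the line is kept.
	fmt.Println(filter.Check([]byte("2019-10-07T15:48:01 [INFO ] nomad: example"))) // true

	// Unpadded prefix: the level is recognized and filtered as expected.
	fmt.Println(filter.Check([]byte("2019-10-07T15:48:01 [INFO] nomad: example"))) // false
}
```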
Upgrading to the latest version removes this padding and fixes log filtering that uses logutils Check --- command/agent/agent_endpoint.go | 2 +- command/agent/agent_endpoint_test.go | 66 +++ command/agent/http.go | 1 + command/agent/testagent.go | 10 +- command/agent_monitor.go | 85 ++++ command/agent_monitor_test.go | 34 ++ command/commands.go | 5 + .../github.com/hashicorp/go-hclog/README.md | 15 + .../github.com/hashicorp/go-hclog/context.go | 38 ++ .../github.com/hashicorp/go-hclog/global.go | 40 +- vendor/github.com/hashicorp/go-hclog/go.mod | 7 + vendor/github.com/hashicorp/go-hclog/go.sum | 6 + .../go-hclog/{int.go => intlogger.go} | 380 +++++++++++------- .../hashicorp/go-hclog/{log.go => logger.go} | 47 ++- .../hashicorp/go-hclog/nulllogger.go | 7 +- .../hashicorp/go-hclog/stacktrace.go | 9 +- .../github.com/hashicorp/go-hclog/stdlog.go | 50 ++- .../github.com/hashicorp/go-hclog/writer.go | 74 ++++ vendor/vendor.json | 2 +- 19 files changed, 673 insertions(+), 205 deletions(-) create mode 100644 command/agent_monitor.go create mode 100644 command/agent_monitor_test.go create mode 100644 vendor/github.com/hashicorp/go-hclog/context.go create mode 100644 vendor/github.com/hashicorp/go-hclog/go.mod create mode 100644 vendor/github.com/hashicorp/go-hclog/go.sum rename vendor/github.com/hashicorp/go-hclog/{int.go => intlogger.go} (54%) rename vendor/github.com/hashicorp/go-hclog/{log.go => logger.go} (72%) create mode 100644 vendor/github.com/hashicorp/go-hclog/writer.go diff --git a/command/agent/agent_endpoint.go b/command/agent/agent_endpoint.go index cdf7a50ad..ffb505c0e 100644 --- a/command/agent/agent_endpoint.go +++ b/command/agent/agent_endpoint.go @@ -167,7 +167,7 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( // Create a level filter and flusher. 
filter := LevelFilter() - filter.MinLevel = logutils.LogLevel(strings.ToUpper(logLevel)) + filter.SetMinLevel(logutils.LogLevel(strings.ToUpper(logLevel))) if !ValidateLevelFilter(filter.MinLevel, filter) { return nil, CodedError(400, fmt.Sprintf("Unknown log level: %s", filter.MinLevel)) diff --git a/command/agent/agent_endpoint_test.go b/command/agent/agent_endpoint_test.go index d9e26f186..5714b1c7b 100644 --- a/command/agent/agent_endpoint_test.go +++ b/command/agent/agent_endpoint_test.go @@ -8,6 +8,7 @@ import ( "net/http" "net/http/httptest" "net/url" + "strings" "testing" "time" @@ -249,11 +250,76 @@ func TestHTTP_AgentMembers_ACL(t *testing.T) { }) } +func TestHTTP_AgentMonitor(t *testing.T) { + t.Parallel() + + httpTest(t, nil, func(s *TestAgent) { + { + req, err := http.NewRequest("GET", "/v1/agent/monitor?loglevel=unkown", nil) + require.Nil(t, err) + resp := newClosableRecorder() + + // Make the request + _, err = s.Server.AgentMonitor(resp, req) + if err.(HTTPCodedError).Code() != 400 { + t.Fatalf("expected 400 response, got: %v", resp.Code) + } + } + + // check for a specific log + { + req, err := http.NewRequest("GET", "/v1/agent/monitor?loglevel=warn", nil) + require.Nil(t, err) + resp := newClosableRecorder() + defer resp.Close() + + go func() { + s.Server.logger.Debug("log that should not be sent") + s.Server.logger.Warn("log that should be sent") + _, err = s.Server.AgentMonitor(resp, req) + require.NoError(t, err) + }() + + testutil.WaitForResult(func() (bool, error) { + got := resp.Body.String() + want := "[WARN ] http: log that should be sent" + if strings.Contains(got, want) { + require.NotContains(t, resp.Body.String(), "[INFO ]") + return true, nil + } + return false, fmt.Errorf("missing expected log, got: %v, want: %v", got, want) + }, func(err error) { + require.Fail(t, err.Error()) + }) + } + }) +} + +type closableRecorder struct { + *httptest.ResponseRecorder + closer chan bool +} + +func newClosableRecorder() *closableRecorder { + r := httptest.NewRecorder() + closer := make(chan bool) + return &closableRecorder{r, closer} +} + +func (r *closableRecorder) Close() { + close(r.closer) +} + +func (r *closableRecorder) CloseNotify() <-chan bool { + return r.closer +} + func TestHTTP_AgentForceLeave(t *testing.T) { t.Parallel() httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("PUT", "/v1/agent/force-leave?node=foo", nil) + require.Nil(t, err) if err != nil { t.Fatalf("err: %v", err) } diff --git a/command/agent/http.go b/command/agent/http.go index 6ea45dcff..045a2fa49 100644 --- a/command/agent/http.go +++ b/command/agent/http.go @@ -185,6 +185,7 @@ func (s *HTTPServer) registerHandlers(enableDebug bool) { s.mux.HandleFunc("/v1/agent/servers", s.wrap(s.AgentServersRequest)) s.mux.HandleFunc("/v1/agent/keyring/", s.wrap(s.KeyringOperationRequest)) s.mux.HandleFunc("/v1/agent/health", s.wrap(s.HealthRequest)) + s.mux.HandleFunc("/v1/agent/monitor", s.wrap(s.AgentMonitor)) s.mux.HandleFunc("/v1/metrics", s.wrap(s.MetricsRequest)) diff --git a/command/agent/testagent.go b/command/agent/testagent.go index 93c41be5b..b773cda18 100644 --- a/command/agent/testagent.go +++ b/command/agent/testagent.go @@ -60,6 +60,8 @@ type TestAgent struct { // to os.Stderr. LogOutput io.Writer + logWriter *logWriter + // DataDir is the data directory which is used when Config.DataDir // is not set. It is created automatically and removed when // Shutdown() is called. 
@@ -205,8 +207,12 @@ RETRY: } func (a *TestAgent) start() (*Agent, error) { + if a.logWriter == nil { + a.logWriter = NewLogWriter(512) + } + if a.LogOutput == nil { - a.LogOutput = testlog.NewWriter(a.T) + a.LogOutput = io.MultiWriter(testlog.NewWriter(a.T), a.logWriter) } inm := metrics.NewInmemSink(10*time.Second, time.Minute) @@ -223,7 +229,7 @@ func (a *TestAgent) start() (*Agent, error) { JSONFormat: a.Config.LogJson, }) - agent, err := NewAgent(a.Config, logger, a.LogOutput, NewLogWriter(512), inm) + agent, err := NewAgent(a.Config, logger, a.LogOutput, a.logWriter, inm) if err != nil { return nil, err } diff --git a/command/agent_monitor.go b/command/agent_monitor.go new file mode 100644 index 000000000..cc58dbbe9 --- /dev/null +++ b/command/agent_monitor.go @@ -0,0 +1,85 @@ +package command + +import ( + "fmt" + "os" + "os/signal" + "strings" + "syscall" +) + +type MonitorCommand struct { + Meta +} + +func (c *MonitorCommand) Help() string { + helpText := ` +Usage: nomad monitor [options] + + Shows recent log messages of a nomad agent, and attaches to the agent, + outputting log messagse as they occur in real time. The monitor lets you + listen for log levels that may be filtered out of the Nomad agent. For + example your agent may only be logging at INFO level, but with the monitor + command you can set -log-level DEBUG + +General Options: + + ` + generalOptionsUsage() + return strings.TrimSpace(helpText) +} + +func (c *MonitorCommand) Synopsis() string { + return "stream logs from a nomad agent" +} + +func (c *MonitorCommand) Name() string { return "monitor" } + +func (c *MonitorCommand) Run(args []string) int { + var logLevel string + + flags := c.Meta.FlagSet(c.Name(), FlagSetClient) + flags.Usage = func() { c.Ui.Output(c.Help()) } + flags.StringVar(&logLevel, "log-level", "", "") + + if err := flags.Parse(args); err != nil { + return 1 + } + + client, err := c.Meta.Client() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) + c.Ui.Error(commandErrorText(c)) + return 1 + } + + eventDoneCh := make(chan struct{}) + logCh, err := client.Agent().Monitor(logLevel, eventDoneCh, nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error starting monitor: %s", err)) + c.Ui.Error(commandErrorText(c)) + return 1 + } + + go func() { + defer close(eventDoneCh) + OUTER: + for { + select { + case log := <-logCh: + if log == "" { + break OUTER + } + c.Ui.Output(log) + } + } + + }() + + signalCh := make(chan os.Signal, 1) + signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) + + select { + case <-signalCh: + return 0 + } +} diff --git a/command/agent_monitor_test.go b/command/agent_monitor_test.go new file mode 100644 index 000000000..eb49e92c3 --- /dev/null +++ b/command/agent_monitor_test.go @@ -0,0 +1,34 @@ +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func TestMonitorCommand_Implements(t *testing.T) { + t.Parallel() + var _ cli.Command = &MonitorCommand{} +} + +func TestMonitorCommand_Fails(t *testing.T) { + t.Parallel() + + ui := new(cli.MockUi) + cmd := &MonitorCommand{Meta: Meta{Ui: ui}} + + // Fails on misuse + if code := cmd.Run([]string{"some", "bad", "args"}); code != 1 { + t.Fatalf("exepected exit code 1, got: %d", code) + } + if out := ui.ErrorWriter.String(); !strings.Contains(out, commandErrorText(cmd)) { + t.Fatalf("expected help output, got: %s", out) + } + + ui.ErrorWriter.Reset() + + if code := cmd.Run([]string{"-address=nope"}); code != 1 { + t.Fatalf("exepected exit code 1, got: %d", code) + } +} 
diff --git a/command/commands.go b/command/commands.go index 0eeade107..2c2d3052a 100644 --- a/command/commands.go +++ b/command/commands.go @@ -366,6 +366,11 @@ func Commands(metaPtr *Meta, agentUi cli.Ui) map[string]cli.CommandFactory { Meta: meta, }, nil }, + "monitor": func() (cli.Command, error) { + return &MonitorCommand{ + Meta: meta, + }, nil + }, "namespace": func() (cli.Command, error) { return &NamespaceCommand{ Meta: meta, diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md index 1153e2853..9b6845e98 100644 --- a/vendor/github.com/hashicorp/go-hclog/README.md +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -128,6 +128,21 @@ stdLogger.Printf("[DEBUG] %+v", stdLogger) ... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]} ``` +Alternatively, you may configure the system-wide logger: + +```go +// log the standard logger from 'import "log"' +log.SetOutput(appLogger.Writer(&hclog.StandardLoggerOptions{InferLevels: true})) +log.SetPrefix("") +log.SetFlags(0) + +log.Printf("[DEBUG] %d", 42) +``` + +```text +... [DEBUG] my-app: 42 +``` + Notice that if `appLogger` is initialized with the `INFO` log level _and_ you specify `InferLevels: true`, you will not see any output here. You must change `appLogger` to `DEBUG` to see output. See the docs for more information. diff --git a/vendor/github.com/hashicorp/go-hclog/context.go b/vendor/github.com/hashicorp/go-hclog/context.go new file mode 100644 index 000000000..7815f5019 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/context.go @@ -0,0 +1,38 @@ +package hclog + +import ( + "context" +) + +// WithContext inserts a logger into the context and is retrievable +// with FromContext. The optional args can be set with the same syntax as +// Logger.With to set fields on the inserted logger. This will not modify +// the logger argument in-place. +func WithContext(ctx context.Context, logger Logger, args ...interface{}) context.Context { + // While we could call logger.With even with zero args, we have this + // check to avoid unnecessary allocations around creating a copy of a + // logger. + if len(args) > 0 { + logger = logger.With(args...) + } + + return context.WithValue(ctx, contextKey, logger) +} + +// FromContext returns a logger from the context. This will return L() +// (the default logger) if no logger is found in the context. Therefore, +// this will never return a nil value. +func FromContext(ctx context.Context) Logger { + logger, _ := ctx.Value(contextKey).(Logger) + if logger == nil { + return L() + } + + return logger +} + +// Unexported new type so that our context key never collides with another. +type contextKeyType struct{} + +// contextKey is the key used for the context to store the logger. +var contextKey = contextKeyType{} diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go index 55ce43960..22ebc57d8 100644 --- a/vendor/github.com/hashicorp/go-hclog/global.go +++ b/vendor/github.com/hashicorp/go-hclog/global.go @@ -8,27 +8,55 @@ var ( protect sync.Once def Logger - // The options used to create the Default logger. These are - // read only when the Default logger is created, so set them - // as soon as the process starts. + // DefaultOptions is used to create the Default logger. These are read + // only when the Default logger is created, so set them as soon as the + // process starts. 
DefaultOptions = &LoggerOptions{ Level: DefaultLevel, Output: DefaultOutput, } ) -// Return a logger that is held globally. This can be a good starting +// Default returns a globally held logger. This can be a good starting // place, and then you can use .With() and .Name() to create sub-loggers // to be used in more specific contexts. +// The value of the Default logger can be set via SetDefault() or by +// changing the options in DefaultOptions. +// +// This method is goroutine safe, returning a global from memory, but +// cause should be used if SetDefault() is called it random times +// in the program as that may result in race conditions and an unexpected +// Logger being returned. func Default() Logger { protect.Do(func() { - def = New(DefaultOptions) + // If SetDefault was used before Default() was called, we need to + // detect that here. + if def == nil { + def = New(DefaultOptions) + } }) return def } -// A short alias for Default() +// L is a short alias for Default(). func L() Logger { return Default() } + +// SetDefault changes the logger to be returned by Default()and L() +// to the one given. This allows packages to use the default logger +// and have higher level packages change it to match the execution +// environment. It returns any old default if there is one. +// +// NOTE: This is expected to be called early in the program to setup +// a default logger. As such, it does not attempt to make itself +// not racy with regard to the value of the default logger. Ergo +// if it is called in goroutines, you may experience race conditions +// with other goroutines retrieving the default logger. Basically, +// don't do that. +func SetDefault(log Logger) Logger { + old := def + def = log + return old +} diff --git a/vendor/github.com/hashicorp/go-hclog/go.mod b/vendor/github.com/hashicorp/go-hclog/go.mod new file mode 100644 index 000000000..0d079a654 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/go.mod @@ -0,0 +1,7 @@ +module github.com/hashicorp/go-hclog + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 +) diff --git a/vendor/github.com/hashicorp/go-hclog/go.sum b/vendor/github.com/hashicorp/go-hclog/go.sum new file mode 100644 index 000000000..e03ee77d9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/go.sum @@ -0,0 +1,6 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/hashicorp/go-hclog/int.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go similarity index 54% rename from vendor/github.com/hashicorp/go-hclog/int.go rename to vendor/github.com/hashicorp/go-hclog/intlogger.go index 7d17d81cb..219656c4c 100644 --- a/vendor/github.com/hashicorp/go-hclog/int.go +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -1,12 +1,13 @@ package hclog import ( - "bufio" + "bytes" "encoding" "encoding/json" "fmt" + "io" "log" - "os" + "reflect" "runtime" "sort" "strconv" @@ -16,17 +17,44 @@ import ( "time" ) +// TimeFormat to use for logging. 
This is a version of RFC3339 that contains +// contains millisecond precision +const TimeFormat = "2006-01-02T15:04:05.000Z0700" + +// errJsonUnsupportedTypeMsg is included in log json entries, if an arg cannot be serialized to json +const errJsonUnsupportedTypeMsg = "logging contained values that don't serialize to json" + var ( _levelToBracket = map[Level]string{ Debug: "[DEBUG]", Trace: "[TRACE]", - Info: "[INFO ]", - Warn: "[WARN ]", + Info: "[INFO] ", + Warn: "[WARN] ", Error: "[ERROR]", } ) -// Given the options (nil for defaults), create a new Logger +// Make sure that intLogger is a Logger +var _ Logger = &intLogger{} + +// intLogger is an internal logger implementation. Internal in that it is +// defined entirely by this package. +type intLogger struct { + json bool + caller bool + name string + timeFormat string + + // This is a pointer so that it's shared by any derived loggers, since + // those derived loggers share the bufio.Writer as well. + mutex *sync.Mutex + writer *writer + level *int32 + + implied []interface{} +} + +// New returns a configured logger. func New(opts *LoggerOptions) Logger { if opts == nil { opts = &LoggerOptions{} @@ -34,7 +62,7 @@ func New(opts *LoggerOptions) Logger { output := opts.Output if output == nil { - output = os.Stderr + output = DefaultOutput } level := opts.Level @@ -42,70 +70,49 @@ func New(opts *LoggerOptions) Logger { level = DefaultLevel } - mtx := opts.Mutex - if mtx == nil { - mtx = new(sync.Mutex) + mutex := opts.Mutex + if mutex == nil { + mutex = new(sync.Mutex) } - ret := &intLogger{ - m: mtx, + l := &intLogger{ json: opts.JSONFormat, caller: opts.IncludeLocation, name: opts.Name, timeFormat: TimeFormat, - w: bufio.NewWriter(output), + mutex: mutex, + writer: newWriter(output), level: new(int32), } + if opts.TimeFormat != "" { - ret.timeFormat = opts.TimeFormat + l.timeFormat = opts.TimeFormat } - atomic.StoreInt32(ret.level, int32(level)) - return ret + + atomic.StoreInt32(l.level, int32(level)) + + return l } -// The internal logger implementation. Internal in that it is defined entirely -// by this package. -type intLogger struct { - json bool - caller bool - name string - timeFormat string - - // this is a pointer so that it's shared by any derived loggers, since - // those derived loggers share the bufio.Writer as well. - m *sync.Mutex - w *bufio.Writer - level *int32 - - implied []interface{} -} - -// Make sure that intLogger is a Logger -var _ Logger = &intLogger{} - -// The time format to use for logging. This is a version of RFC3339 that -// contains millisecond precision -const TimeFormat = "2006-01-02T15:04:05.000Z0700" - // Log a message and a set of key/value pairs if the given level is at // or more severe that the threshold configured in the Logger. -func (z *intLogger) Log(level Level, msg string, args ...interface{}) { - if level < Level(atomic.LoadInt32(z.level)) { +func (l *intLogger) Log(level Level, msg string, args ...interface{}) { + if level < Level(atomic.LoadInt32(l.level)) { return } t := time.Now() - z.m.Lock() - defer z.m.Unlock() + l.mutex.Lock() + defer l.mutex.Unlock() - if z.json { - z.logJson(t, level, msg, args...) + if l.json { + l.logJSON(t, level, msg, args...) } else { - z.log(t, level, msg, args...) + l.log(t, level, msg, args...) } - z.w.Flush() + l.writer.Flush(level) } // Cleanup a path by returning the last 2 segments of the path only. 
@@ -121,10 +128,8 @@ func trimCallerPath(path string) string { // and https://github.com/golang/go/issues/18151 // // for discussion on the issue on Go side. - // // Find the last separator. - // idx := strings.LastIndexByte(path, '/') if idx == -1 { return path @@ -140,37 +145,37 @@ func trimCallerPath(path string) string { } // Non-JSON logging format function -func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) { - z.w.WriteString(t.Format(z.timeFormat)) - z.w.WriteByte(' ') +func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) { + l.writer.WriteString(t.Format(l.timeFormat)) + l.writer.WriteByte(' ') s, ok := _levelToBracket[level] if ok { - z.w.WriteString(s) + l.writer.WriteString(s) } else { - z.w.WriteString("[UNKN ]") + l.writer.WriteString("[?????]") } - if z.caller { + if l.caller { if _, file, line, ok := runtime.Caller(3); ok { - z.w.WriteByte(' ') - z.w.WriteString(trimCallerPath(file)) - z.w.WriteByte(':') - z.w.WriteString(strconv.Itoa(line)) - z.w.WriteByte(':') + l.writer.WriteByte(' ') + l.writer.WriteString(trimCallerPath(file)) + l.writer.WriteByte(':') + l.writer.WriteString(strconv.Itoa(line)) + l.writer.WriteByte(':') } } - z.w.WriteByte(' ') + l.writer.WriteByte(' ') - if z.name != "" { - z.w.WriteString(z.name) - z.w.WriteString(": ") + if l.name != "" { + l.writer.WriteString(l.name) + l.writer.WriteString(": ") } - z.w.WriteString(msg) + l.writer.WriteString(msg) - args = append(z.implied, args...) + args = append(l.implied, args...) var stacktrace CapturedStacktrace @@ -185,11 +190,14 @@ func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{ } } - z.w.WriteByte(':') + l.writer.WriteByte(':') FOR: for i := 0; i < len(args); i = i + 2 { - var val string + var ( + val string + raw bool + ) switch st := args[i+1].(type) { case string: @@ -220,66 +228,79 @@ func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{ case Format: val = fmt.Sprintf(st[0].(string), st[1:]...) 
default: - val = fmt.Sprintf("%v", st) + v := reflect.ValueOf(st) + if v.Kind() == reflect.Slice { + val = l.renderSlice(v) + raw = true + } else { + val = fmt.Sprintf("%v", st) + } } - z.w.WriteByte(' ') - z.w.WriteString(args[i].(string)) - z.w.WriteByte('=') + l.writer.WriteByte(' ') + l.writer.WriteString(args[i].(string)) + l.writer.WriteByte('=') - if strings.ContainsAny(val, " \t\n\r") { - z.w.WriteByte('"') - z.w.WriteString(val) - z.w.WriteByte('"') + if !raw && strings.ContainsAny(val, " \t\n\r") { + l.writer.WriteByte('"') + l.writer.WriteString(val) + l.writer.WriteByte('"') } else { - z.w.WriteString(val) + l.writer.WriteString(val) } } } - z.w.WriteString("\n") + l.writer.WriteString("\n") if stacktrace != "" { - z.w.WriteString(string(stacktrace)) + l.writer.WriteString(string(stacktrace)) } } -// JSON logging function -func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interface{}) { - vals := map[string]interface{}{ - "@message": msg, - "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), - } +func (l *intLogger) renderSlice(v reflect.Value) string { + var buf bytes.Buffer - var levelStr string - switch level { - case Error: - levelStr = "error" - case Warn: - levelStr = "warn" - case Info: - levelStr = "info" - case Debug: - levelStr = "debug" - case Trace: - levelStr = "trace" - default: - levelStr = "all" - } + buf.WriteRune('[') - vals["@level"] = levelStr + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteString(", ") + } - if z.name != "" { - vals["@module"] = z.name - } + sv := v.Index(i) - if z.caller { - if _, file, line, ok := runtime.Caller(3); ok { - vals["@caller"] = fmt.Sprintf("%s:%d", file, line) + var val string + + switch sv.Kind() { + case reflect.String: + val = sv.String() + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + val = strconv.FormatInt(sv.Int(), 10) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + val = strconv.FormatUint(sv.Uint(), 10) + default: + val = fmt.Sprintf("%v", sv.Interface()) + } + + if strings.ContainsAny(val, " \t\n\r") { + buf.WriteByte('"') + buf.WriteString(val) + buf.WriteByte('"') + } else { + buf.WriteString(val) } } - args = append(z.implied, args...) + buf.WriteRune(']') + + return buf.String() +} + +// JSON logging function +func (l *intLogger) logJSON(t time.Time, level Level, msg string, args ...interface{}) { + vals := l.jsonMapEntry(t, level, msg) + args = append(l.implied, args...) 
if args != nil && len(args) > 0 { if len(args)%2 != 0 { @@ -317,80 +338,121 @@ func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interf } } - err := json.NewEncoder(z.w).Encode(vals) + err := json.NewEncoder(l.writer).Encode(vals) if err != nil { - panic(err) + if _, ok := err.(*json.UnsupportedTypeError); ok { + plainVal := l.jsonMapEntry(t, level, msg) + plainVal["@warn"] = errJsonUnsupportedTypeMsg + + json.NewEncoder(l.writer).Encode(plainVal) + } } } +func (l intLogger) jsonMapEntry(t time.Time, level Level, msg string) map[string]interface{} { + vals := map[string]interface{}{ + "@message": msg, + "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), + } + + var levelStr string + switch level { + case Error: + levelStr = "error" + case Warn: + levelStr = "warn" + case Info: + levelStr = "info" + case Debug: + levelStr = "debug" + case Trace: + levelStr = "trace" + default: + levelStr = "all" + } + + vals["@level"] = levelStr + + if l.name != "" { + vals["@module"] = l.name + } + + if l.caller { + if _, file, line, ok := runtime.Caller(4); ok { + vals["@caller"] = fmt.Sprintf("%s:%d", file, line) + } + } + return vals +} + // Emit the message and args at DEBUG level -func (z *intLogger) Debug(msg string, args ...interface{}) { - z.Log(Debug, msg, args...) +func (l *intLogger) Debug(msg string, args ...interface{}) { + l.Log(Debug, msg, args...) } // Emit the message and args at TRACE level -func (z *intLogger) Trace(msg string, args ...interface{}) { - z.Log(Trace, msg, args...) +func (l *intLogger) Trace(msg string, args ...interface{}) { + l.Log(Trace, msg, args...) } // Emit the message and args at INFO level -func (z *intLogger) Info(msg string, args ...interface{}) { - z.Log(Info, msg, args...) +func (l *intLogger) Info(msg string, args ...interface{}) { + l.Log(Info, msg, args...) } // Emit the message and args at WARN level -func (z *intLogger) Warn(msg string, args ...interface{}) { - z.Log(Warn, msg, args...) +func (l *intLogger) Warn(msg string, args ...interface{}) { + l.Log(Warn, msg, args...) } // Emit the message and args at ERROR level -func (z *intLogger) Error(msg string, args ...interface{}) { - z.Log(Error, msg, args...) +func (l *intLogger) Error(msg string, args ...interface{}) { + l.Log(Error, msg, args...) 
} // Indicate that the logger would emit TRACE level logs -func (z *intLogger) IsTrace() bool { - return Level(atomic.LoadInt32(z.level)) == Trace +func (l *intLogger) IsTrace() bool { + return Level(atomic.LoadInt32(l.level)) == Trace } // Indicate that the logger would emit DEBUG level logs -func (z *intLogger) IsDebug() bool { - return Level(atomic.LoadInt32(z.level)) <= Debug +func (l *intLogger) IsDebug() bool { + return Level(atomic.LoadInt32(l.level)) <= Debug } // Indicate that the logger would emit INFO level logs -func (z *intLogger) IsInfo() bool { - return Level(atomic.LoadInt32(z.level)) <= Info +func (l *intLogger) IsInfo() bool { + return Level(atomic.LoadInt32(l.level)) <= Info } // Indicate that the logger would emit WARN level logs -func (z *intLogger) IsWarn() bool { - return Level(atomic.LoadInt32(z.level)) <= Warn +func (l *intLogger) IsWarn() bool { + return Level(atomic.LoadInt32(l.level)) <= Warn } // Indicate that the logger would emit ERROR level logs -func (z *intLogger) IsError() bool { - return Level(atomic.LoadInt32(z.level)) <= Error +func (l *intLogger) IsError() bool { + return Level(atomic.LoadInt32(l.level)) <= Error } // Return a sub-Logger for which every emitted log message will contain // the given key/value pairs. This is used to create a context specific // Logger. -func (z *intLogger) With(args ...interface{}) Logger { +func (l *intLogger) With(args ...interface{}) Logger { if len(args)%2 != 0 { panic("With() call requires paired arguments") } - var nz intLogger = *z + sl := *l - result := make(map[string]interface{}, len(z.implied)+len(args)) - keys := make([]string, 0, len(z.implied)+len(args)) + result := make(map[string]interface{}, len(l.implied)+len(args)) + keys := make([]string, 0, len(l.implied)+len(args)) // Read existing args, store map and key for consistent sorting - for i := 0; i < len(z.implied); i += 2 { - key := z.implied[i].(string) + for i := 0; i < len(l.implied); i += 2 { + key := l.implied[i].(string) keys = append(keys, key) - result[key] = z.implied[i+1] + result[key] = l.implied[i+1] } // Read new args, store map and key for consistent sorting for i := 0; i < len(args); i += 2 { @@ -405,53 +467,61 @@ func (z *intLogger) With(args ...interface{}) Logger { // Sort keys to be consistent sort.Strings(keys) - nz.implied = make([]interface{}, 0, len(z.implied)+len(args)) + sl.implied = make([]interface{}, 0, len(l.implied)+len(args)) for _, k := range keys { - nz.implied = append(nz.implied, k) - nz.implied = append(nz.implied, result[k]) + sl.implied = append(sl.implied, k) + sl.implied = append(sl.implied, result[k]) } - return &nz + return &sl } // Create a new sub-Logger that a name decending from the current name. // This is used to create a subsystem specific Logger. -func (z *intLogger) Named(name string) Logger { - var nz intLogger = *z +func (l *intLogger) Named(name string) Logger { + sl := *l - if nz.name != "" { - nz.name = nz.name + "." + name + if sl.name != "" { + sl.name = sl.name + "." + name } else { - nz.name = name + sl.name = name } - return &nz + return &sl } // Create a new sub-Logger with an explicit name. This ignores the current // name. This is used to create a standalone logger that doesn't fall // within the normal hierarchy. -func (z *intLogger) ResetNamed(name string) Logger { - var nz intLogger = *z +func (l *intLogger) ResetNamed(name string) Logger { + sl := *l - nz.name = name + sl.name = name - return &nz + return &sl } // Update the logging level on-the-fly. 
This will affect all subloggers as // well. -func (z *intLogger) SetLevel(level Level) { - atomic.StoreInt32(z.level, int32(level)) +func (l *intLogger) SetLevel(level Level) { + atomic.StoreInt32(l.level, int32(level)) } // Create a *log.Logger that will send it's data through this Logger. This // allows packages that expect to be using the standard library log to actually // use this logger. -func (z *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { +func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { if opts == nil { opts = &StandardLoggerOptions{} } - return log.New(&stdlogAdapter{z, opts.InferLevels}, "", 0) + return log.New(l.StandardWriter(opts), "", 0) +} + +func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return &stdlogAdapter{ + log: l, + inferLevels: opts.InferLevels, + forceLevel: opts.ForceLevel, + } } diff --git a/vendor/github.com/hashicorp/go-hclog/log.go b/vendor/github.com/hashicorp/go-hclog/logger.go similarity index 72% rename from vendor/github.com/hashicorp/go-hclog/log.go rename to vendor/github.com/hashicorp/go-hclog/logger.go index 894e8461b..080ed7999 100644 --- a/vendor/github.com/hashicorp/go-hclog/log.go +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -9,38 +9,42 @@ import ( ) var ( - DefaultOutput = os.Stderr - DefaultLevel = Info + //DefaultOutput is used as the default log output. + DefaultOutput io.Writer = os.Stderr + + // DefaultLevel is used as the default log level. + DefaultLevel = Info ) +// Level represents a log level. type Level int32 const ( - // This is a special level used to indicate that no level has been + // NoLevel is a special level used to indicate that no level has been // set and allow for a default to be used. NoLevel Level = 0 - // The most verbose level. Intended to be used for the tracing of actions - // in code, such as function enters/exits, etc. + // Trace is the most verbose level. Intended to be used for the tracing + // of actions in code, such as function enters/exits, etc. Trace Level = 1 - // For programmer lowlevel analysis. + // Debug information for programmer lowlevel analysis. Debug Level = 2 - // For information about steady state operations. + // Info information about steady state operations. Info Level = 3 - // For information about rare but handled events. + // Warn information about rare but handled events. Warn Level = 4 - // For information about unrecoverable events. + // Error information about unrecoverable events. Error Level = 5 ) -// When processing a value of this type, the logger automatically treats the first -// argument as a Printf formatting string and passes the rest as the values to be -// formatted. For example: L.Info(Fmt{"%d beans/day", beans}). This is a simple -// convience type for when formatting is required. +// Format is a simple convience type for when formatting is required. When +// processing a value of this type, the logger automatically treats the first +// argument as a Printf formatting string and passes the rest as the values +// to be formatted. For example: L.Info(Fmt{"%d beans/day", beans}). type Format []interface{} // Fmt returns a Format type. This is a convience function for creating a Format @@ -53,7 +57,7 @@ func Fmt(str string, args ...interface{}) Format { // the level string is invalid. This facilitates setting the log level via // config or environment variable by name in a predictable way. func LevelFromString(levelStr string) Level { - // We don't care about case. 
Accept "INFO" or "info" + // We don't care about case. Accept both "INFO" and "info". levelStr = strings.ToLower(strings.TrimSpace(levelStr)) switch levelStr { case "trace": @@ -71,7 +75,7 @@ func LevelFromString(levelStr string) Level { } } -// The main Logger interface. All code should code against this interface only. +// Logger describes the interface that must be implemeted by all loggers. type Logger interface { // Args are alternating key, val pairs // keys must be strings @@ -127,16 +131,27 @@ type Logger interface { // Return a value that conforms to the stdlib log.Logger interface StandardLogger(opts *StandardLoggerOptions) *log.Logger + + // Return a value that conforms to io.Writer, which can be passed into log.SetOutput() + StandardWriter(opts *StandardLoggerOptions) io.Writer } +// StandardLoggerOptions can be used to configure a new standard logger. type StandardLoggerOptions struct { // Indicate that some minimal parsing should be done on strings to try // and detect their level and re-emit them. // This supports the strings like [ERROR], [ERR] [TRACE], [WARN], [INFO], // [DEBUG] and strip it off before reapplying it. InferLevels bool + + // ForceLevel is used to force all output from the standard logger to be at + // the specified level. Similar to InferLevels, this will strip any level + // prefix contained in the logged string before applying the forced level. + // If set, this override InferLevels. + ForceLevel Level } +// LoggerOptions can be used to configure a new logger. type LoggerOptions struct { // Name of the subsystem to prefix logs with Name string @@ -144,7 +159,7 @@ type LoggerOptions struct { // The threshold for the logger. Anything less severe is supressed Level Level - // Where to write the logs to. Defaults to os.Stdout if nil + // Where to write the logs to. Defaults to os.Stderr if nil Output io.Writer // An optional mutex pointer in case Output is shared diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go index 0942361a5..7ad6b351e 100644 --- a/vendor/github.com/hashicorp/go-hclog/nulllogger.go +++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -1,6 +1,7 @@ package hclog import ( + "io" "io/ioutil" "log" ) @@ -43,5 +44,9 @@ func (l *nullLogger) ResetNamed(name string) Logger { return l } func (l *nullLogger) SetLevel(level Level) {} func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { - return log.New(ioutil.Discard, "", log.LstdFlags) + return log.New(l.StandardWriter(opts), "", log.LstdFlags) +} + +func (l *nullLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return ioutil.Discard } diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go index 8af1a3be4..9b27bd3d3 100644 --- a/vendor/github.com/hashicorp/go-hclog/stacktrace.go +++ b/vendor/github.com/hashicorp/go-hclog/stacktrace.go @@ -40,12 +40,13 @@ var ( } ) -// A stacktrace gathered by a previous call to log.Stacktrace. If passed -// to a logging function, the stacktrace will be appended. +// CapturedStacktrace represents a stacktrace captured by a previous call +// to log.Stacktrace. If passed to a logging function, the stacktrace +// will be appended. type CapturedStacktrace string -// Gather a stacktrace of the current goroutine and return it to be passed -// to a logging function. +// Stacktrace captures a stacktrace of the current goroutine and returns +// it to be passed to a logging function. 
func Stacktrace() CapturedStacktrace { return CapturedStacktrace(takeStacktrace()) } diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go index 2bb927fc9..2cf0456a0 100644 --- a/vendor/github.com/hashicorp/go-hclog/stdlog.go +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -9,39 +9,51 @@ import ( // and back into our Logger. This is basically the only way to // build upon *log.Logger. type stdlogAdapter struct { - hl Logger + log Logger inferLevels bool + forceLevel Level } // Take the data, infer the levels if configured, and send it through -// a regular Logger +// a regular Logger. func (s *stdlogAdapter) Write(data []byte) (int, error) { str := string(bytes.TrimRight(data, " \t\n")) - if s.inferLevels { + if s.forceLevel != NoLevel { + // Use pickLevel to strip log levels included in the line since we are + // forcing the level + _, str := s.pickLevel(str) + + // Log at the forced level + s.dispatch(str, s.forceLevel) + } else if s.inferLevels { level, str := s.pickLevel(str) - switch level { - case Trace: - s.hl.Trace(str) - case Debug: - s.hl.Debug(str) - case Info: - s.hl.Info(str) - case Warn: - s.hl.Warn(str) - case Error: - s.hl.Error(str) - default: - s.hl.Info(str) - } + s.dispatch(str, level) } else { - s.hl.Info(str) + s.log.Info(str) } return len(data), nil } -// Detect, based on conventions, what log level this is +func (s *stdlogAdapter) dispatch(str string, level Level) { + switch level { + case Trace: + s.log.Trace(str) + case Debug: + s.log.Debug(str) + case Info: + s.log.Info(str) + case Warn: + s.log.Warn(str) + case Error: + s.log.Error(str) + default: + s.log.Info(str) + } +} + +// Detect, based on conventions, what log level this is. func (s *stdlogAdapter) pickLevel(str string) (Level, string) { switch { case strings.HasPrefix(str, "[DEBUG]"): diff --git a/vendor/github.com/hashicorp/go-hclog/writer.go b/vendor/github.com/hashicorp/go-hclog/writer.go new file mode 100644 index 000000000..7e8ec729d --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/writer.go @@ -0,0 +1,74 @@ +package hclog + +import ( + "bytes" + "io" +) + +type writer struct { + b bytes.Buffer + w io.Writer +} + +func newWriter(w io.Writer) *writer { + return &writer{w: w} +} + +func (w *writer) Flush(level Level) (err error) { + if lw, ok := w.w.(LevelWriter); ok { + _, err = lw.LevelWrite(level, w.b.Bytes()) + } else { + _, err = w.w.Write(w.b.Bytes()) + } + w.b.Reset() + return err +} + +func (w *writer) Write(p []byte) (int, error) { + return w.b.Write(p) +} + +func (w *writer) WriteByte(c byte) error { + return w.b.WriteByte(c) +} + +func (w *writer) WriteString(s string) (int, error) { + return w.b.WriteString(s) +} + +// LevelWriter is the interface that wraps the LevelWrite method. +type LevelWriter interface { + LevelWrite(level Level, p []byte) (n int, err error) +} + +// LeveledWriter writes all log messages to the standard writer, +// except for log levels that are defined in the overrides map. +type LeveledWriter struct { + standard io.Writer + overrides map[Level]io.Writer +} + +// NewLeveledWriter returns an initialized LeveledWriter. +// +// standard will be used as the default writer for all log levels, +// except for log levels that are defined in the overrides map. +func NewLeveledWriter(standard io.Writer, overrides map[Level]io.Writer) *LeveledWriter { + return &LeveledWriter{ + standard: standard, + overrides: overrides, + } +} + +// Write implements io.Writer. 
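Editorial aside (an illustrative sketch, not part of the vendored writer.go diff): the LeveledWriter introduced above lets a single logger route individual levels to different writers, because the logger's internal buffer checks for the LevelWriter interface when it flushes a line. The writer choices and names below are assumptions for illustration only; only NewLeveledWriter, LeveledWriter, and the LevelWriter interface come from this file.

```go
package main

import (
	"io"
	"os"

	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	// Send ERROR lines to stderr; every level without an override falls
	// back to the standard writer (stdout here).
	out := hclog.NewLeveledWriter(os.Stdout, map[hclog.Level]io.Writer{
		hclog.Error: os.Stderr,
	})

	logger := hclog.New(&hclog.LoggerOptions{
		Name:   "app",
		Level:  hclog.Debug,
		Output: out,
	})

	logger.Info("written to stdout")  // no override for Info, uses the standard writer
	logger.Error("written to stderr") // Error has an override in the map
}
```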
+func (lw *LeveledWriter) Write(p []byte) (int, error) { + return lw.standard.Write(p) +} + +// LevelWrite implements LevelWriter. +func (lw *LeveledWriter) LevelWrite(level Level, p []byte) (int, error) { + w, ok := lw.overrides[level] + if !ok { + w = lw.standard + } + return w.Write(p) +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 4bd094abf..8c1adba27 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -209,7 +209,7 @@ {"path":"github.com/hashicorp/go-envparse","checksumSHA1":"FKmqR4DC3nCXtnT9pe02z5CLNWo=","revision":"310ca1881b22af3522e3a8638c0b426629886196","revisionTime":"2018-01-19T21:58:41Z"}, {"path":"github.com/hashicorp/go-getter","checksumSHA1":"d4brua17AGQqMNtngK4xKOUwboY=","revision":"f5101da0117392c6e7960c934f05a2fd689a5b5f","revisionTime":"2019-08-22T19:45:07Z"}, {"path":"github.com/hashicorp/go-getter/helper/url","checksumSHA1":"9J+kDr29yDrwsdu2ULzewmqGjpA=","revision":"b345bfcec894fb7ff3fdf9b21baf2f56ea423d98","revisionTime":"2018-04-10T17:49:45Z"}, - {"path":"github.com/hashicorp/go-hclog","checksumSHA1":"dOP7kCX3dACHc9mU79826N411QA=","revision":"ff2cf002a8dd750586d91dddd4470c341f981fe1","revisionTime":"2018-07-09T16:53:50Z"}, + {"path":"github.com/hashicorp/go-hclog","checksumSHA1":"Iv7QhhSWySasqGcwbFyq0XrgYOQ=","revision":"3a6b159e7c8be95d9af1c989e4dcb730567f5c9c","revisionTime":"2019-08-26T16:23:21Z"}, {"path":"github.com/hashicorp/go-immutable-radix","checksumSHA1":"Cas2nprG6pWzf05A2F/OlnjUu2Y=","revision":"8aac2701530899b64bdea735a1de8da899815220","revisionTime":"2017-07-25T22:12:15Z"}, {"path":"github.com/hashicorp/go-memdb","checksumSHA1":"FMAvwDar2bQyYAW4XMFhAt0J5xA=","revision":"20ff6434c1cc49b80963d45bf5c6aa89c78d8d57","revisionTime":"2017-08-31T20:15:40Z"}, {"path":"github.com/hashicorp/go-msgpack/codec","checksumSHA1":"CKGYNUDKre3Z2g4hHNVfp5nTcfA=","revision":"23165f7bc3c2dda1891434ebb9da1511a7bafc1c","revisionTime":"2019-09-27T12:33:13Z","version":"upstream-08f7b40","versionExact":"upstream-08f7b40"}, From a828c92403a9b0c5d1d9e6afc11df28df8aee9fb Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Wed, 9 Oct 2019 11:17:29 -0400 Subject: [PATCH 05/34] Display error when remote side ended monitor multisink logger remove usage of logwriter --- command/agent/agent.go | 10 +-- command/agent/agent_endpoint.go | 83 +++++++++++-------- command/agent/agent_endpoint_test.go | 17 +++- command/agent/command.go | 11 +-- command/agent/testagent.go | 12 +-- command/agent_monitor.go | 4 + helper/testlog/testlog.go | 4 +- .../hashicorp/go-hclog/intlogger.go | 53 +++++++++++- .../github.com/hashicorp/go-hclog/logger.go | 11 +++ vendor/vendor.json | 2 +- 10 files changed, 145 insertions(+), 62 deletions(-) diff --git a/command/agent/agent.go b/command/agent/agent.go index 0fca145c7..b02e6a765 100644 --- a/command/agent/agent.go +++ b/command/agent/agent.go @@ -53,10 +53,9 @@ type Agent struct { config *Config configLock sync.Mutex - logger log.Logger - httpLogger log.Logger + logger log.MultiSinkLogger + httpLogger log.MultiSinkLogger logOutput io.Writer - logWriter *logWriter // consulService is Nomad's custom Consul client for managing services // and checks. 
@@ -88,18 +87,17 @@ type Agent struct { } // NewAgent is used to create a new agent with the given configuration -func NewAgent(config *Config, logger log.Logger, logOutput io.Writer, logWriter *logWriter, inmem *metrics.InmemSink) (*Agent, error) { +func NewAgent(config *Config, logger log.MultiSinkLogger, logOutput io.Writer, inmem *metrics.InmemSink) (*Agent, error) { a := &Agent{ config: config, logOutput: logOutput, - logWriter: logWriter, shutdownCh: make(chan struct{}), InmemSink: inmem, } // Create the loggers a.logger = logger - a.httpLogger = a.logger.ResetNamed("http") + a.httpLogger = a.logger.ResetNamed("http").(log.MultiSinkLogger) // Global logger should match internal logger as much as possible golog.SetFlags(golog.LstdFlags | golog.Lmicroseconds) diff --git a/command/agent/agent_endpoint.go b/command/agent/agent_endpoint.go index ffb505c0e..ac5597dfb 100644 --- a/command/agent/agent_endpoint.go +++ b/command/agent/agent_endpoint.go @@ -7,9 +7,9 @@ import ( "net/http" "sort" "strings" + "sync" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/logutils" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/serf/serf" @@ -165,26 +165,25 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( logLevel = "INFO" } - // Create a level filter and flusher. - filter := LevelFilter() - filter.SetMinLevel(logutils.LogLevel(strings.ToUpper(logLevel))) - - if !ValidateLevelFilter(filter.MinLevel, filter) { - return nil, CodedError(400, fmt.Sprintf("Unknown log level: %s", filter.MinLevel)) + if log.LevelFromString(logLevel) == log.NoLevel { + return nil, CodedError(400, fmt.Sprintf("Unknown log level: %s", logLevel)) } + // Create flusher for streaming flusher, ok := resp.(http.Flusher) if !ok { return nil, CodedError(400, "Streaming not supported") } - handler := &httpLogHandler{ - filter: filter, - logCh: make(chan string, 512), - logger: s.agent.logger, - } - s.agent.logWriter.RegisterHandler(handler) - defer s.agent.logWriter.DeregisterHandler(handler) + streamWriter := newStreamWriter(512) + + streamLog := log.New(&log.LoggerOptions{ + Level: log.LevelFromString(logLevel), + Output: streamWriter, + }) + s.agent.logger.RegisterSink(streamLog) + defer s.agent.logger.DeregisterSink(streamLog) + notify := resp.(http.CloseNotifier).CloseNotify() // Send header so client can start streaming body @@ -198,40 +197,56 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( for { select { case <-notify: - s.agent.logWriter.DeregisterHandler(handler) - if handler.droppedCount > 0 { - s.agent.logger.Warn(fmt.Sprintf("agent: Dropped %d logs during monitor request", handler.droppedCount)) + s.agent.logger.DeregisterSink(streamLog) + if streamWriter.droppedCount > 0 { + s.agent.logger.Warn(fmt.Sprintf("agent: Dropped %d logs during monitor request", streamWriter.droppedCount)) } return nil, nil - case log := <-handler.logCh: + case log := <-streamWriter.logCh: fmt.Fprintln(resp, log) flusher.Flush() } } } -type httpLogHandler struct { - filter *logutils.LevelFilter +type streamWriter struct { + sync.Mutex + logs []string logCh chan string - logger log.Logger + index int droppedCount int } -func (h *httpLogHandler) HandleLog(log string) { - // Check the log level - if !h.filter.Check([]byte(log)) { - return - } - - // Do a non-blocking send - select { - case h.logCh <- log: - default: - // Just increment a counter for dropped logs to this handler; we can't log now - // because the lock is 
already held by the LogWriter invoking this - h.droppedCount++ +func newStreamWriter(buf int) *streamWriter { + return &streamWriter{ + logs: make([]string, buf), + logCh: make(chan string, buf), + index: 0, } } + +func (d *streamWriter) Write(p []byte) (n int, err error) { + d.Lock() + defer d.Unlock() + + // Strip off newlines at the end if there are any since we store + // individual log lines in the agent. + n = len(p) + if p[n-1] == '\n' { + p = p[:n-1] + } + + d.logs[d.index] = string(p) + d.index = (d.index + 1) % len(d.logs) + + select { + case d.logCh <- string(p): + default: + d.droppedCount++ + } + return +} + func (s *HTTPServer) AgentForceLeaveRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { if req.Method != "PUT" && req.Method != "POST" { return nil, CodedError(405, ErrInvalidMethod) diff --git a/command/agent/agent_endpoint_test.go b/command/agent/agent_endpoint_test.go index 5714b1c7b..9909e3162 100644 --- a/command/agent/agent_endpoint_test.go +++ b/command/agent/agent_endpoint_test.go @@ -274,19 +274,28 @@ func TestHTTP_AgentMonitor(t *testing.T) { defer resp.Close() go func() { - s.Server.logger.Debug("log that should not be sent") - s.Server.logger.Warn("log that should be sent") _, err = s.Server.AgentMonitor(resp, req) require.NoError(t, err) }() + // send the same log a few times until monitor sink is + // fully set up + maxLogAttempts := 10 + tried := 0 testutil.WaitForResult(func() (bool, error) { + if tried < maxLogAttempts { + s.Server.logger.Debug("log that should not be sent") + s.Server.logger.Warn("log that should be sent") + tried++ + } + got := resp.Body.String() - want := "[WARN ] http: log that should be sent" + want := "[WARN] http: log that should be sent" if strings.Contains(got, want) { - require.NotContains(t, resp.Body.String(), "[INFO ]") + require.NotContains(t, resp.Body.String(), "[DEBUG]") return true, nil } + return false, fmt.Errorf("missing expected log, got: %v, want: %v", got, want) }, func(err error) { require.Fail(t, err.Error()) diff --git a/command/agent/command.go b/command/agent/command.go index 03115156b..c0a441e5e 100644 --- a/command/agent/command.go +++ b/command/agent/command.go @@ -382,6 +382,7 @@ func (c *Command) setupLoggers(config *Config) (*gatedwriter.Writer, *logWriter, Writer: &cli.UiWriter{Ui: c.Ui}, } + // TODO can this be killed c.logFilter = LevelFilter() c.logFilter.MinLevel = logutils.LogLevel(strings.ToUpper(config.LogLevel)) c.logFilter.Writer = logGate @@ -447,9 +448,9 @@ func (c *Command) setupLoggers(config *Config) (*gatedwriter.Writer, *logWriter, } // setupAgent is used to start the agent and various interfaces -func (c *Command) setupAgent(config *Config, logger hclog.Logger, logOutput io.Writer, logWriter *logWriter, inmem *metrics.InmemSink) error { +func (c *Command) setupAgent(config *Config, logger hclog.MultiSinkLogger, logOutput io.Writer, inmem *metrics.InmemSink) error { c.Ui.Output("Starting Nomad agent...") - agent, err := NewAgent(config, logger, logOutput, logWriter, inmem) + agent, err := NewAgent(config, logger, logOutput, inmem) if err != nil { c.Ui.Error(fmt.Sprintf("Error starting agent: %s", err)) return err @@ -596,13 +597,13 @@ func (c *Command) Run(args []string) int { } // Setup the log outputs - logGate, logWriter, logOutput := c.setupLoggers(config) + logGate, _, logOutput := c.setupLoggers(config) if logGate == nil { return 1 } // Create logger - logger := hclog.New(&hclog.LoggerOptions{ + logger := hclog.NewMultiSink(&hclog.LoggerOptions{ Name: 
"agent", Level: hclog.LevelFromString(config.LogLevel), Output: logOutput, @@ -629,7 +630,7 @@ func (c *Command) Run(args []string) int { } // Create the agent - if err := c.setupAgent(config, logger, logOutput, logWriter, inmem); err != nil { + if err := c.setupAgent(config, logger, logOutput, inmem); err != nil { logGate.Flush() return 1 } diff --git a/command/agent/testagent.go b/command/agent/testagent.go index b773cda18..8b5bb6afd 100644 --- a/command/agent/testagent.go +++ b/command/agent/testagent.go @@ -60,8 +60,6 @@ type TestAgent struct { // to os.Stderr. LogOutput io.Writer - logWriter *logWriter - // DataDir is the data directory which is used when Config.DataDir // is not set. It is created automatically and removed when // Shutdown() is called. @@ -207,12 +205,8 @@ RETRY: } func (a *TestAgent) start() (*Agent, error) { - if a.logWriter == nil { - a.logWriter = NewLogWriter(512) - } - if a.LogOutput == nil { - a.LogOutput = io.MultiWriter(testlog.NewWriter(a.T), a.logWriter) + a.LogOutput = io.MultiWriter(testlog.NewWriter(a.T)) } inm := metrics.NewInmemSink(10*time.Second, time.Minute) @@ -222,14 +216,14 @@ func (a *TestAgent) start() (*Agent, error) { return nil, fmt.Errorf("unable to set up in memory metrics needed for agent initialization") } - logger := hclog.New(&hclog.LoggerOptions{ + logger := hclog.NewMultiSink(&hclog.LoggerOptions{ Name: "agent", Level: hclog.LevelFromString(a.Config.LogLevel), Output: a.LogOutput, JSONFormat: a.Config.LogJson, }) - agent, err := NewAgent(a.Config, logger, a.LogOutput, a.logWriter, inm) + agent, err := NewAgent(a.Config, logger, a.LogOutput, inm) if err != nil { return nil, err } diff --git a/command/agent_monitor.go b/command/agent_monitor.go index cc58dbbe9..aeffc172e 100644 --- a/command/agent_monitor.go +++ b/command/agent_monitor.go @@ -79,6 +79,10 @@ func (c *MonitorCommand) Run(args []string) int { signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) select { + case <-eventDoneCh: + c.Ui.Error("Remote side ended the monitor! This usually means that the\n" + + "remote side has exited or crashed.") + return 1 case <-signalCh: return 0 } diff --git a/helper/testlog/testlog.go b/helper/testlog/testlog.go index d8502dd0a..625c2cf9d 100644 --- a/helper/testlog/testlog.go +++ b/helper/testlog/testlog.go @@ -72,7 +72,7 @@ func Logger(t LogPrinter) *log.Logger { } //HCLogger returns a new test hc-logger. -func HCLogger(t LogPrinter) hclog.Logger { +func HCLogger(t LogPrinter) hclog.MultiSinkLogger { level := hclog.Trace envLogLevel := os.Getenv("NOMAD_TEST_LOG_LEVEL") if envLogLevel != "" { @@ -83,7 +83,7 @@ func HCLogger(t LogPrinter) hclog.Logger { Output: NewWriter(t), IncludeLocation: true, } - return hclog.New(opts) + return hclog.NewMultiSink(opts) } type prefixStdout struct { diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go index 219656c4c..a94c1424e 100644 --- a/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -36,6 +36,7 @@ var ( // Make sure that intLogger is a Logger var _ Logger = &intLogger{} +var _ MultiSinkLogger = &intLogger{} // intLogger is an internal logger implementation. Internal in that it is // defined entirely by this package. 
@@ -51,6 +52,8 @@ type intLogger struct { writer *writer level *int32 + sinks map[Logger]struct{} + implied []interface{} } @@ -83,6 +86,7 @@ func New(opts *LoggerOptions) Logger { mutex: mutex, writer: newWriter(output), level: new(int32), + sinks: make(map[Logger]struct{}), } if opts.TimeFormat != "" { @@ -94,10 +98,31 @@ func New(opts *LoggerOptions) Logger { return l } +func NewMultiSink(opts *LoggerOptions) MultiSinkLogger { + return New(opts).(MultiSinkLogger) +} + +func (l *intLogger) RegisterSink(logger Logger) { + l.mutex.Lock() + defer l.mutex.Unlock() + + if _, ok := l.sinks[logger]; ok { + return + } + + l.sinks[logger] = struct{}{} +} + +func (l *intLogger) DeregisterSink(logger Logger) { + l.mutex.Lock() + defer l.mutex.Unlock() + delete(l.sinks, logger) +} + // Log a message and a set of key/value pairs if the given level is at // or more severe that the threshold configured in the Logger. func (l *intLogger) Log(level Level, msg string, args ...interface{}) { - if level < Level(atomic.LoadInt32(l.level)) { + if level < Level(atomic.LoadInt32(l.level)) && len(l.sinks) == 0 { return } @@ -106,6 +131,32 @@ func (l *intLogger) Log(level Level, msg string, args ...interface{}) { l.mutex.Lock() defer l.mutex.Unlock() + for lh := range l.sinks { + lh, ok := lh.(*intLogger) + if !ok { + continue + } + + if level < Level(atomic.LoadInt32(lh.level)) { + continue + } + + // Set the sink name to the name of the calling log + lh.name = l.name + + if lh.json { + lh.logJSON(t, level, msg, args...) + } else { + lh.log(t, level, msg, args...) + } + + lh.writer.Flush(level) + } + + if level < Level(atomic.LoadInt32(l.level)) { + return + } + if l.json { l.logJSON(t, level, msg, args...) } else { diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go index 080ed7999..a7774e923 100644 --- a/vendor/github.com/hashicorp/go-hclog/logger.go +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -136,6 +136,17 @@ type Logger interface { StandardWriter(opts *StandardLoggerOptions) io.Writer } +// MultiSinkLogger describes the interface that allows a logger to +// write to multiple sub loggers which may be configured to have different +// level and writer settings. This is useful for monitor commands to allow +// for streaming of logs at a lower level than what is set for the parent logger +type MultiSinkLogger interface { + Logger + + RegisterSink(logger Logger) + DeregisterSink(logger Logger) +} + // StandardLoggerOptions can be used to configure a new standard logger. 
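Editorial aside (an illustrative sketch, not part of the vendored diff): the MultiSinkLogger interface above is what lets the agent's monitor endpoint stream logs at a more verbose level than the parent logger is configured for. Only NewMultiSink, RegisterSink, and DeregisterSink come from this patch; the writers, levels, and names below are assumed for illustration (the monitor endpoint registers a sink whose Output is a buffered stream writer rather than stdout).

```go
package main

import (
	"os"

	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	// Parent logger as the agent would create it: INFO level, normal output.
	parent := hclog.NewMultiSink(&hclog.LoggerOptions{
		Name:   "agent",
		Level:  hclog.Info,
		Output: os.Stderr,
	})

	// Temporary sink registered for the lifetime of a monitor request. It
	// receives messages at DEBUG and above, independent of the parent level.
	sink := hclog.New(&hclog.LoggerOptions{
		Level:  hclog.Debug,
		Output: os.Stdout,
	})

	parent.RegisterSink(sink)
	defer parent.DeregisterSink(sink)

	parent.Debug("forwarded to the sink only")    // below the parent's INFO threshold
	parent.Info("written to stderr and the sink") // emitted by both
}
```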
type StandardLoggerOptions struct { // Indicate that some minimal parsing should be done on strings to try diff --git a/vendor/vendor.json b/vendor/vendor.json index 8c1adba27..275933b8b 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -209,7 +209,7 @@ {"path":"github.com/hashicorp/go-envparse","checksumSHA1":"FKmqR4DC3nCXtnT9pe02z5CLNWo=","revision":"310ca1881b22af3522e3a8638c0b426629886196","revisionTime":"2018-01-19T21:58:41Z"}, {"path":"github.com/hashicorp/go-getter","checksumSHA1":"d4brua17AGQqMNtngK4xKOUwboY=","revision":"f5101da0117392c6e7960c934f05a2fd689a5b5f","revisionTime":"2019-08-22T19:45:07Z"}, {"path":"github.com/hashicorp/go-getter/helper/url","checksumSHA1":"9J+kDr29yDrwsdu2ULzewmqGjpA=","revision":"b345bfcec894fb7ff3fdf9b21baf2f56ea423d98","revisionTime":"2018-04-10T17:49:45Z"}, - {"path":"github.com/hashicorp/go-hclog","checksumSHA1":"Iv7QhhSWySasqGcwbFyq0XrgYOQ=","revision":"3a6b159e7c8be95d9af1c989e4dcb730567f5c9c","revisionTime":"2019-08-26T16:23:21Z"}, + {"path":"github.com/hashicorp/go-hclog","checksumSHA1":"uTAjKuGQr4/gpcgdEtTO+JhD/NY=","revision":"a4c7052ea48d1c284eca6ba6281910f0fd3b7b30","revisionTime":"2019-10-10T18:01:30Z","version":"f-multi-sink-logger","versionExact":"f-multi-sink-logger"}, {"path":"github.com/hashicorp/go-immutable-radix","checksumSHA1":"Cas2nprG6pWzf05A2F/OlnjUu2Y=","revision":"8aac2701530899b64bdea735a1de8da899815220","revisionTime":"2017-07-25T22:12:15Z"}, {"path":"github.com/hashicorp/go-memdb","checksumSHA1":"FMAvwDar2bQyYAW4XMFhAt0J5xA=","revision":"20ff6434c1cc49b80963d45bf5c6aa89c78d8d57","revisionTime":"2017-08-31T20:15:40Z"}, {"path":"github.com/hashicorp/go-msgpack/codec","checksumSHA1":"CKGYNUDKre3Z2g4hHNVfp5nTcfA=","revision":"23165f7bc3c2dda1891434ebb9da1511a7bafc1c","revisionTime":"2019-09-27T12:33:13Z","version":"upstream-08f7b40","versionExact":"upstream-08f7b40"}, From 12819975ee7ae152b07ca62a67efb58d264ba18e Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Thu, 10 Oct 2019 15:30:37 -0400 Subject: [PATCH 06/34] remove log_writer prefix output with proper spacing update gzip handler, adjust first byte flow to allow gzip handler bypass wip, first stab at wiring up rpc endpoint --- api/agent.go | 5 +- api/agent_test.go | 27 +++-- client/client.go | 8 +- client/config/config.go | 2 +- client/monitor_endpoint.go | 153 +++++++++++++++++++++++++ client/rpc.go | 3 + client/structs/structs.go | 10 ++ command/agent/agent_endpoint.go | 164 ++++++++++++++++++++++----- command/agent/agent_endpoint_test.go | 38 ++++++- command/agent/command.go | 17 ++- command/agent/http.go | 2 +- command/agent/log_writer.go | 83 -------------- command/agent/log_writer_test.go | 52 --------- command/agent_monitor.go | 14 ++- vendor/vendor.json | 2 +- 15 files changed, 387 insertions(+), 193 deletions(-) create mode 100644 client/monitor_endpoint.go delete mode 100644 command/agent/log_writer.go delete mode 100644 command/agent/log_writer_test.go diff --git a/api/agent.go b/api/agent.go index 6df8482bd..1fa20a00e 100644 --- a/api/agent.go +++ b/api/agent.go @@ -240,7 +240,7 @@ func (a *Agent) Health() (*AgentHealthResponse, error) { // Monitor returns a channel which will receive streaming logs from the agent // Providing a non-nil stopCh can be used to close the connection and stop log streaming -func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { +func (a *Agent) Monitor(loglevel string, nodeID string, stopCh <-chan struct{}, q *QueryOptions) 
(chan string, error) { r, err := a.client.newRequest("GET", "/v1/agent/monitor") if err != nil { return nil, err @@ -250,6 +250,9 @@ func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions if loglevel != "" { r.params.Add("loglevel", loglevel) } + if nodeID != "" { + r.params.Add("nodeID", nodeID) + } _, resp, err := requireOK(a.client.doRequest(r)) if err != nil { diff --git a/api/agent_test.go b/api/agent_test.go index b80e3ac5a..080f9af42 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -3,10 +3,11 @@ package api import ( "reflect" "sort" - "strings" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/assert" ) @@ -267,19 +268,27 @@ func TestAgent_Monitor(t *testing.T) { agent := c.Agent() - logCh, err := agent.Monitor("info", nil, nil) + doneCh := make(chan struct{}) + logCh, err := agent.Monitor("debug", doneCh, nil) + defer close(doneCh) if err != nil { t.Fatalf("err: %v", err) } + // make a request to generate some logs + _, err = agent.Region() + require.NoError(t, err) + // Wait for the first log message and validate it - select { - case log := <-logCh: - // TODO: checkout why stub_asset.go help text returns here - if !strings.Contains(log, "[INFO ] nomad: raft: Initial configuration") { - t.Fatalf("bad: %q", log) + for { + select { + case log := <-logCh: + if log == " " { + return + } + require.Contains(t, log, "[DEBUG]") + case <-time.After(10 * time.Second): + require.Fail(t, "failed to get a log message") } - case <-time.After(1000 * time.Second): - t.Fatalf("failed to get a log message") } } diff --git a/client/client.go b/client/client.go index e5d688ef1..a1bf34c08 100644 --- a/client/client.go +++ b/client/client.go @@ -163,8 +163,8 @@ type Client struct { configCopy *config.Config configLock sync.RWMutex - logger hclog.Logger - rpcLogger hclog.Logger + logger hclog.MultiSinkLogger + rpcLogger hclog.MultiSinkLogger connPool *pool.ConnPool @@ -304,7 +304,7 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic } // Create the logger - logger := cfg.Logger.ResetNamed("client") + logger := cfg.Logger.ResetNamed("client").(hclog.MultiSinkLogger) // Create the client c := &Client{ @@ -316,7 +316,7 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic tlsWrap: tlsWrap, streamingRpcs: structs.NewStreamingRpcRegistry(), logger: logger, - rpcLogger: logger.Named("rpc"), + rpcLogger: logger.Named("rpc").(hclog.MultiSinkLogger), allocs: make(map[string]AllocRunner), allocUpdates: make(chan *structs.Allocation, 64), shutdownCh: make(chan struct{}), diff --git a/client/config/config.go b/client/config/config.go index 4783bea56..0df3cac27 100644 --- a/client/config/config.go +++ b/client/config/config.go @@ -81,7 +81,7 @@ type Config struct { LogOutput io.Writer // Logger provides a logger to thhe client - Logger log.Logger + Logger log.MultiSinkLogger // Region is the clients region Region string diff --git a/client/monitor_endpoint.go b/client/monitor_endpoint.go new file mode 100644 index 000000000..315714ec6 --- /dev/null +++ b/client/monitor_endpoint.go @@ -0,0 +1,153 @@ +package client + +import ( + "context" + "errors" + "io" + "sync" + "time" + + "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/ugorji/go/codec" + + metrics "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + cstructs 
"github.com/hashicorp/nomad/client/structs" +) + +type Monitor struct { + c *Client +} + +func NewMonitorEndpoint(c *Client) *Monitor { + m := &Monitor{c: c} + m.c.streamingRpcs.Register("Client.Monitor", m.monitor) + return m +} + +func (m *Monitor) monitor(conn io.ReadWriteCloser) { + defer metrics.MeasureSince([]string{"client", "monitor", "monitor"}, time.Now()) + // defer conn.Close() + + // Decode arguments + var req cstructs.MonitorRequest + decoder := codec.NewDecoder(conn, structs.MsgpackHandle) + encoder := codec.NewEncoder(conn, structs.MsgpackHandle) + + if err := decoder.Decode(&req); err != nil { + handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + return + } + + // Check acl + if aclObj, err := m.c.ResolveToken(req.QueryOptions.AuthToken); err != nil { + handleStreamResultError(err, helper.Int64ToPtr(403), encoder) + return + } else if aclObj != nil && !aclObj.AllowNsOp(req.Namespace, acl.NamespaceCapabilityReadFS) { + handleStreamResultError(structs.ErrPermissionDenied, helper.Int64ToPtr(403), encoder) + return + } + + var logLevel log.Level + if req.LogLevel == "" { + logLevel = log.LevelFromString("INFO") + } else { + logLevel = log.LevelFromString(req.LogLevel) + } + + if logLevel == log.NoLevel { + handleStreamResultError(errors.New("Unknown log level"), helper.Int64ToPtr(400), encoder) + return + } + + // var buf bytes.Buffer + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + streamWriter := newStreamWriter(512) + + streamLog := log.New(&log.LoggerOptions{ + Level: logLevel, + Output: streamWriter, + }) + m.c.logger.RegisterSink(streamLog) + defer m.c.logger.DeregisterSink(streamLog) + + go func() { + for { + if _, err := conn.Read(nil); err != nil { + // One end of the pipe was explicitly closed, exit cleanly + cancel() + return + } + select { + case <-ctx.Done(): + return + } + } + }() + + var streamErr error +OUTER: + for { + select { + case log := <-streamWriter.logCh: + var resp cstructs.StreamErrWrapper + if err := encoder.Encode(resp); err != nil { + streamErr = err + break OUTER + } + resp.Payload = []byte(log) + encoder.Reset(conn) + case <-ctx.Done(): + break OUTER + } + } + + if streamErr != nil { + handleStreamResultError(streamErr, helper.Int64ToPtr(500), encoder) + return + } + +} + +type streamWriter struct { + sync.Mutex + logs []string + logCh chan string + index int + droppedCount int +} + +func newStreamWriter(buf int) *streamWriter { + return &streamWriter{ + logs: make([]string, buf), + logCh: make(chan string, buf), + index: 0, + } +} + +func (d *streamWriter) Write(p []byte) (n int, err error) { + d.Lock() + defer d.Unlock() + + // Strip off newlines at the end if there are any since we store + // individual log lines in the agent. 
+ n = len(p) + if p[n-1] == '\n' { + p = p[:n-1] + } + + d.logs[d.index] = string(p) + d.index = (d.index + 1) % len(d.logs) + + select { + case d.logCh <- string(p): + default: + d.droppedCount++ + } + return +} diff --git a/client/rpc.go b/client/rpc.go index beaec6f2c..6a3b83717 100644 --- a/client/rpc.go +++ b/client/rpc.go @@ -23,6 +23,7 @@ type rpcEndpoints struct { ClientStats *ClientStats FileSystem *FileSystem Allocations *Allocations + Monitor *Monitor } // ClientRPC is used to make a local, client only RPC call @@ -218,6 +219,7 @@ func (c *Client) setupClientRpc() { c.endpoints.ClientStats = &ClientStats{c} c.endpoints.FileSystem = NewFileSystemEndpoint(c) c.endpoints.Allocations = NewAllocationsEndpoint(c) + c.endpoints.Monitor = NewMonitorEndpoint(c) // Create the RPC Server c.rpcServer = rpc.NewServer() @@ -234,6 +236,7 @@ func (c *Client) setupClientRpcServer(server *rpc.Server) { server.Register(c.endpoints.ClientStats) server.Register(c.endpoints.FileSystem) server.Register(c.endpoints.Allocations) + server.Register(c.endpoints.Monitor) } // rpcConnListener is a long lived function that listens for new connections diff --git a/client/structs/structs.go b/client/structs/structs.go index eff8ceaf3..6a6206744 100644 --- a/client/structs/structs.go +++ b/client/structs/structs.go @@ -34,6 +34,16 @@ type ClientStatsResponse struct { structs.QueryMeta } +type MonitorRequest struct { + // LogLevel is the log level filter we want to stream logs on + LogLevel string + + // LogJSON specifies if log format should be unstructured or json + LogJSON bool + + structs.QueryOptions +} + // AllocFileInfo holds information about a file inside the AllocDir type AllocFileInfo struct { Name string diff --git a/command/agent/agent_endpoint.go b/command/agent/agent_endpoint.go index ac5597dfb..46401fc61 100644 --- a/command/agent/agent_endpoint.go +++ b/command/agent/agent_endpoint.go @@ -1,19 +1,25 @@ package agent import ( + "bytes" + "context" "encoding/json" "fmt" + "io" "net" "net/http" "sort" "strings" "sync" + "github.com/docker/docker/pkg/ioutils" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/nomad/acl" + cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/serf/serf" "github.com/mitchellh/copystructure" + "github.com/ugorji/go/codec" ) type Member struct { @@ -169,44 +175,144 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( return nil, CodedError(400, fmt.Sprintf("Unknown log level: %s", logLevel)) } - // Create flusher for streaming - flusher, ok := resp.(http.Flusher) - if !ok { - return nil, CodedError(400, "Streaming not supported") - } + // START - streamWriter := newStreamWriter(512) + // Determine if we are targeting a server or client + nodeID := req.URL.Query().Get("nodeID") + if nodeID != "" { - streamLog := log.New(&log.LoggerOptions{ - Level: log.LevelFromString(logLevel), - Output: streamWriter, - }) - s.agent.logger.RegisterSink(streamLog) - defer s.agent.logger.DeregisterSink(streamLog) + // Build the request and parse the ACL token + args := cstructs.MonitorRequest{ + LogLevel: logLevel, + LogJSON: false, + } + s.parse(resp, req, &args.QueryOptions.Region, &args.QueryOptions) - notify := resp.(http.CloseNotifier).CloseNotify() + // Determine the handler to use + useLocalClient, useClientRPC, useServerRPC := s.rpcHandlerForNode(nodeID) - // Send header so client can start streaming body - resp.WriteHeader(http.StatusOK) + // Make the RPC + var handler 
structs.StreamingRpcHandler + var handlerErr error + if useLocalClient { + handler, handlerErr = s.agent.Client().StreamingRpcHandler("Client.Monitor") + } else if useClientRPC { + handler, handlerErr = s.agent.Client().RemoteStreamingRpcHandler("Client.Monitor") + } else if useServerRPC { + handler, handlerErr = s.agent.Server().StreamingRpcHandler("Client.Monitor") + } else { + handlerErr = CodedError(400, "No local Node and node_id not provided") + } - // 0 byte write is needed before the Flush call so that if we are using - // a gzip stream it will go ahead and write out the HTTP response header - resp.Write([]byte("")) - flusher.Flush() + if handlerErr != nil { + return nil, CodedError(500, handlerErr.Error()) + } + httpPipe, handlerPipe := net.Pipe() + decoder := codec.NewDecoder(httpPipe, structs.MsgpackHandle) + encoder := codec.NewEncoder(httpPipe, structs.MsgpackHandle) - for { - select { - case <-notify: - s.agent.logger.DeregisterSink(streamLog) - if streamWriter.droppedCount > 0 { - s.agent.logger.Warn(fmt.Sprintf("agent: Dropped %d logs during monitor request", streamWriter.droppedCount)) + ctx, cancel := context.WithCancel(context.Background()) + go func() { + <-ctx.Done() + httpPipe.Close() + }() + + // Create an ouput that gets flushed on every write + output := ioutils.NewWriteFlusher(resp) + + // Create a channel that decodes the results + errCh := make(chan HTTPCodedError, 2) + + // stream the response + go func() { + defer cancel() + + // Send the request + if err := encoder.Encode(args); err != nil { + errCh <- CodedError(500, err.Error()) + return + } + + for { + select { + case <-ctx.Done(): + errCh <- nil + return + default: + } + + var res cstructs.StreamErrWrapper + if err := decoder.Decode(&res); err != nil { + errCh <- CodedError(500, err.Error()) + return + } + decoder.Reset(httpPipe) + + if err := res.Error; err != nil { + if err.Code != nil { + errCh <- CodedError(int(*err.Code), err.Error()) + return + } + } + + if _, err := io.Copy(output, bytes.NewReader(res.Payload)); err != nil { + errCh <- CodedError(500, err.Error()) + return + } + } + }() + + handler(handlerPipe) + cancel() + codedErr := <-errCh + + if codedErr != nil && + (codedErr == io.EOF || + strings.Contains(codedErr.Error(), "closed") || + strings.Contains(codedErr.Error(), "EOF")) { + codedErr = nil + } + return nil, codedErr + } else { + // Create flusher for streaming + flusher, ok := resp.(http.Flusher) + if !ok { + return nil, CodedError(400, "Streaming not supported") + } + + streamWriter := newStreamWriter(512) + streamLog := log.New(&log.LoggerOptions{ + Level: log.LevelFromString(logLevel), + Output: streamWriter, + }) + s.agent.logger.RegisterSink(streamLog) + defer s.agent.logger.DeregisterSink(streamLog) + + notify := resp.(http.CloseNotifier).CloseNotify() + + // Send header so client can start streaming body + resp.WriteHeader(http.StatusOK) + // gziphanlder needs a byte to be written and flushed in order + // to tell gzip handler to ignore this response and not compress + resp.Write([]byte("\n")) + flusher.Flush() + + for { + select { + case <-notify: + s.agent.logger.DeregisterSink(streamLog) + if streamWriter.droppedCount > 0 { + s.agent.logger.Warn(fmt.Sprintf("Dropped %d logs during monitor request", streamWriter.droppedCount)) + } + return nil, nil + case log := <-streamWriter.logCh: + fmt.Fprintln(resp, log) + flusher.Flush() } - return nil, nil - case log := <-streamWriter.logCh: - fmt.Fprintln(resp, log) - flusher.Flush() } } + + return nil, nil } type streamWriter 
struct { diff --git a/command/agent/agent_endpoint_test.go b/command/agent/agent_endpoint_test.go index 9909e3162..50c4a0d25 100644 --- a/command/agent/agent_endpoint_test.go +++ b/command/agent/agent_endpoint_test.go @@ -255,7 +255,7 @@ func TestHTTP_AgentMonitor(t *testing.T) { httpTest(t, nil, func(s *TestAgent) { { - req, err := http.NewRequest("GET", "/v1/agent/monitor?loglevel=unkown", nil) + req, err := http.NewRequest("GET", "/v1/agent/monitor?loglevel=unknown", nil) require.Nil(t, err) resp := newClosableRecorder() @@ -301,6 +301,42 @@ func TestHTTP_AgentMonitor(t *testing.T) { require.Fail(t, err.Error()) }) } + + // stream logs for a given node + { + req, err := http.NewRequest("GET", "/v1/agent/monitor?loglevel=warn&nodeID="+s.client.NodeID(), nil) + require.Nil(t, err) + resp := newClosableRecorder() + defer resp.Close() + + go func() { + _, err = s.Server.AgentMonitor(resp, req) + require.NoError(t, err) + }() + + // send the same log a few times until monitor sink is + // fully set up + maxLogAttempts := 10 + tried := 0 + testutil.WaitForResult(func() (bool, error) { + if tried < maxLogAttempts { + s.Server.logger.Debug("log that should not be sent") + s.Server.logger.Warn("log that should be sent") + tried++ + } + + got := resp.Body.String() + want := "[WARN] http: log that should be sent" + if strings.Contains(got, want) { + require.NotContains(t, resp.Body.String(), "[DEBUG]") + return true, nil + } + + return false, fmt.Errorf("missing expected log, got: %v, want: %v", got, want) + }, func(err error) { + require.Fail(t, err.Error()) + }) + } }) } diff --git a/command/agent/command.go b/command/agent/command.go index c0a441e5e..af20fb0ff 100644 --- a/command/agent/command.go +++ b/command/agent/command.go @@ -373,8 +373,8 @@ func (c *Command) isValidConfig(config, cmdConfig *Config) bool { return true } -// setupLoggers is used to setup the logGate, logWriter, and our logOutput -func (c *Command) setupLoggers(config *Config) (*gatedwriter.Writer, *logWriter, io.Writer) { +// setupLoggers is used to setup the logGate, and our logOutput +func (c *Command) setupLoggers(config *Config) (*gatedwriter.Writer, io.Writer) { // Setup logging. First create the gated log writer, which will // store logs until we're ready to show them. Then create the level // filter, filtering logs of the specified level. @@ -390,19 +390,18 @@ func (c *Command) setupLoggers(config *Config) (*gatedwriter.Writer, *logWriter, c.Ui.Error(fmt.Sprintf( "Invalid log level: %s. 
Valid log levels are: %v", c.logFilter.MinLevel, c.logFilter.Levels)) - return nil, nil, nil + return nil, nil } // Create a log writer, and wrap a logOutput around it - logWriter := NewLogWriter(512) - writers := []io.Writer{c.logFilter, logWriter} + writers := []io.Writer{c.logFilter} // Check if syslog is enabled if config.EnableSyslog { l, err := gsyslog.NewLogger(gsyslog.LOG_NOTICE, config.SyslogFacility, "nomad") if err != nil { c.Ui.Error(fmt.Sprintf("Syslog setup failed: %v", err)) - return nil, nil, nil + return nil, nil } writers = append(writers, &SyslogWrapper{l, c.logFilter}) } @@ -422,7 +421,7 @@ func (c *Command) setupLoggers(config *Config) (*gatedwriter.Writer, *logWriter, duration, err := time.ParseDuration(config.LogRotateDuration) if err != nil { c.Ui.Error(fmt.Sprintf("Failed to parse log rotation duration: %v", err)) - return nil, nil, nil + return nil, nil } logRotateDuration = duration } else { @@ -444,7 +443,7 @@ func (c *Command) setupLoggers(config *Config) (*gatedwriter.Writer, *logWriter, c.logOutput = io.MultiWriter(writers...) log.SetOutput(c.logOutput) - return logGate, logWriter, c.logOutput + return logGate, c.logOutput } // setupAgent is used to start the agent and various interfaces @@ -597,7 +596,7 @@ func (c *Command) Run(args []string) int { } // Setup the log outputs - logGate, _, logOutput := c.setupLoggers(config) + logGate, logOutput := c.setupLoggers(config) if logGate == nil { return 1 } diff --git a/command/agent/http.go b/command/agent/http.go index 045a2fa49..1ee6540c4 100644 --- a/command/agent/http.go +++ b/command/agent/http.go @@ -108,7 +108,7 @@ func NewHTTPServer(agent *Agent, config *Config) (*HTTPServer, error) { // Handle requests with gzip compression // Use MinSize of 1 to allow a zero byte flush to return // response header used for streaming - gzip, err := gziphandler.GzipHandlerWithOpts(gziphandler.MinSize(1)) + gzip, err := gziphandler.GzipHandlerWithOpts(gziphandler.MinSize(0)) if err != nil { return nil, err } diff --git a/command/agent/log_writer.go b/command/agent/log_writer.go deleted file mode 100644 index ebb96878b..000000000 --- a/command/agent/log_writer.go +++ /dev/null @@ -1,83 +0,0 @@ -package agent - -import ( - "sync" -) - -// LogHandler interface is used for clients that want to subscribe -// to logs, for example to stream them over an IPC mechanism -type LogHandler interface { - HandleLog(string) -} - -// logWriter implements io.Writer so it can be used as a log sink. -// It maintains a circular buffer of logs, and a set of handlers to -// which it can stream the logs to. 
-type logWriter struct { - sync.Mutex - logs []string - index int - handlers map[LogHandler]struct{} -} - -// NewLogWriter creates a logWriter with the given buffer capacity -func NewLogWriter(buf int) *logWriter { - return &logWriter{ - logs: make([]string, buf), - index: 0, - handlers: make(map[LogHandler]struct{}), - } -} - -// RegisterHandler adds a log handler to receive logs, and sends -// the last buffered logs to the handler -func (l *logWriter) RegisterHandler(lh LogHandler) { - l.Lock() - defer l.Unlock() - - // Do nothing if already registered - if _, ok := l.handlers[lh]; ok { - return - } - - // Register - l.handlers[lh] = struct{}{} - - // Send the old logs - if l.logs[l.index] != "" { - for i := l.index; i < len(l.logs); i++ { - lh.HandleLog(l.logs[i]) - } - } - for i := 0; i < l.index; i++ { - lh.HandleLog(l.logs[i]) - } -} - -// DeregisterHandler removes a LogHandler and prevents more invocations -func (l *logWriter) DeregisterHandler(lh LogHandler) { - l.Lock() - defer l.Unlock() - delete(l.handlers, lh) -} - -// Write is used to accumulate new logs -func (l *logWriter) Write(p []byte) (n int, err error) { - l.Lock() - defer l.Unlock() - - // Strip off newlines at the end if there are any since we store - // individual log lines in the agent. - n = len(p) - if p[n-1] == '\n' { - p = p[:n-1] - } - - l.logs[l.index] = string(p) - l.index = (l.index + 1) % len(l.logs) - - for lh := range l.handlers { - lh.HandleLog(string(p)) - } - return -} diff --git a/command/agent/log_writer_test.go b/command/agent/log_writer_test.go deleted file mode 100644 index 19c23c573..000000000 --- a/command/agent/log_writer_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package agent - -import ( - "testing" -) - -type MockLogHandler struct { - logs []string -} - -func (m *MockLogHandler) HandleLog(l string) { - m.logs = append(m.logs, l) -} - -func TestLogWriter(t *testing.T) { - t.Parallel() - h := &MockLogHandler{} - w := NewLogWriter(4) - - // Write some logs - w.Write([]byte("one")) // Gets dropped! - w.Write([]byte("two")) - w.Write([]byte("three")) - w.Write([]byte("four")) - w.Write([]byte("five")) - - // Register a handler, sends old! 
- w.RegisterHandler(h) - - w.Write([]byte("six")) - w.Write([]byte("seven")) - - // Deregister - w.DeregisterHandler(h) - - w.Write([]byte("eight")) - w.Write([]byte("nine")) - - out := []string{ - "two", - "three", - "four", - "five", - "six", - "seven", - } - for idx := range out { - if out[idx] != h.logs[idx] { - t.Fatalf("mismatch %v", h.logs) - } - } -} diff --git a/command/agent_monitor.go b/command/agent_monitor.go index aeffc172e..1d0341c1e 100644 --- a/command/agent_monitor.go +++ b/command/agent_monitor.go @@ -6,6 +6,8 @@ import ( "os/signal" "strings" "syscall" + + "github.com/mitchellh/cli" ) type MonitorCommand struct { @@ -35,11 +37,19 @@ func (c *MonitorCommand) Synopsis() string { func (c *MonitorCommand) Name() string { return "monitor" } func (c *MonitorCommand) Run(args []string) int { - var logLevel string + c.Ui = &cli.PrefixedUi{ + OutputPrefix: " ", + InfoPrefix: " ", + ErrorPrefix: "==> ", + Ui: c.Ui, + } + var logLevel string + var nodeID string flags := c.Meta.FlagSet(c.Name(), FlagSetClient) flags.Usage = func() { c.Ui.Output(c.Help()) } flags.StringVar(&logLevel, "log-level", "", "") + flags.StringVar(&nodeID, "node-id", "", "") if err := flags.Parse(args); err != nil { return 1 @@ -53,7 +63,7 @@ func (c *MonitorCommand) Run(args []string) int { } eventDoneCh := make(chan struct{}) - logCh, err := client.Agent().Monitor(logLevel, eventDoneCh, nil) + logCh, err := client.Agent().Monitor(logLevel, nodeID, eventDoneCh, nil) if err != nil { c.Ui.Error(fmt.Sprintf("Error starting monitor: %s", err)) c.Ui.Error(commandErrorText(c)) diff --git a/vendor/vendor.json b/vendor/vendor.json index 275933b8b..95ef75246 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -20,7 +20,7 @@ {"path":"github.com/Microsoft/go-winio/pkg/guid","checksumSHA1":"/ykkyb7gmtZC68n7T24xwbmlCBc=","origin":"github.com/endocrimes/go-winio/pkg/guid","revision":"fb47a8b419480a700368c176bc1d5d7e3393b98d","revisionTime":"2019-06-20T17:03:19Z","version":"dani/safe-relisten","versionExact":"dani/safe-relisten"}, {"path":"github.com/NVIDIA/gpu-monitoring-tools","checksumSHA1":"kF1vk+8Xvb3nGBiw9+qbUc0SZ4M=","revision":"86f2a9fac6c5b597dc494420005144b8ef7ec9fb","revisionTime":"2018-08-29T22:20:09Z"}, {"path":"github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml","checksumSHA1":"P8FATSSgpe5A17FyPrGpsX95Xw8=","revision":"86f2a9fac6c5b597dc494420005144b8ef7ec9fb","revisionTime":"2018-08-29T22:20:09Z"}, - {"path":"github.com/NYTimes/gziphandler","checksumSHA1":"Ylaw7hBEShLk8L5U89e7l6OKWKo=","revision":"dd0439581c7657cb652dfe5c71d7d48baf39541d","revisionTime":"2019-02-21T23:16:47Z"}, + {"path":"github.com/NYTimes/gziphandler","checksumSHA1":"Ylaw7hBEShLk8L5U89e7l6OKWKo=","revision":"dd0439581c7657cb652dfe5c71d7d48baf39541d","revisionTime":"2019-02-21T23:16:47Z","version":"master","versionExact":"master"}, {"path":"github.com/Nvveen/Gotty","checksumSHA1":"Aqy8/FoAIidY/DeQ5oTYSZ4YFVc=","revision":"cd527374f1e5bff4938207604a14f2e38a9cf512","revisionTime":"2012-06-04T00:48:16Z"}, {"path":"github.com/StackExchange/wmi","checksumSHA1":"qtjd74+bErubh+qyv3s+lWmn9wc=","revision":"ea383cf3ba6ec950874b8486cd72356d007c768f","revisionTime":"2017-04-10T19:29:09Z"}, {"path":"github.com/agext/levenshtein","checksumSHA1":"jQh1fnoKPKMURvKkpdRjN695nAQ=","revision":"5f10fee965225ac1eecdc234c09daf5cd9e7f7b6","revisionTime":"2017-02-17T06:30:20Z"}, From 890b8a43fbb0a76421834e43abf079aa9063fec0 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Tue, 15 Oct 2019 10:33:07 -0400 
Subject: [PATCH 07/34] get local rpc endpoint working --- api/agent_test.go | 38 ++++++++++++++++++++++++++-- client/monitor_endpoint.go | 23 ++++++++--------- client/rpc.go | 1 - command/agent/agent_endpoint.go | 6 ++--- command/agent/agent_endpoint_test.go | 12 ++++++--- 5 files changed, 59 insertions(+), 21 deletions(-) diff --git a/api/agent_test.go b/api/agent_test.go index 080f9af42..a8ec1eb14 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/hashicorp/go-uuid" "github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/assert" ) @@ -261,7 +262,7 @@ func TestAgent_Health(t *testing.T) { assert.True(health.Server.Ok) } -func TestAgent_Monitor(t *testing.T) { +func TestAgent_MonitorServer(t *testing.T) { t.Parallel() c, s := makeClient(t, nil, nil) defer s.Stop() @@ -269,7 +270,40 @@ func TestAgent_Monitor(t *testing.T) { agent := c.Agent() doneCh := make(chan struct{}) - logCh, err := agent.Monitor("debug", doneCh, nil) + logCh, err := agent.Monitor("debug", "", doneCh, nil) + defer close(doneCh) + if err != nil { + t.Fatalf("err: %v", err) + } + + // make a request to generate some logs + _, err = agent.Region() + require.NoError(t, err) + + // Wait for the first log message and validate it + for { + select { + case log := <-logCh: + if log == " " { + return + } + require.Contains(t, log, "[DEBUG]") + case <-time.After(10 * time.Second): + require.Fail(t, "failed to get a log message") + } + } +} +func TestAgent_MonitorWithNode(t *testing.T) { + t.Parallel() + c, s := makeClient(t, nil, nil) + defer s.Stop() + + agent := c.Agent() + id, _ := uuid.GenerateUUID() + + doneCh := make(chan struct{}) + // todo need to create or stub a nodeid? + logCh, err := agent.Monitor("debug", id, doneCh, nil) defer close(doneCh) if err != nil { t.Fatalf("err: %v", err) diff --git a/client/monitor_endpoint.go b/client/monitor_endpoint.go index 315714ec6..9fd6d9229 100644 --- a/client/monitor_endpoint.go +++ b/client/monitor_endpoint.go @@ -23,13 +23,13 @@ type Monitor struct { func NewMonitorEndpoint(c *Client) *Monitor { m := &Monitor{c: c} - m.c.streamingRpcs.Register("Client.Monitor", m.monitor) + m.c.streamingRpcs.Register("Agent.Monitor", m.monitor) return m } func (m *Monitor) monitor(conn io.ReadWriteCloser) { defer metrics.MeasureSince([]string{"client", "monitor", "monitor"}, time.Now()) - // defer conn.Close() + defer conn.Close() // Decode arguments var req cstructs.MonitorRequest @@ -62,8 +62,6 @@ func (m *Monitor) monitor(conn io.ReadWriteCloser) { return } - // var buf bytes.Buffer - ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -96,11 +94,12 @@ OUTER: select { case log := <-streamWriter.logCh: var resp cstructs.StreamErrWrapper + + resp.Payload = log if err := encoder.Encode(resp); err != nil { streamErr = err break OUTER } - resp.Payload = []byte(log) encoder.Reset(conn) case <-ctx.Done(): break OUTER @@ -117,7 +116,7 @@ OUTER: type streamWriter struct { sync.Mutex logs []string - logCh chan string + logCh chan []byte index int droppedCount int } @@ -125,7 +124,7 @@ type streamWriter struct { func newStreamWriter(buf int) *streamWriter { return &streamWriter{ logs: make([]string, buf), - logCh: make(chan string, buf), + logCh: make(chan []byte, buf), index: 0, } } @@ -136,16 +135,16 @@ func (d *streamWriter) Write(p []byte) (n int, err error) { // Strip off newlines at the end if there are any since we store // individual log lines in the agent. 
- n = len(p) - if p[n-1] == '\n' { - p = p[:n-1] - } + // n = len(p) + // if p[n-1] == '\n' { + // p = p[:n-1] + // } d.logs[d.index] = string(p) d.index = (d.index + 1) % len(d.logs) select { - case d.logCh <- string(p): + case d.logCh <- p: default: d.droppedCount++ } diff --git a/client/rpc.go b/client/rpc.go index 6a3b83717..35c3e92b8 100644 --- a/client/rpc.go +++ b/client/rpc.go @@ -236,7 +236,6 @@ func (c *Client) setupClientRpcServer(server *rpc.Server) { server.Register(c.endpoints.ClientStats) server.Register(c.endpoints.FileSystem) server.Register(c.endpoints.Allocations) - server.Register(c.endpoints.Monitor) } // rpcConnListener is a long lived function that listens for new connections diff --git a/command/agent/agent_endpoint.go b/command/agent/agent_endpoint.go index 46401fc61..e286e1f6f 100644 --- a/command/agent/agent_endpoint.go +++ b/command/agent/agent_endpoint.go @@ -195,11 +195,11 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( var handler structs.StreamingRpcHandler var handlerErr error if useLocalClient { - handler, handlerErr = s.agent.Client().StreamingRpcHandler("Client.Monitor") + handler, handlerErr = s.agent.Client().StreamingRpcHandler("Agent.Monitor") } else if useClientRPC { - handler, handlerErr = s.agent.Client().RemoteStreamingRpcHandler("Client.Monitor") + handler, handlerErr = s.agent.Client().RemoteStreamingRpcHandler("Agent.Monitor") } else if useServerRPC { - handler, handlerErr = s.agent.Server().StreamingRpcHandler("Client.Monitor") + handler, handlerErr = s.agent.Server().StreamingRpcHandler("Agent.Monitor") } else { handlerErr = CodedError(400, "No local Node and node_id not provided") } diff --git a/command/agent/agent_endpoint_test.go b/command/agent/agent_endpoint_test.go index 50c4a0d25..72be97b7e 100644 --- a/command/agent/agent_endpoint_test.go +++ b/command/agent/agent_endpoint_test.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "io/ioutil" "net" "net/http" "net/http/httptest" @@ -318,21 +319,26 @@ func TestHTTP_AgentMonitor(t *testing.T) { // fully set up maxLogAttempts := 10 tried := 0 + out := "" testutil.WaitForResult(func() (bool, error) { if tried < maxLogAttempts { s.Server.logger.Debug("log that should not be sent") s.Server.logger.Warn("log that should be sent") tried++ } + output, err := ioutil.ReadAll(resp.Body) + if err != nil { + return false, err + } - got := resp.Body.String() + out += string(output) want := "[WARN] http: log that should be sent" - if strings.Contains(got, want) { + if strings.Contains(out, want) { require.NotContains(t, resp.Body.String(), "[DEBUG]") return true, nil } - return false, fmt.Errorf("missing expected log, got: %v, want: %v", got, want) + return false, fmt.Errorf("missing expected log, got: %v, want: %v", out, want) }, func(err error) { require.Fail(t, err.Error()) }) From 8095b4868a2379c629943e1d87f044c53ed92cff Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Tue, 15 Oct 2019 15:14:25 -0400 Subject: [PATCH 08/34] New monitor pkg for shared monitor functionality Adds new package that can be used by client and server RPC endpoints to facilitate monitoring based off of a logger clean up old code small comment about write rm old comment about minsize rename to Monitor Removes connection logic from monitor command Keep connection logic in endpoints, use a channel to send results from monitoring use new multisink logger and interfaces small test for dropped messages update go-hclogger and update 
sink/intercept logger interfaces --- client/client.go | 8 +- client/config/config.go | 2 +- client/monitor_endpoint.go | 87 ++----- client/monitor_endpoint_test.go | 95 +++++++ client/structs/structs.go | 3 + command/agent/agent.go | 8 +- command/agent/agent_endpoint.go | 245 ++++++------------ command/agent/command.go | 4 +- command/agent/http.go | 2 - command/agent/monitor/monitor.go | 72 +++++ command/agent/monitor/monitor_test.go | 62 +++++ command/agent/testagent.go | 2 +- command/assets/example-short.nomad | 2 +- command/assets/example.nomad | 2 +- dev/docker-clients/client.nomad | 2 +- e2e/consul/input/consul_example.nomad | 2 +- e2e/metrics/input/helloworld.nomad | 2 +- e2e/metrics/input/redis.nomad | 2 +- e2e/metrics/input/simpleweb.nomad | 2 +- e2e/prometheus/prometheus.nomad | 2 +- helper/testlog/testlog.go | 4 +- nomad/client_monitor_endpoint.go | 186 +++++++++++++ nomad/config.go | 2 +- nomad/server.go | 8 +- .../hashicorp/go-hclog/colorize_unix.go | 27 ++ .../hashicorp/go-hclog/colorize_windows.go | 33 +++ vendor/github.com/hashicorp/go-hclog/go.mod | 5 + vendor/github.com/hashicorp/go-hclog/go.sum | 10 + .../hashicorp/go-hclog/interceptlogger.go | 195 ++++++++++++++ .../hashicorp/go-hclog/intlogger.go | 137 +++++----- .../github.com/hashicorp/go-hclog/logger.go | 68 ++++- .../hashicorp/go-hclog/nulllogger.go | 4 + .../github.com/hashicorp/go-hclog/writer.go | 20 +- vendor/vendor.json | 2 +- 34 files changed, 969 insertions(+), 338 deletions(-) create mode 100644 client/monitor_endpoint_test.go create mode 100644 command/agent/monitor/monitor.go create mode 100644 command/agent/monitor/monitor_test.go create mode 100644 nomad/client_monitor_endpoint.go create mode 100644 vendor/github.com/hashicorp/go-hclog/colorize_unix.go create mode 100644 vendor/github.com/hashicorp/go-hclog/colorize_windows.go create mode 100644 vendor/github.com/hashicorp/go-hclog/interceptlogger.go diff --git a/client/client.go b/client/client.go index a1bf34c08..0b68e3836 100644 --- a/client/client.go +++ b/client/client.go @@ -163,8 +163,8 @@ type Client struct { configCopy *config.Config configLock sync.RWMutex - logger hclog.MultiSinkLogger - rpcLogger hclog.MultiSinkLogger + logger hclog.InterceptLogger + rpcLogger hclog.Logger connPool *pool.ConnPool @@ -304,7 +304,7 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic } // Create the logger - logger := cfg.Logger.ResetNamed("client").(hclog.MultiSinkLogger) + logger := cfg.Logger.ResetNamedIntercept("client") // Create the client c := &Client{ @@ -316,7 +316,7 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic tlsWrap: tlsWrap, streamingRpcs: structs.NewStreamingRpcRegistry(), logger: logger, - rpcLogger: logger.Named("rpc").(hclog.MultiSinkLogger), + rpcLogger: logger.Named("rpc"), allocs: make(map[string]AllocRunner), allocUpdates: make(chan *structs.Allocation, 64), shutdownCh: make(chan struct{}), diff --git a/client/config/config.go b/client/config/config.go index 0df3cac27..d9cabf8d8 100644 --- a/client/config/config.go +++ b/client/config/config.go @@ -81,7 +81,7 @@ type Config struct { LogOutput io.Writer // Logger provides a logger to thhe client - Logger log.MultiSinkLogger + Logger log.InterceptLogger // Region is the clients region Region string diff --git a/client/monitor_endpoint.go b/client/monitor_endpoint.go index 9fd6d9229..6d4aa4183 100644 --- a/client/monitor_endpoint.go +++ b/client/monitor_endpoint.go @@ -4,10 +4,11 @@ import ( "context" "errors" "io" - "sync" 
+ "strings" "time" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/command/agent/monitor" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" "github.com/ugorji/go/codec" @@ -62,39 +63,36 @@ func (m *Monitor) monitor(conn io.ReadWriteCloser) { return } + stopCh := make(chan struct{}) ctx, cancel := context.WithCancel(context.Background()) + defer close(stopCh) defer cancel() - streamWriter := newStreamWriter(512) - - streamLog := log.New(&log.LoggerOptions{ - Level: logLevel, - Output: streamWriter, + monitor := monitor.New(512, m.c.logger, &log.LoggerOptions{ + Level: logLevel, + JSONFormat: false, }) - m.c.logger.RegisterSink(streamLog) - defer m.c.logger.DeregisterSink(streamLog) go func() { - for { - if _, err := conn.Read(nil); err != nil { - // One end of the pipe was explicitly closed, exit cleanly - cancel() - return - } - select { - case <-ctx.Done(): - return - } + if _, err := conn.Read(nil); err != nil { + close(stopCh) + cancel() + return + } + select { + case <-ctx.Done(): + return } }() + logCh := monitor.Start(stopCh) + var streamErr error OUTER: for { select { - case log := <-streamWriter.logCh: + case log := <-logCh: var resp cstructs.StreamErrWrapper - resp.Payload = log if err := encoder.Encode(resp); err != nil { streamErr = err @@ -107,46 +105,15 @@ OUTER: } if streamErr != nil { - handleStreamResultError(streamErr, helper.Int64ToPtr(500), encoder) + // Nothing to do as conn is closed + if streamErr == io.EOF || strings.Contains(streamErr.Error(), "closed") { + return + } + + // Attempt to send the error + encoder.Encode(&cstructs.StreamErrWrapper{ + Error: cstructs.NewRpcError(streamErr, helper.Int64ToPtr(500)), + }) return } - -} - -type streamWriter struct { - sync.Mutex - logs []string - logCh chan []byte - index int - droppedCount int -} - -func newStreamWriter(buf int) *streamWriter { - return &streamWriter{ - logs: make([]string, buf), - logCh: make(chan []byte, buf), - index: 0, - } -} - -func (d *streamWriter) Write(p []byte) (n int, err error) { - d.Lock() - defer d.Unlock() - - // Strip off newlines at the end if there are any since we store - // individual log lines in the agent. 
- // n = len(p) - // if p[n-1] == '\n' { - // p = p[:n-1] - // } - - d.logs[d.index] = string(p) - d.index = (d.index + 1) % len(d.logs) - - select { - case d.logCh <- p: - default: - d.droppedCount++ - } - return } diff --git a/client/monitor_endpoint_test.go b/client/monitor_endpoint_test.go new file mode 100644 index 000000000..ca95cf731 --- /dev/null +++ b/client/monitor_endpoint_test.go @@ -0,0 +1,95 @@ +package client + +import ( + "fmt" + "io" + "net" + "strings" + "testing" + "time" + + "github.com/hashicorp/nomad/client/config" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/nomad" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/testutil" + "github.com/stretchr/testify/require" + "github.com/ugorji/go/codec" +) + +func TestMonitor_Monitor(t *testing.T) { + t.Parallel() + require := require.New(t) + + // start server and client + s := nomad.TestServer(t, nil) + defer s.Shutdown() + testutil.WaitForLeader(t, s.RPC) + + c, cleanup := TestClient(t, func(c *config.Config) { + c.Servers = []string{s.GetConfig().RPCAddr.String()} + }) + defer cleanup() + + req := cstructs.MonitorRequest{ + LogLevel: "debug", + NodeID: c.NodeID(), + } + + handler, err := c.StreamingRpcHandler("Agent.Monitor") + require.Nil(err) + + // create pipe + p1, p2 := net.Pipe() + defer p1.Close() + defer p2.Close() + + errCh := make(chan error) + streamMsg := make(chan *cstructs.StreamErrWrapper) + + go handler(p2) + + // Start decoder + go func() { + decoder := codec.NewDecoder(p1, structs.MsgpackHandle) + for { + var msg cstructs.StreamErrWrapper + if err := decoder.Decode(&msg); err != nil { + if err == io.EOF || strings.Contains(err.Error(), "closed") { + return + } + errCh <- fmt.Errorf("error decoding: %v", err) + } + + streamMsg <- &msg + } + }() + + // send request + encoder := codec.NewEncoder(p1, structs.MsgpackHandle) + require.Nil(encoder.Encode(req)) + + timeout := time.After(1 * time.Second) + expected := "[DEBUG]" + received := "" + +OUTER: + for { + select { + case <-timeout: + t.Fatal("timeout waiting for logs") + case err := <-errCh: + t.Fatal(err) + case msg := <-streamMsg: + if msg.Error != nil { + t.Fatalf("Got error: %v", msg.Error.Error()) + } + + received += string(msg.Payload) + if strings.Contains(received, expected) { + require.Nil(p2.Close()) + break OUTER + } + } + } +} diff --git a/client/structs/structs.go b/client/structs/structs.go index 6a6206744..350e9de69 100644 --- a/client/structs/structs.go +++ b/client/structs/structs.go @@ -41,6 +41,9 @@ type MonitorRequest struct { // LogJSON specifies if log format should be unstructured or json LogJSON bool + // NodeID is the node we want to track the logs of + NodeID string + structs.QueryOptions } diff --git a/command/agent/agent.go b/command/agent/agent.go index b02e6a765..26f9e00a8 100644 --- a/command/agent/agent.go +++ b/command/agent/agent.go @@ -53,8 +53,8 @@ type Agent struct { config *Config configLock sync.Mutex - logger log.MultiSinkLogger - httpLogger log.MultiSinkLogger + logger log.InterceptLogger + httpLogger log.Logger logOutput io.Writer // consulService is Nomad's custom Consul client for managing services @@ -87,7 +87,7 @@ type Agent struct { } // NewAgent is used to create a new agent with the given configuration -func NewAgent(config *Config, logger log.MultiSinkLogger, logOutput io.Writer, inmem *metrics.InmemSink) (*Agent, error) { +func NewAgent(config *Config, logger log.InterceptLogger, logOutput io.Writer, inmem *metrics.InmemSink) (*Agent, error) 
{ a := &Agent{ config: config, logOutput: logOutput, @@ -97,7 +97,7 @@ func NewAgent(config *Config, logger log.MultiSinkLogger, logOutput io.Writer, i // Create the loggers a.logger = logger - a.httpLogger = a.logger.ResetNamed("http").(log.MultiSinkLogger) + a.httpLogger = a.logger.ResetNamed("http") // Global logger should match internal logger as much as possible golog.SetFlags(golog.LstdFlags | golog.Lmicroseconds) diff --git a/command/agent/agent_endpoint.go b/command/agent/agent_endpoint.go index e286e1f6f..b99b62f60 100644 --- a/command/agent/agent_endpoint.go +++ b/command/agent/agent_endpoint.go @@ -10,7 +10,6 @@ import ( "net/http" "sort" "strings" - "sync" "github.com/docker/docker/pkg/ioutils" log "github.com/hashicorp/go-hclog" @@ -175,182 +174,102 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( return nil, CodedError(400, fmt.Sprintf("Unknown log level: %s", logLevel)) } - // START - // Determine if we are targeting a server or client nodeID := req.URL.Query().Get("nodeID") - if nodeID != "" { - // Build the request and parse the ACL token - args := cstructs.MonitorRequest{ - LogLevel: logLevel, - LogJSON: false, - } - s.parse(resp, req, &args.QueryOptions.Region, &args.QueryOptions) + // Build the request and parse the ACL token + args := cstructs.MonitorRequest{ + NodeID: nodeID, + LogLevel: logLevel, + LogJSON: false, + } + s.parse(resp, req, &args.QueryOptions.Region, &args.QueryOptions) - // Determine the handler to use - useLocalClient, useClientRPC, useServerRPC := s.rpcHandlerForNode(nodeID) + // Determine the handler to use + useLocalClient, useClientRPC, useServerRPC := s.rpcHandlerForNode(nodeID) - // Make the RPC - var handler structs.StreamingRpcHandler - var handlerErr error - if useLocalClient { - handler, handlerErr = s.agent.Client().StreamingRpcHandler("Agent.Monitor") - } else if useClientRPC { - handler, handlerErr = s.agent.Client().RemoteStreamingRpcHandler("Agent.Monitor") - } else if useServerRPC { - handler, handlerErr = s.agent.Server().StreamingRpcHandler("Agent.Monitor") - } else { - handlerErr = CodedError(400, "No local Node and node_id not provided") - } - - if handlerErr != nil { - return nil, CodedError(500, handlerErr.Error()) - } - httpPipe, handlerPipe := net.Pipe() - decoder := codec.NewDecoder(httpPipe, structs.MsgpackHandle) - encoder := codec.NewEncoder(httpPipe, structs.MsgpackHandle) - - ctx, cancel := context.WithCancel(context.Background()) - go func() { - <-ctx.Done() - httpPipe.Close() - }() - - // Create an ouput that gets flushed on every write - output := ioutils.NewWriteFlusher(resp) - - // Create a channel that decodes the results - errCh := make(chan HTTPCodedError, 2) - - // stream the response - go func() { - defer cancel() - - // Send the request - if err := encoder.Encode(args); err != nil { - errCh <- CodedError(500, err.Error()) - return - } - - for { - select { - case <-ctx.Done(): - errCh <- nil - return - default: - } - - var res cstructs.StreamErrWrapper - if err := decoder.Decode(&res); err != nil { - errCh <- CodedError(500, err.Error()) - return - } - decoder.Reset(httpPipe) - - if err := res.Error; err != nil { - if err.Code != nil { - errCh <- CodedError(int(*err.Code), err.Error()) - return - } - } - - if _, err := io.Copy(output, bytes.NewReader(res.Payload)); err != nil { - errCh <- CodedError(500, err.Error()) - return - } - } - }() - - handler(handlerPipe) - cancel() - codedErr := <-errCh - - if codedErr != nil && - (codedErr == io.EOF || - 
strings.Contains(codedErr.Error(), "closed") || - strings.Contains(codedErr.Error(), "EOF")) { - codedErr = nil - } - return nil, codedErr + // Make the RPC + var handler structs.StreamingRpcHandler + var handlerErr error + if useLocalClient { + handler, handlerErr = s.agent.Client().StreamingRpcHandler("Agent.Monitor") + } else if useClientRPC { + handler, handlerErr = s.agent.Client().RemoteStreamingRpcHandler("Agent.Monitor") + } else if useServerRPC { + handler, handlerErr = s.agent.Server().StreamingRpcHandler("Agent.Monitor") } else { - // Create flusher for streaming - flusher, ok := resp.(http.Flusher) - if !ok { - return nil, CodedError(400, "Streaming not supported") + handlerErr = CodedError(400, "No local Node and node_id not provided") + } + + if handlerErr != nil { + return nil, CodedError(500, handlerErr.Error()) + } + httpPipe, handlerPipe := net.Pipe() + decoder := codec.NewDecoder(httpPipe, structs.MsgpackHandle) + encoder := codec.NewEncoder(httpPipe, structs.MsgpackHandle) + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + <-ctx.Done() + httpPipe.Close() + }() + + // Create an ouput that gets flushed on every write + output := ioutils.NewWriteFlusher(resp) + + // Create a channel that decodes the results + errCh := make(chan HTTPCodedError, 2) + + // stream the response + go func() { + defer cancel() + + // Send the request + if err := encoder.Encode(args); err != nil { + errCh <- CodedError(500, err.Error()) + return } - streamWriter := newStreamWriter(512) - streamLog := log.New(&log.LoggerOptions{ - Level: log.LevelFromString(logLevel), - Output: streamWriter, - }) - s.agent.logger.RegisterSink(streamLog) - defer s.agent.logger.DeregisterSink(streamLog) - - notify := resp.(http.CloseNotifier).CloseNotify() - - // Send header so client can start streaming body - resp.WriteHeader(http.StatusOK) - // gziphanlder needs a byte to be written and flushed in order - // to tell gzip handler to ignore this response and not compress - resp.Write([]byte("\n")) - flusher.Flush() - for { select { - case <-notify: - s.agent.logger.DeregisterSink(streamLog) - if streamWriter.droppedCount > 0 { - s.agent.logger.Warn(fmt.Sprintf("Dropped %d logs during monitor request", streamWriter.droppedCount)) + case <-ctx.Done(): + errCh <- nil + return + default: + } + + var res cstructs.StreamErrWrapper + if err := decoder.Decode(&res); err != nil { + errCh <- CodedError(500, err.Error()) + return + } + decoder.Reset(httpPipe) + + if err := res.Error; err != nil { + if err.Code != nil { + errCh <- CodedError(int(*err.Code), err.Error()) + return } - return nil, nil - case log := <-streamWriter.logCh: - fmt.Fprintln(resp, log) - flusher.Flush() + } + + if _, err := io.Copy(output, bytes.NewReader(res.Payload)); err != nil { + errCh <- CodedError(500, err.Error()) + return } } + }() + + handler(handlerPipe) + cancel() + codedErr := <-errCh + + if codedErr != nil && + (codedErr == io.EOF || + strings.Contains(codedErr.Error(), "closed") || + strings.Contains(codedErr.Error(), "EOF")) { + codedErr = nil } - - return nil, nil -} - -type streamWriter struct { - sync.Mutex - logs []string - logCh chan string - index int - droppedCount int -} - -func newStreamWriter(buf int) *streamWriter { - return &streamWriter{ - logs: make([]string, buf), - logCh: make(chan string, buf), - index: 0, - } -} - -func (d *streamWriter) Write(p []byte) (n int, err error) { - d.Lock() - defer d.Unlock() - - // Strip off newlines at the end if there are any since we store - // individual log lines 
in the agent. - n = len(p) - if p[n-1] == '\n' { - p = p[:n-1] - } - - d.logs[d.index] = string(p) - d.index = (d.index + 1) % len(d.logs) - - select { - case d.logCh <- string(p): - default: - d.droppedCount++ - } - return + return nil, codedErr } func (s *HTTPServer) AgentForceLeaveRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { diff --git a/command/agent/command.go b/command/agent/command.go index af20fb0ff..c0567bda8 100644 --- a/command/agent/command.go +++ b/command/agent/command.go @@ -447,7 +447,7 @@ func (c *Command) setupLoggers(config *Config) (*gatedwriter.Writer, io.Writer) } // setupAgent is used to start the agent and various interfaces -func (c *Command) setupAgent(config *Config, logger hclog.MultiSinkLogger, logOutput io.Writer, inmem *metrics.InmemSink) error { +func (c *Command) setupAgent(config *Config, logger hclog.InterceptLogger, logOutput io.Writer, inmem *metrics.InmemSink) error { c.Ui.Output("Starting Nomad agent...") agent, err := NewAgent(config, logger, logOutput, inmem) if err != nil { @@ -602,7 +602,7 @@ func (c *Command) Run(args []string) int { } // Create logger - logger := hclog.NewMultiSink(&hclog.LoggerOptions{ + logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{ Name: "agent", Level: hclog.LevelFromString(config.LogLevel), Output: logOutput, diff --git a/command/agent/http.go b/command/agent/http.go index 1ee6540c4..756b9ea5a 100644 --- a/command/agent/http.go +++ b/command/agent/http.go @@ -106,8 +106,6 @@ func NewHTTPServer(agent *Agent, config *Config) (*HTTPServer, error) { srv.registerHandlers(config.EnableDebug) // Handle requests with gzip compression - // Use MinSize of 1 to allow a zero byte flush to return - // response header used for streaming gzip, err := gziphandler.GzipHandlerWithOpts(gziphandler.MinSize(0)) if err != nil { return nil, err diff --git a/command/agent/monitor/monitor.go b/command/agent/monitor/monitor.go new file mode 100644 index 000000000..7c56270b0 --- /dev/null +++ b/command/agent/monitor/monitor.go @@ -0,0 +1,72 @@ +package monitor + +import ( + "sync" + + log "github.com/hashicorp/go-hclog" +) + +type Monitor struct { + sync.Mutex + sink log.SinkAdapter + logger log.InterceptLogger + logCh chan []byte + index int + droppedCount int + bufSize int +} + +func New(buf int, logger log.InterceptLogger, opts *log.LoggerOptions) *Monitor { + sw := &Monitor{ + logger: logger, + logCh: make(chan []byte, buf), + index: 0, + bufSize: buf, + } + + opts.Output = sw + sink := log.NewSinkAdapter(opts) + sw.sink = sink + + return sw +} + +func (d *Monitor) Start(stopCh <-chan struct{}) <-chan []byte { + d.logger.RegisterSink(d.sink) + + logCh := make(chan []byte, d.bufSize) + go func() { + for { + select { + case log := <-d.logCh: + logCh <- log + case <-stopCh: + d.logger.DeregisterSink(d.sink) + close(d.logCh) + return + } + } + }() + + return logCh +} + +// Write attemps to send latest log to logCh +// it drops the log if channel is unavailable to receive +func (d *Monitor) Write(p []byte) (n int, err error) { + bytes := make([]byte, len(p)) + copy(bytes, p) + + select { + case d.logCh <- bytes: + default: + d.Lock() + defer d.Unlock() + d.droppedCount++ + if d.droppedCount > 10 { + d.logger.Warn("Monitor dropped %d logs during monitor request", d.droppedCount) + d.droppedCount = 0 + } + } + return +} diff --git a/command/agent/monitor/monitor_test.go b/command/agent/monitor/monitor_test.go new file mode 100644 index 000000000..21be76b47 --- /dev/null +++ b/command/agent/monitor/monitor_test.go 
@@ -0,0 +1,62 @@ +package monitor + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + log "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" +) + +func TestMonitor_Start(t *testing.T) { + t.Parallel() + + logger := log.NewInterceptLogger(&log.LoggerOptions{ + Level: log.Error, + }) + + m := New(512, logger, &log.LoggerOptions{ + Level: log.Debug, + }) + + closeCh := make(chan struct{}) + defer close(closeCh) + + logCh := m.Start(closeCh) + go func() { + for { + select { + case log := <-logCh: + require.Contains(t, string(log), "[DEBUG] test log") + case <-time.After(1 * time.Second): + t.Fatal("Expected to receive from log channel") + } + } + }() + logger.Debug("test log") +} + +func TestMonitor_DroppedMessages(t *testing.T) { + t.Parallel() + + logger := log.NewInterceptLogger(&log.LoggerOptions{ + Level: log.Warn, + }) + + m := New(5, logger, &log.LoggerOptions{ + Level: log.Debug, + }) + + doneCh := make(chan struct{}) + defer close(doneCh) + + m.Start(doneCh) + + for i := 0; i <= 6; i++ { + logger.Debug("test message") + } + + assert.Equal(t, 1, m.droppedCount) +} diff --git a/command/agent/testagent.go b/command/agent/testagent.go index 8b5bb6afd..1dc2e0d82 100644 --- a/command/agent/testagent.go +++ b/command/agent/testagent.go @@ -216,7 +216,7 @@ func (a *TestAgent) start() (*Agent, error) { return nil, fmt.Errorf("unable to set up in memory metrics needed for agent initialization") } - logger := hclog.NewMultiSink(&hclog.LoggerOptions{ + logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{ Name: "agent", Level: hclog.LevelFromString(a.Config.LogLevel), Output: a.LogOutput, diff --git a/command/assets/example-short.nomad b/command/assets/example-short.nomad index ae3de97d3..d450ff7a1 100644 --- a/command/assets/example-short.nomad +++ b/command/assets/example-short.nomad @@ -19,7 +19,7 @@ job "example" { network { mbits = 10 - port "db" {} + port "db" {} } } } diff --git a/command/assets/example.nomad b/command/assets/example.nomad index 8f8d5fffa..f78dc356b 100644 --- a/command/assets/example.nomad +++ b/command/assets/example.nomad @@ -316,7 +316,7 @@ job "example" { network { mbits = 10 - port "db" {} + port "db" {} } } # The "service" stanza instructs Nomad to register this task as a service diff --git a/dev/docker-clients/client.nomad b/dev/docker-clients/client.nomad index 4689df775..37248bfce 100644 --- a/dev/docker-clients/client.nomad +++ b/dev/docker-clients/client.nomad @@ -23,7 +23,7 @@ job "client" { network { mbits = 10 - port "http"{} + port "http" {} } } diff --git a/e2e/consul/input/consul_example.nomad b/e2e/consul/input/consul_example.nomad index 18b02be7c..24217b842 100644 --- a/e2e/consul/input/consul_example.nomad +++ b/e2e/consul/input/consul_example.nomad @@ -49,7 +49,7 @@ job "consul-example" { network { mbits = 10 - port "db" {} + port "db" {} } } diff --git a/e2e/metrics/input/helloworld.nomad b/e2e/metrics/input/helloworld.nomad index bd8cfb443..f8fed4ed8 100644 --- a/e2e/metrics/input/helloworld.nomad +++ b/e2e/metrics/input/helloworld.nomad @@ -29,7 +29,7 @@ job "hello" { network { mbits = 10 - port "web" {} + port "web" {} } } diff --git a/e2e/metrics/input/redis.nomad b/e2e/metrics/input/redis.nomad index 27d8a5d84..2fedaed87 100644 --- a/e2e/metrics/input/redis.nomad +++ b/e2e/metrics/input/redis.nomad @@ -39,7 +39,7 @@ job "redis" { network { mbits = 10 - port "db" {} + port "db" {} } } diff --git a/e2e/metrics/input/simpleweb.nomad b/e2e/metrics/input/simpleweb.nomad index 352f89bb0..92a20e1a3 
100644 --- a/e2e/metrics/input/simpleweb.nomad +++ b/e2e/metrics/input/simpleweb.nomad @@ -28,7 +28,7 @@ job "nginx" { network { mbits = 1 - port "http"{} + port "http" {} } } diff --git a/e2e/prometheus/prometheus.nomad b/e2e/prometheus/prometheus.nomad index c32d45d33..85b645443 100644 --- a/e2e/prometheus/prometheus.nomad +++ b/e2e/prometheus/prometheus.nomad @@ -64,7 +64,7 @@ EOH resources { network { mbits = 10 - port "prometheus_ui"{} + port "prometheus_ui" {} } } diff --git a/helper/testlog/testlog.go b/helper/testlog/testlog.go index 625c2cf9d..7a343b50c 100644 --- a/helper/testlog/testlog.go +++ b/helper/testlog/testlog.go @@ -72,7 +72,7 @@ func Logger(t LogPrinter) *log.Logger { } //HCLogger returns a new test hc-logger. -func HCLogger(t LogPrinter) hclog.MultiSinkLogger { +func HCLogger(t LogPrinter) hclog.InterceptLogger { level := hclog.Trace envLogLevel := os.Getenv("NOMAD_TEST_LOG_LEVEL") if envLogLevel != "" { @@ -83,7 +83,7 @@ func HCLogger(t LogPrinter) hclog.MultiSinkLogger { Output: NewWriter(t), IncludeLocation: true, } - return hclog.NewMultiSink(opts) + return hclog.NewInterceptLogger(opts) } type prefixStdout struct { diff --git a/nomad/client_monitor_endpoint.go b/nomad/client_monitor_endpoint.go new file mode 100644 index 000000000..bbe93ae16 --- /dev/null +++ b/nomad/client_monitor_endpoint.go @@ -0,0 +1,186 @@ +package nomad + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "strings" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/acl" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/command/agent/monitor" + "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/nomad/structs" + + "github.com/ugorji/go/codec" +) + +type Monitor struct { + srv *Server +} + +func (m *Monitor) register() { + m.srv.streamingRpcs.Register("Agent.Monitor", m.monitor) +} + +func (m *Monitor) monitor(conn io.ReadWriteCloser) { + defer conn.Close() + + // Decode args + var args cstructs.MonitorRequest + decoder := codec.NewDecoder(conn, structs.MsgpackHandle) + encoder := codec.NewEncoder(conn, structs.MsgpackHandle) + + if err := decoder.Decode(&args); err != nil { + handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + return + } + + // Check node read permissions + if aclObj, err := m.srv.ResolveToken(args.AuthToken); err != nil { + handleStreamResultError(err, nil, encoder) + return + } else if aclObj != nil && !aclObj.AllowNsOp(args.Namespace, acl.NamespaceCapabilityReadFS) { + handleStreamResultError(structs.ErrPermissionDenied, nil, encoder) + return + } + + var logLevel log.Level + if args.LogLevel == "" { + logLevel = log.LevelFromString("INFO") + } else { + logLevel = log.LevelFromString(args.LogLevel) + } + + if logLevel == log.NoLevel { + handleStreamResultError(errors.New("Unknown log level"), helper.Int64ToPtr(400), encoder) + return + } + + // Targeting a client so forward the request + if args.NodeID != "" { + nodeID := args.NodeID + + snap, err := m.srv.State().Snapshot() + if err != nil { + handleStreamResultError(err, nil, encoder) + return + } + + node, err := snap.NodeByID(nil, nodeID) + if err != nil { + handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + return + } + + if node == nil { + err := fmt.Errorf("Unknown node %q", nodeID) + handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + return + } + + if err := nodeSupportsRpc(node); err != nil { + handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + return + } + + // Get the Connection to the 
client either by fowarding to another server + // or creating direct stream + var clientConn net.Conn + state, ok := m.srv.getNodeConn(nodeID) + if !ok { + // Determine the server that has a connection to the node + srv, err := m.srv.serverWithNodeConn(nodeID, m.srv.Region()) + if err != nil { + var code *int64 + if structs.IsErrNoNodeConn(err) { + code = helper.Int64ToPtr(404) + } + handleStreamResultError(err, code, encoder) + return + } + conn, err := m.srv.streamingRpc(srv, "Agent.Monitor") + if err != nil { + handleStreamResultError(err, nil, encoder) + return + } + + clientConn = conn + } else { + stream, err := NodeStreamingRpc(state.Session, "Agent.Monitor") + if err != nil { + handleStreamResultError(err, nil, encoder) + return + } + clientConn = stream + } + defer clientConn.Close() + + // Send the Request + outEncoder := codec.NewEncoder(clientConn, structs.MsgpackHandle) + if err := outEncoder.Encode(args); err != nil { + handleStreamResultError(err, nil, encoder) + return + } + + structs.Bridge(conn, clientConn) + return + } + + // NodeID was empty, so monitor this current server + stopCh := make(chan struct{}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + monitor := monitor.New(512, m.srv.logger, &log.LoggerOptions{ + Level: logLevel, + JSONFormat: false, + }) + + go func() { + if _, err := conn.Read(nil); err != nil { + close(stopCh) + cancel() + return + } + select { + case <-ctx.Done(): + return + } + }() + + logCh := monitor.Start(stopCh) + + var streamErr error +OUTER: + for { + select { + case log := <-logCh: + var resp cstructs.StreamErrWrapper + resp.Payload = log + if err := encoder.Encode(resp); err != nil { + streamErr = err + break OUTER + } + encoder.Reset(conn) + case <-ctx.Done(): + break OUTER + } + } + + if streamErr != nil { + // Nothing to do as conn is closed + if streamErr == io.EOF || strings.Contains(streamErr.Error(), "closed") { + return + } + + // Attempt to send the error + encoder.Encode(&cstructs.StreamErrWrapper{ + Error: cstructs.NewRpcError(streamErr, helper.Int64ToPtr(500)), + }) + return + } +} diff --git a/nomad/config.go b/nomad/config.go index cabf981ea..053408eca 100644 --- a/nomad/config.go +++ b/nomad/config.go @@ -76,7 +76,7 @@ type Config struct { LogOutput io.Writer // Logger is the logger used by the server. - Logger log.Logger + Logger log.InterceptLogger // ProtocolVersion is the protocol version to speak. This must be between // ProtocolVersionMin and ProtocolVersionMax. 
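The client and server Agent.Monitor endpoints above drive the new command/agent/monitor package the same way: build a Monitor around the agent's InterceptLogger, call Start to register the sink and receive a channel of log lines, and close the stop channel to tear it down. Below is a minimal, self-contained sketch of that flow; the main function, the logger levels, and the one-second timeout are illustrative only and are not part of the patch.

package main

import (
	"fmt"
	"time"

	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/nomad/command/agent/monitor"
)

func main() {
	// The agent's root logger is an InterceptLogger, so sinks can be
	// attached and detached at runtime. INFO stands in for the agent's
	// configured log level.
	logger := log.NewInterceptLogger(&log.LoggerOptions{
		Name:  "agent",
		Level: log.Info,
	})

	// Buffer up to 512 lines and stream everything at DEBUG or above,
	// mirroring the values used by the endpoints in this patch.
	m := monitor.New(512, logger, &log.LoggerOptions{
		Level: log.Debug,
	})

	stopCh := make(chan struct{})
	logCh := m.Start(stopCh)

	// The root logger is at INFO, but the registered sink is at DEBUG,
	// so this line is delivered to the monitor channel only.
	logger.Debug("seen only by the monitor sink")

	select {
	case line := <-logCh:
		fmt.Print(string(line))
	case <-time.After(time.Second):
		fmt.Println("no log line received")
	}

	// Closing the stop channel deregisters the sink and ends streaming.
	close(stopCh)
}

Because monitor.New wraps its own Write method in a SinkAdapter (via hclog.NewSinkAdapter) and Start registers that sink on the InterceptLogger, the monitor can stream DEBUG lines while the root logger stays at a higher level, and Write drops lines instead of blocking when the channel is full.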
diff --git a/nomad/server.go b/nomad/server.go index 158d26a55..b6c1ef844 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -91,7 +91,7 @@ const ( type Server struct { config *Config - logger log.Logger + logger log.InterceptLogger // Connection pool to other Nomad servers connPool *pool.ConnPool @@ -252,6 +252,7 @@ type endpoints struct { // Client endpoints ClientStats *ClientStats FileSystem *FileSystem + Monitor *Monitor ClientAllocations *ClientAllocations } @@ -290,7 +291,7 @@ func NewServer(config *Config, consulCatalog consul.CatalogAPI) (*Server, error) } // Create the logger - logger := config.Logger.ResetNamed("nomad") + logger := config.Logger.ResetNamedIntercept("nomad") // Create the server s := &Server{ @@ -1044,6 +1045,9 @@ func (s *Server) setupRpcServer(server *rpc.Server, ctx *RPCContext) { // Streaming endpoints s.staticEndpoints.FileSystem = &FileSystem{srv: s, logger: s.logger.Named("client_fs")} s.staticEndpoints.FileSystem.register() + + s.staticEndpoints.Monitor = &Monitor{srv: s} + s.staticEndpoints.Monitor.register() } // Register the static handlers diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go new file mode 100644 index 000000000..44aa9bf2c --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go @@ -0,0 +1,27 @@ +// +build !windows + +package hclog + +import ( + "github.com/mattn/go-isatty" +) + +// setColorization will mutate the values of this logger +// to approperately configure colorization options. It provides +// a wrapper to the output stream on Windows systems. +func (l *intLogger) setColorization(opts *LoggerOptions) { + switch opts.Color { + case ColorOff: + fallthrough + case ForceColor: + return + case AutoColor: + fi := l.checkWriterIsFile() + isUnixTerm := isatty.IsTerminal(fi.Fd()) + isCygwinTerm := isatty.IsCygwinTerminal(fi.Fd()) + isTerm := isUnixTerm || isCygwinTerm + if !isTerm { + l.writer.color = ColorOff + } + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go new file mode 100644 index 000000000..23486b6d7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go @@ -0,0 +1,33 @@ +// +build windows + +package hclog + +import ( + "os" + + colorable "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +// setColorization will mutate the values of this logger +// to approperately configure colorization options. It provides +// a wrapper to the output stream on Windows systems. 
+func (l *intLogger) setColorization(opts *LoggerOptions) { + switch opts.Color { + case ColorOff: + return + case ForceColor: + fi := l.checkWriterIsFile() + l.writer.w = colorable.NewColorable(fi) + case AutoColor: + fi := l.checkWriterIsFile() + isUnixTerm := isatty.IsTerminal(os.Stdout.Fd()) + isCygwinTerm := isatty.IsCygwinTerminal(os.Stdout.Fd()) + isTerm := isUnixTerm || isCygwinTerm + if !isTerm { + l.writer.color = ColorOff + return + } + l.writer.w = colorable.NewColorable(fi) + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/go.mod b/vendor/github.com/hashicorp/go-hclog/go.mod index 0d079a654..b6698c083 100644 --- a/vendor/github.com/hashicorp/go-hclog/go.mod +++ b/vendor/github.com/hashicorp/go-hclog/go.mod @@ -2,6 +2,11 @@ module github.com/hashicorp/go-hclog require ( github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fatih/color v1.7.0 + github.com/mattn/go-colorable v0.1.4 + github.com/mattn/go-isatty v0.0.10 github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/testify v1.2.2 ) + +go 1.13 diff --git a/vendor/github.com/hashicorp/go-hclog/go.sum b/vendor/github.com/hashicorp/go-hclog/go.sum index e03ee77d9..9cee2196c 100644 --- a/vendor/github.com/hashicorp/go-hclog/go.sum +++ b/vendor/github.com/hashicorp/go-hclog/go.sum @@ -1,6 +1,16 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go new file mode 100644 index 000000000..a65f7be19 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go @@ -0,0 +1,195 @@ +package hclog + +import ( + "sync" + "sync/atomic" +) + +var _ Logger = &interceptLogger{} + +type interceptLogger struct { + Logger + + sync.Mutex + sinkCount *int32 + Sinks map[SinkAdapter]struct{} +} + +func NewInterceptLogger(opts *LoggerOptions) InterceptLogger { + intercept := &interceptLogger{ + Logger: New(opts), + sinkCount: new(int32), + Sinks: make(map[SinkAdapter]struct{}), + } + + atomic.StoreInt32(intercept.sinkCount, 0) + + return intercept +} + +// Emit the message and args at TRACE level to log and sinks 
+func (i *interceptLogger) Trace(msg string, args ...interface{}) { + i.Logger.Trace(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.Lock() + defer i.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Trace, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at DEBUG level to log and sinks +func (i *interceptLogger) Debug(msg string, args ...interface{}) { + i.Logger.Debug(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.Lock() + defer i.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Debug, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at INFO level to log and sinks +func (i *interceptLogger) Info(msg string, args ...interface{}) { + i.Logger.Info(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.Lock() + defer i.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Info, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at WARN level to log and sinks +func (i *interceptLogger) Warn(msg string, args ...interface{}) { + i.Logger.Warn(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.Lock() + defer i.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Warn, msg, i.retrieveImplied(args...)...) + } +} + +// Emit the message and args at ERROR level to log and sinks +func (i *interceptLogger) Error(msg string, args ...interface{}) { + i.Logger.Error(msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.Lock() + defer i.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), Error, msg, i.retrieveImplied(args...)...) + } +} + +func (i *interceptLogger) retrieveImplied(args ...interface{}) []interface{} { + top := i.Logger.ImpliedArgs() + + cp := make([]interface{}, len(top)+len(args)) + copy(cp, top) + copy(cp[len(top):], args) + + return cp +} + +// Create a new sub-Logger that a name decending from the current name. +// This is used to create a subsystem specific Logger. +// Registered sinks will subscribe to these messages as well. +func (i *interceptLogger) Named(name string) Logger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.Named(name) + + return &sub +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. Registered sinks will subscribe +// to these messages as well. +func (i *interceptLogger) ResetNamed(name string) Logger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.ResetNamed(name) + + return &sub +} + +// Create a new sub-Logger that a name decending from the current name. +// This is used to create a subsystem specific Logger. +// Registered sinks will subscribe to these messages as well. +func (i *interceptLogger) NamedIntercept(name string) InterceptLogger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.Named(name) + + return &sub +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. Registered sinks will subscribe +// to these messages as well. +func (i *interceptLogger) ResetNamedIntercept(name string) InterceptLogger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.ResetNamed(name) + + return &sub +} + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. 
This is used to create a context specific +// Logger. +func (i *interceptLogger) With(args ...interface{}) Logger { + var sub interceptLogger + + sub = *i + + sub.Logger = i.Logger.With(args...) + + return &sub +} + +// RegisterSink attaches a SinkAdapter to interceptLoggers sinks. +func (i *interceptLogger) RegisterSink(sink SinkAdapter) { + i.Lock() + defer i.Unlock() + + i.Sinks[sink] = struct{}{} + + atomic.AddInt32(i.sinkCount, 1) +} + +// DeregisterSink removes a SinkAdapter from interceptLoggers sinks. +func (i *interceptLogger) DeregisterSink(sink SinkAdapter) { + i.Lock() + defer i.Unlock() + + delete(i.Sinks, sink) + + atomic.AddInt32(i.sinkCount, -1) +} diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go index a94c1424e..91b038738 100644 --- a/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "log" + "os" "reflect" "runtime" "sort" @@ -15,6 +16,8 @@ import ( "sync" "sync/atomic" "time" + + "github.com/fatih/color" ) // TimeFormat to use for logging. This is a version of RFC3339 that contains @@ -32,11 +35,18 @@ var ( Warn: "[WARN] ", Error: "[ERROR]", } + + _levelToColor = map[Level]*color.Color{ + Debug: color.New(color.FgHiWhite), + Trace: color.New(color.FgHiGreen), + Info: color.New(color.FgHiBlue), + Warn: color.New(color.FgHiYellow), + Error: color.New(color.FgHiRed), + } ) // Make sure that intLogger is a Logger var _ Logger = &intLogger{} -var _ MultiSinkLogger = &intLogger{} // intLogger is an internal logger implementation. Internal in that it is // defined entirely by this package. @@ -52,13 +62,21 @@ type intLogger struct { writer *writer level *int32 - sinks map[Logger]struct{} - implied []interface{} } // New returns a configured logger. func New(opts *LoggerOptions) Logger { + return newLogger(opts) +} + +// NewSinkAdapter returns a SinkAdapter with configured settings +// defined by LoggerOptions +func NewSinkAdapter(opts *LoggerOptions) SinkAdapter { + return newLogger(opts) +} + +func newLogger(opts *LoggerOptions) *intLogger { if opts == nil { opts = &LoggerOptions{} } @@ -84,11 +102,12 @@ func New(opts *LoggerOptions) Logger { name: opts.Name, timeFormat: TimeFormat, mutex: mutex, - writer: newWriter(output), + writer: newWriter(output, opts.Color), level: new(int32), - sinks: make(map[Logger]struct{}), } + l.setColorization(opts) + if opts.TimeFormat != "" { l.timeFormat = opts.TimeFormat } @@ -98,31 +117,10 @@ func New(opts *LoggerOptions) Logger { return l } -func NewMultiSink(opts *LoggerOptions) MultiSinkLogger { - return New(opts).(MultiSinkLogger) -} - -func (l *intLogger) RegisterSink(logger Logger) { - l.mutex.Lock() - defer l.mutex.Unlock() - - if _, ok := l.sinks[logger]; ok { - return - } - - l.sinks[logger] = struct{}{} -} - -func (l *intLogger) DeregisterSink(logger Logger) { - l.mutex.Lock() - defer l.mutex.Unlock() - delete(l.sinks, logger) -} - // Log a message and a set of key/value pairs if the given level is at // or more severe that the threshold configured in the Logger. 
-func (l *intLogger) Log(level Level, msg string, args ...interface{}) { - if level < Level(atomic.LoadInt32(l.level)) && len(l.sinks) == 0 { +func (l *intLogger) Log(name string, level Level, msg string, args ...interface{}) { + if level < Level(atomic.LoadInt32(l.level)) { return } @@ -131,36 +129,10 @@ func (l *intLogger) Log(level Level, msg string, args ...interface{}) { l.mutex.Lock() defer l.mutex.Unlock() - for lh := range l.sinks { - lh, ok := lh.(*intLogger) - if !ok { - continue - } - - if level < Level(atomic.LoadInt32(lh.level)) { - continue - } - - // Set the sink name to the name of the calling log - lh.name = l.name - - if lh.json { - lh.logJSON(t, level, msg, args...) - } else { - lh.log(t, level, msg, args...) - } - - lh.writer.Flush(level) - } - - if level < Level(atomic.LoadInt32(l.level)) { - return - } - if l.json { - l.logJSON(t, level, msg, args...) + l.logJSON(t, name, level, msg, args...) } else { - l.log(t, level, msg, args...) + l.log(t, name, level, msg, args...) } l.writer.Flush(level) @@ -196,7 +168,7 @@ func trimCallerPath(path string) string { } // Non-JSON logging format function -func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) { +func (l *intLogger) log(t time.Time, name string, level Level, msg string, args ...interface{}) { l.writer.WriteString(t.Format(l.timeFormat)) l.writer.WriteByte(' ') @@ -219,8 +191,8 @@ func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{ l.writer.WriteByte(' ') - if l.name != "" { - l.writer.WriteString(l.name) + if name != "" { + l.writer.WriteString(name) l.writer.WriteString(": ") } @@ -349,8 +321,8 @@ func (l *intLogger) renderSlice(v reflect.Value) string { } // JSON logging function -func (l *intLogger) logJSON(t time.Time, level Level, msg string, args ...interface{}) { - vals := l.jsonMapEntry(t, level, msg) +func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, args ...interface{}) { + vals := l.jsonMapEntry(t, name, level, msg) args = append(l.implied, args...) if args != nil && len(args) > 0 { @@ -392,7 +364,7 @@ func (l *intLogger) logJSON(t time.Time, level Level, msg string, args ...interf err := json.NewEncoder(l.writer).Encode(vals) if err != nil { if _, ok := err.(*json.UnsupportedTypeError); ok { - plainVal := l.jsonMapEntry(t, level, msg) + plainVal := l.jsonMapEntry(t, name, level, msg) plainVal["@warn"] = errJsonUnsupportedTypeMsg json.NewEncoder(l.writer).Encode(plainVal) @@ -400,7 +372,7 @@ func (l *intLogger) logJSON(t time.Time, level Level, msg string, args ...interf } } -func (l intLogger) jsonMapEntry(t time.Time, level Level, msg string) map[string]interface{} { +func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg string) map[string]interface{} { vals := map[string]interface{}{ "@message": msg, "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), @@ -424,8 +396,8 @@ func (l intLogger) jsonMapEntry(t time.Time, level Level, msg string) map[string vals["@level"] = levelStr - if l.name != "" { - vals["@module"] = l.name + if name != "" { + vals["@module"] = name } if l.caller { @@ -438,27 +410,27 @@ func (l intLogger) jsonMapEntry(t time.Time, level Level, msg string) map[string // Emit the message and args at DEBUG level func (l *intLogger) Debug(msg string, args ...interface{}) { - l.Log(Debug, msg, args...) + l.Log(l.Name(), Debug, msg, args...) } // Emit the message and args at TRACE level func (l *intLogger) Trace(msg string, args ...interface{}) { - l.Log(Trace, msg, args...) 
+ l.Log(l.Name(), Trace, msg, args...) } // Emit the message and args at INFO level func (l *intLogger) Info(msg string, args ...interface{}) { - l.Log(Info, msg, args...) + l.Log(l.Name(), Info, msg, args...) } // Emit the message and args at WARN level func (l *intLogger) Warn(msg string, args ...interface{}) { - l.Log(Warn, msg, args...) + l.Log(l.Name(), Warn, msg, args...) } // Emit the message and args at ERROR level func (l *intLogger) Error(msg string, args ...interface{}) { - l.Log(Error, msg, args...) + l.Log(l.Name(), Error, msg, args...) } // Indicate that the logger would emit TRACE level logs @@ -576,3 +548,28 @@ func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { forceLevel: opts.ForceLevel, } } + +// checks if the underlying io.Writer is a file, and +// panics if not. For use by colorization. +func (l *intLogger) checkWriterIsFile() *os.File { + fi, ok := l.writer.w.(*os.File) + if !ok { + panic("Cannot enable coloring of non-file Writers") + } + return fi +} + +// Accept implements the SinkAdapter interface +func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) { + i.Log(name, level, msg, args...) +} + +// ImpliedArgs returns the loggers implied args +func (i *intLogger) ImpliedArgs() []interface{} { + return i.implied +} + +// Name returns the loggers name +func (i *intLogger) Name() string { + return i.name +} diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go index a7774e923..81045cbfe 100644 --- a/vendor/github.com/hashicorp/go-hclog/logger.go +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -53,6 +53,21 @@ func Fmt(str string, args ...interface{}) Format { return append(Format{str}, args...) } +// ColorOption expresses how the output should be colored, if at all. +type ColorOption uint8 + +const ( + // ColorOff is the default coloration, and does not + // inject color codes into the io.Writer. + ColorOff ColorOption = iota + // AutoColor checks if the io.Writer is a tty, + // and if so enables coloring. + AutoColor + // ForceColor will enable coloring, regardless of whether + // the io.Writer is a tty or not. + ForceColor +) + // LevelFromString returns a Level type for the named log level, or "NoLevel" if // the level string is invalid. This facilitates setting the log level via // config or environment variable by name in a predictable way. @@ -111,9 +126,14 @@ type Logger interface { // Indicate if ERROR logs would be emitted. This and the other Is* guards IsError() bool + ImpliedArgs() []interface{} + // Creates a sublogger that will always have the given key/value pairs With(args ...interface{}) Logger + // Returns the Name of the logger + Name() string + // Create a logger that will prepend the name string on the front of all messages. // If the logger already has a name, the new value will be appended to the current // name. That way, a major subsystem can use this to decorate all it's own logs @@ -136,17 +156,6 @@ type Logger interface { StandardWriter(opts *StandardLoggerOptions) io.Writer } -// MultiSinkLogger describes the interface that allows a logger to -// write to multiple sub loggers which may be configured to have different -// level and writer settings. 
This is useful for monitor commands to allow -// for streaming of logs at a lower level than what is set for the parent logger -type MultiSinkLogger interface { - Logger - - RegisterSink(logger Logger) - DeregisterSink(logger Logger) -} - // StandardLoggerOptions can be used to configure a new standard logger. type StandardLoggerOptions struct { // Indicate that some minimal parsing should be done on strings to try @@ -184,4 +193,41 @@ type LoggerOptions struct { // The time format to use instead of the default TimeFormat string + + // Color the output. On Windows, colored logs are only avaiable for io.Writers that + // are concretely instances of *os.File. + Color ColorOption +} + +// InterceptLogger describes the interface for using a logger +// that can register different output sinks. +// This is useful for sending lower level log messages +// to a different output while keeping the root logger +// at a higher one. +type InterceptLogger interface { + // Logger is the root logger for an InterceptLogger + Logger + + // RegisterSink adds a SinkAdapter to the InterceptLogger + RegisterSink(sink SinkAdapter) + + // DeregisterSink removes a SinkAdapter from the InterceptLogger + DeregisterSink(sink SinkAdapter) + + // Create a interceptlogger that will prepend the name string on the front of all messages. + // If the logger already has a name, the new value will be appended to the current + // name. That way, a major subsystem can use this to decorate all it's own logs + // without losing context. + NamedIntercept(name string) InterceptLogger + + // Create a interceptlogger that will prepend the name string on the front of all messages. + // This sets the name of the logger to the value directly, unlike Named which honor + // the current name as well. 
+ ResetNamedIntercept(name string) InterceptLogger +} + +// SinkAdapter describes the interface that must be implemented +// in order to Register a new sink to an InterceptLogger +type SinkAdapter interface { + Accept(name string, level Level, msg string, args ...interface{}) } diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go index 7ad6b351e..4abdd5583 100644 --- a/vendor/github.com/hashicorp/go-hclog/nulllogger.go +++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -35,8 +35,12 @@ func (l *nullLogger) IsWarn() bool { return false } func (l *nullLogger) IsError() bool { return false } +func (l *nullLogger) ImpliedArgs() []interface{} { return []interface{}{} } + func (l *nullLogger) With(args ...interface{}) Logger { return l } +func (l *nullLogger) Name() string { return "" } + func (l *nullLogger) Named(name string) Logger { return l } func (l *nullLogger) ResetNamed(name string) Logger { return l } diff --git a/vendor/github.com/hashicorp/go-hclog/writer.go b/vendor/github.com/hashicorp/go-hclog/writer.go index 7e8ec729d..421a1f06c 100644 --- a/vendor/github.com/hashicorp/go-hclog/writer.go +++ b/vendor/github.com/hashicorp/go-hclog/writer.go @@ -6,19 +6,27 @@ import ( ) type writer struct { - b bytes.Buffer - w io.Writer + b bytes.Buffer + w io.Writer + color ColorOption } -func newWriter(w io.Writer) *writer { - return &writer{w: w} +func newWriter(w io.Writer, color ColorOption) *writer { + return &writer{w: w, color: color} } func (w *writer) Flush(level Level) (err error) { + var unwritten = w.b.Bytes() + + if w.color != ColorOff { + color := _levelToColor[level] + unwritten = []byte(color.Sprintf("%s", unwritten)) + } + if lw, ok := w.w.(LevelWriter); ok { - _, err = lw.LevelWrite(level, w.b.Bytes()) + _, err = lw.LevelWrite(level, unwritten) } else { - _, err = w.w.Write(w.b.Bytes()) + _, err = w.w.Write(unwritten) } w.b.Reset() return err diff --git a/vendor/vendor.json b/vendor/vendor.json index 95ef75246..9a41d61e7 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -209,7 +209,7 @@ {"path":"github.com/hashicorp/go-envparse","checksumSHA1":"FKmqR4DC3nCXtnT9pe02z5CLNWo=","revision":"310ca1881b22af3522e3a8638c0b426629886196","revisionTime":"2018-01-19T21:58:41Z"}, {"path":"github.com/hashicorp/go-getter","checksumSHA1":"d4brua17AGQqMNtngK4xKOUwboY=","revision":"f5101da0117392c6e7960c934f05a2fd689a5b5f","revisionTime":"2019-08-22T19:45:07Z"}, {"path":"github.com/hashicorp/go-getter/helper/url","checksumSHA1":"9J+kDr29yDrwsdu2ULzewmqGjpA=","revision":"b345bfcec894fb7ff3fdf9b21baf2f56ea423d98","revisionTime":"2018-04-10T17:49:45Z"}, - {"path":"github.com/hashicorp/go-hclog","checksumSHA1":"uTAjKuGQr4/gpcgdEtTO+JhD/NY=","revision":"a4c7052ea48d1c284eca6ba6281910f0fd3b7b30","revisionTime":"2019-10-10T18:01:30Z","version":"f-multi-sink-logger","versionExact":"f-multi-sink-logger"}, + {"path":"github.com/hashicorp/go-hclog","checksumSHA1":"+gkR2S9qSVVy3gor4p0Z86RMgZI=","revision":"2bec91ee9db63c8546a105f89da3eb63037c5e6c","revisionTime":"2019-10-18T20:14:58Z","version":"f-multi-sink","versionExact":"f-multi-sink"}, {"path":"github.com/hashicorp/go-immutable-radix","checksumSHA1":"Cas2nprG6pWzf05A2F/OlnjUu2Y=","revision":"8aac2701530899b64bdea735a1de8da899815220","revisionTime":"2017-07-25T22:12:15Z"}, {"path":"github.com/hashicorp/go-memdb","checksumSHA1":"FMAvwDar2bQyYAW4XMFhAt0J5xA=","revision":"20ff6434c1cc49b80963d45bf5c6aa89c78d8d57","revisionTime":"2017-08-31T20:15:40Z"}, 
{"path":"github.com/hashicorp/go-msgpack/codec","checksumSHA1":"CKGYNUDKre3Z2g4hHNVfp5nTcfA=","revision":"23165f7bc3c2dda1891434ebb9da1511a7bafc1c","revisionTime":"2019-09-27T12:33:13Z","version":"upstream-08f7b40","versionExact":"upstream-08f7b40"}, From e7589301ea4f7d23aec1d35c36daefca2f36a02c Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Thu, 24 Oct 2019 12:47:46 -0400 Subject: [PATCH 09/34] enable json formatting, use queryoptions --- api/agent.go | 10 +--------- api/agent_test.go | 16 ++++++++++++++-- client/monitor_endpoint.go | 16 ++++++++-------- command/agent/agent_endpoint.go | 15 +++++++++++---- command/agent/agent_endpoint_test.go | 6 +++--- command/agent/monitor/monitor.go | 3 +++ command/agent_monitor.go | 16 +++++++++++++++- nomad/client_monitor_endpoint.go | 2 +- 8 files changed, 56 insertions(+), 28 deletions(-) diff --git a/api/agent.go b/api/agent.go index 1fa20a00e..6e381e4ba 100644 --- a/api/agent.go +++ b/api/agent.go @@ -240,20 +240,13 @@ func (a *Agent) Health() (*AgentHealthResponse, error) { // Monitor returns a channel which will receive streaming logs from the agent // Providing a non-nil stopCh can be used to close the connection and stop log streaming -func (a *Agent) Monitor(loglevel string, nodeID string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { +func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { r, err := a.client.newRequest("GET", "/v1/agent/monitor") if err != nil { return nil, err } r.setQueryOptions(q) - if loglevel != "" { - r.params.Add("loglevel", loglevel) - } - if nodeID != "" { - r.params.Add("nodeID", nodeID) - } - _, resp, err := requireOK(a.client.doRequest(r)) if err != nil { return nil, err @@ -267,7 +260,6 @@ func (a *Agent) Monitor(loglevel string, nodeID string, stopCh <-chan struct{}, for { select { case <-stopCh: - close(logCh) return default: } diff --git a/api/agent_test.go b/api/agent_test.go index a8ec1eb14..8c18cc2dc 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -270,7 +270,12 @@ func TestAgent_MonitorServer(t *testing.T) { agent := c.Agent() doneCh := make(chan struct{}) - logCh, err := agent.Monitor("debug", "", doneCh, nil) + q := &QueryOptions{ + Params: map[string]string{ + "log-level": "debug", + }, + } + logCh, err := agent.Monitor(doneCh, q) defer close(doneCh) if err != nil { t.Fatalf("err: %v", err) @@ -301,9 +306,16 @@ func TestAgent_MonitorWithNode(t *testing.T) { agent := c.Agent() id, _ := uuid.GenerateUUID() + q := &QueryOptions{ + Params: map[string]string{ + "log-level": "debug", + "node-id": id, + }, + } + doneCh := make(chan struct{}) // todo need to create or stub a nodeid? 
- logCh, err := agent.Monitor("debug", id, doneCh, nil) + logCh, err := agent.Monitor(doneCh, q) defer close(doneCh) if err != nil { t.Fatalf("err: %v", err) diff --git a/client/monitor_endpoint.go b/client/monitor_endpoint.go index 6d4aa4183..181bfad84 100644 --- a/client/monitor_endpoint.go +++ b/client/monitor_endpoint.go @@ -33,29 +33,29 @@ func (m *Monitor) monitor(conn io.ReadWriteCloser) { defer conn.Close() // Decode arguments - var req cstructs.MonitorRequest + var args cstructs.MonitorRequest decoder := codec.NewDecoder(conn, structs.MsgpackHandle) encoder := codec.NewEncoder(conn, structs.MsgpackHandle) - if err := decoder.Decode(&req); err != nil { + if err := decoder.Decode(&args); err != nil { handleStreamResultError(err, helper.Int64ToPtr(500), encoder) return } // Check acl - if aclObj, err := m.c.ResolveToken(req.QueryOptions.AuthToken); err != nil { + if aclObj, err := m.c.ResolveToken(args.QueryOptions.AuthToken); err != nil { handleStreamResultError(err, helper.Int64ToPtr(403), encoder) return - } else if aclObj != nil && !aclObj.AllowNsOp(req.Namespace, acl.NamespaceCapabilityReadFS) { + } else if aclObj != nil && !aclObj.AllowNsOp(args.Namespace, acl.NamespaceCapabilityReadFS) { handleStreamResultError(structs.ErrPermissionDenied, helper.Int64ToPtr(403), encoder) return } var logLevel log.Level - if req.LogLevel == "" { + if args.LogLevel == "" { logLevel = log.LevelFromString("INFO") } else { - logLevel = log.LevelFromString(req.LogLevel) + logLevel = log.LevelFromString(args.LogLevel) } if logLevel == log.NoLevel { @@ -69,13 +69,13 @@ func (m *Monitor) monitor(conn io.ReadWriteCloser) { defer cancel() monitor := monitor.New(512, m.c.logger, &log.LoggerOptions{ + JSONFormat: args.LogJSON, Level: logLevel, - JSONFormat: false, }) go func() { if _, err := conn.Read(nil); err != nil { - close(stopCh) + // One end of the pipe explicitly closed, exit cancel() return } diff --git a/command/agent/agent_endpoint.go b/command/agent/agent_endpoint.go index b99b62f60..5b92fe07d 100644 --- a/command/agent/agent_endpoint.go +++ b/command/agent/agent_endpoint.go @@ -9,6 +9,7 @@ import ( "net" "net/http" "sort" + "strconv" "strings" "github.com/docker/docker/pkg/ioutils" @@ -165,7 +166,7 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( } // Get the provided loglevel. 
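The level handling above is small but worth calling out: an empty level falls back to INFO, and anything hclog cannot parse is rejected before a monitor is ever created. A compact sketch of that guard follows; the helper name and error text are illustrative, though the tests later in this series do assert on an "Unknown log level" error.

package monitorlevel

import (
	"fmt"

	hclog "github.com/hashicorp/go-hclog"
)

// parseMonitorLevel mirrors the endpoint's guard: empty means INFO, and a level
// hclog cannot parse (hclog.NoLevel) is an error rather than a silent default.
func parseMonitorLevel(raw string) (hclog.Level, error) {
	if raw == "" {
		raw = "INFO"
	}
	level := hclog.LevelFromString(raw) // "warn" -> hclog.Warn, "bogus" -> hclog.NoLevel
	if level == hclog.NoLevel {
		return hclog.NoLevel, fmt.Errorf("Unknown log level: %s", raw)
	}
	return level, nil
}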
- logLevel := req.URL.Query().Get("loglevel") + logLevel := req.URL.Query().Get("log-level") if logLevel == "" { logLevel = "INFO" } @@ -175,13 +176,19 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( } // Determine if we are targeting a server or client - nodeID := req.URL.Query().Get("nodeID") + nodeID := req.URL.Query().Get("node-id") + + logJSONStr := req.URL.Query().Get("log-json") + logJSON, err := strconv.ParseBool(logJSONStr) + if err != nil { + logJSON = false + } // Build the request and parse the ACL token args := cstructs.MonitorRequest{ NodeID: nodeID, LogLevel: logLevel, - LogJSON: false, + LogJSON: logJSON, } s.parse(resp, req, &args.QueryOptions.Region, &args.QueryOptions) @@ -208,7 +215,7 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( decoder := codec.NewDecoder(httpPipe, structs.MsgpackHandle) encoder := codec.NewEncoder(httpPipe, structs.MsgpackHandle) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(req.Context()) go func() { <-ctx.Done() httpPipe.Close() diff --git a/command/agent/agent_endpoint_test.go b/command/agent/agent_endpoint_test.go index 72be97b7e..9c4cadb5e 100644 --- a/command/agent/agent_endpoint_test.go +++ b/command/agent/agent_endpoint_test.go @@ -256,7 +256,7 @@ func TestHTTP_AgentMonitor(t *testing.T) { httpTest(t, nil, func(s *TestAgent) { { - req, err := http.NewRequest("GET", "/v1/agent/monitor?loglevel=unknown", nil) + req, err := http.NewRequest("GET", "/v1/agent/monitor?log-level=unknown", nil) require.Nil(t, err) resp := newClosableRecorder() @@ -269,7 +269,7 @@ func TestHTTP_AgentMonitor(t *testing.T) { // check for a specific log { - req, err := http.NewRequest("GET", "/v1/agent/monitor?loglevel=warn", nil) + req, err := http.NewRequest("GET", "/v1/agent/monitor?log-level=warn", nil) require.Nil(t, err) resp := newClosableRecorder() defer resp.Close() @@ -305,7 +305,7 @@ func TestHTTP_AgentMonitor(t *testing.T) { // stream logs for a given node { - req, err := http.NewRequest("GET", "/v1/agent/monitor?loglevel=warn&nodeID="+s.client.NodeID(), nil) + req, err := http.NewRequest("GET", "/v1/agent/monitor?log-level=warn&node-id="+s.client.NodeID(), nil) require.Nil(t, err) resp := newClosableRecorder() defer resp.Close() diff --git a/command/agent/monitor/monitor.go b/command/agent/monitor/monitor.go index 7c56270b0..db220a661 100644 --- a/command/agent/monitor/monitor.go +++ b/command/agent/monitor/monitor.go @@ -41,6 +41,9 @@ func (d *Monitor) Start(stopCh <-chan struct{}) <-chan []byte { case log := <-d.logCh: logCh <- log case <-stopCh: + d.Lock() + defer d.Unlock() + d.logger.DeregisterSink(d.sink) close(d.logCh) return diff --git a/command/agent_monitor.go b/command/agent_monitor.go index 1d0341c1e..daeb35040 100644 --- a/command/agent_monitor.go +++ b/command/agent_monitor.go @@ -4,9 +4,11 @@ import ( "fmt" "os" "os/signal" + "strconv" "strings" "syscall" + "github.com/hashicorp/nomad/api" "github.com/mitchellh/cli" ) @@ -46,10 +48,13 @@ func (c *MonitorCommand) Run(args []string) int { var logLevel string var nodeID string + var logJSON bool + flags := c.Meta.FlagSet(c.Name(), FlagSetClient) flags.Usage = func() { c.Ui.Output(c.Help()) } flags.StringVar(&logLevel, "log-level", "", "") flags.StringVar(&nodeID, "node-id", "", "") + flags.BoolVar(&logJSON, "log-json", false, "") if err := flags.Parse(args); err != nil { return 1 @@ -62,8 +67,17 @@ func (c *MonitorCommand) Run(args []string) int { return 1 } + params := 
map[string]string{ + "log-level": logLevel, + "node-id": nodeID, + "log-json": strconv.FormatBool(logJSON), + } + + query := &api.QueryOptions{ + Params: params, + } eventDoneCh := make(chan struct{}) - logCh, err := client.Agent().Monitor(logLevel, nodeID, eventDoneCh, nil) + logCh, err := client.Agent().Monitor(eventDoneCh, query) if err != nil { c.Ui.Error(fmt.Sprintf("Error starting monitor: %s", err)) c.Ui.Error(commandErrorText(c)) diff --git a/nomad/client_monitor_endpoint.go b/nomad/client_monitor_endpoint.go index bbe93ae16..6eedd75d1 100644 --- a/nomad/client_monitor_endpoint.go +++ b/nomad/client_monitor_endpoint.go @@ -137,7 +137,7 @@ func (m *Monitor) monitor(conn io.ReadWriteCloser) { monitor := monitor.New(512, m.srv.logger, &log.LoggerOptions{ Level: logLevel, - JSONFormat: false, + JSONFormat: args.LogJSON, }) go func() { From 2362008e74d9eb65265fb96a61e6e4262c3b5648 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Thu, 24 Oct 2019 16:55:23 -0400 Subject: [PATCH 10/34] new hclog with standardlogger intercept --- api/agent_test.go | 3 ++- command/agent/agent_endpoint.go | 19 ++++++++++------- command/agent/monitor/monitor.go | 3 +++ nomad/server.go | 2 +- .../hashicorp/go-hclog/interceptlogger.go | 21 +++++++++++++++++++ .../github.com/hashicorp/go-hclog/logger.go | 7 +++++++ vendor/vendor.json | 2 +- 7 files changed, 47 insertions(+), 10 deletions(-) diff --git a/api/agent_test.go b/api/agent_test.go index 8c18cc2dc..bc22d374c 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -262,7 +262,7 @@ func TestAgent_Health(t *testing.T) { assert.True(health.Server.Ok) } -func TestAgent_MonitorServer(t *testing.T) { +func TestAgent_Monitor(t *testing.T) { t.Parallel() c, s := makeClient(t, nil, nil) defer s.Stop() @@ -275,6 +275,7 @@ func TestAgent_MonitorServer(t *testing.T) { "log-level": "debug", }, } + logCh, err := agent.Monitor(doneCh, q) defer close(doneCh) if err != nil { diff --git a/command/agent/agent_endpoint.go b/command/agent/agent_endpoint.go index 5b92fe07d..d5fe6a0df 100644 --- a/command/agent/agent_endpoint.go +++ b/command/agent/agent_endpoint.go @@ -198,14 +198,19 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( // Make the RPC var handler structs.StreamingRpcHandler var handlerErr error - if useLocalClient { - handler, handlerErr = s.agent.Client().StreamingRpcHandler("Agent.Monitor") - } else if useClientRPC { - handler, handlerErr = s.agent.Client().RemoteStreamingRpcHandler("Agent.Monitor") - } else if useServerRPC { - handler, handlerErr = s.agent.Server().StreamingRpcHandler("Agent.Monitor") + if nodeID != "" { + if useLocalClient { + handler, handlerErr = s.agent.Client().StreamingRpcHandler("Agent.Monitor") + } else if useClientRPC { + handler, handlerErr = s.agent.Client().RemoteStreamingRpcHandler("Agent.Monitor") + } else if useServerRPC { + handler, handlerErr = s.agent.Server().StreamingRpcHandler("Agent.Monitor") + } else { + handlerErr = CodedError(400, "No local Node and node_id not provided") + } } else { - handlerErr = CodedError(400, "No local Node and node_id not provided") + // No node id we want to monitor this server + handler, handlerErr = s.agent.Server().StreamingRpcHandler("Agent.Monitor") } if handlerErr != nil { diff --git a/command/agent/monitor/monitor.go b/command/agent/monitor/monitor.go index db220a661..d93c02b9e 100644 --- a/command/agent/monitor/monitor.go +++ b/command/agent/monitor/monitor.go @@ -57,6 +57,9 @@ func (d *Monitor) 
Start(stopCh <-chan struct{}) <-chan []byte { // Write attemps to send latest log to logCh // it drops the log if channel is unavailable to receive func (d *Monitor) Write(p []byte) (n int, err error) { + d.Lock() + defer d.Unlock() + bytes := make([]byte, len(p)) copy(bytes, p) diff --git a/nomad/server.go b/nomad/server.go index b6c1ef844..ac5edcf98 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -1276,7 +1276,7 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string) ( if s.config.UpgradeVersion != "" { conf.Tags[AutopilotVersionTag] = s.config.UpgradeVersion } - logger := s.logger.StandardLogger(&log.StandardLoggerOptions{InferLevels: true}) + logger := s.logger.StandardLoggerIntercept(&log.StandardLoggerOptions{InferLevels: true}) conf.MemberlistConfig.Logger = logger conf.Logger = logger conf.MemberlistConfig.LogOutput = nil diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go index a65f7be19..68f31e42d 100644 --- a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go @@ -1,6 +1,8 @@ package hclog import ( + "io" + "log" "sync" "sync/atomic" ) @@ -193,3 +195,22 @@ func (i *interceptLogger) DeregisterSink(sink SinkAdapter) { atomic.AddInt32(i.sinkCount, -1) } + +// Create a *log.Logger that will send it's data through this Logger. This +// allows packages that expect to be using the standard library to log to +// actually use this logger, which will also send to any registered sinks. +func (l *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(l.StandardWriterIntercept(opts), "", 0) +} + +func (l *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer { + return &stdlogAdapter{ + log: l, + inferLevels: opts.InferLevels, + forceLevel: opts.ForceLevel, + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go index 81045cbfe..48d608714 100644 --- a/vendor/github.com/hashicorp/go-hclog/logger.go +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -126,6 +126,7 @@ type Logger interface { // Indicate if ERROR logs would be emitted. This and the other Is* guards IsError() bool + // ImpliedArgs returns With key/value pairs ImpliedArgs() []interface{} // Creates a sublogger that will always have the given key/value pairs @@ -224,6 +225,12 @@ type InterceptLogger interface { // This sets the name of the logger to the value directly, unlike Named which honor // the current name as well. 
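Routing serf and memberlist through StandardLoggerIntercept (rather than the plain StandardLogger) is what lets their output reach any sinks registered for a monitor session. A small helper sketching that adaptation for libraries that only accept a *log.Logger:

package logadapt

import (
	stdlog "log"

	hclog "github.com/hashicorp/go-hclog"
)

// stdLoggerFor returns a stdlib logger whose writes are re-leveled (via the
// "[WARN] ..." prefix inference) and fanned out through the intercept logger,
// so registered monitor sinks see them as well.
func stdLoggerFor(l hclog.InterceptLogger) *stdlog.Logger {
	return l.StandardLoggerIntercept(&hclog.StandardLoggerOptions{InferLevels: true})
}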
ResetNamedIntercept(name string) InterceptLogger + + // Return a value that conforms to the stdlib log.Logger interface + StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger + + // Return a value that conforms to io.Writer, which can be passed into log.SetOutput() + StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer } // SinkAdapter describes the interface that must be implemented diff --git a/vendor/vendor.json b/vendor/vendor.json index 9a41d61e7..32d11e9f3 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -209,7 +209,7 @@ {"path":"github.com/hashicorp/go-envparse","checksumSHA1":"FKmqR4DC3nCXtnT9pe02z5CLNWo=","revision":"310ca1881b22af3522e3a8638c0b426629886196","revisionTime":"2018-01-19T21:58:41Z"}, {"path":"github.com/hashicorp/go-getter","checksumSHA1":"d4brua17AGQqMNtngK4xKOUwboY=","revision":"f5101da0117392c6e7960c934f05a2fd689a5b5f","revisionTime":"2019-08-22T19:45:07Z"}, {"path":"github.com/hashicorp/go-getter/helper/url","checksumSHA1":"9J+kDr29yDrwsdu2ULzewmqGjpA=","revision":"b345bfcec894fb7ff3fdf9b21baf2f56ea423d98","revisionTime":"2018-04-10T17:49:45Z"}, - {"path":"github.com/hashicorp/go-hclog","checksumSHA1":"+gkR2S9qSVVy3gor4p0Z86RMgZI=","revision":"2bec91ee9db63c8546a105f89da3eb63037c5e6c","revisionTime":"2019-10-18T20:14:58Z","version":"f-multi-sink","versionExact":"f-multi-sink"}, + {"path":"github.com/hashicorp/go-hclog","checksumSHA1":"t9zfE6XfBVzoruZdfV3vDPtoIa8=","revision":"8a936e0aa8de099a865e59f9a20744221f93b469","revisionTime":"2019-10-24T20:40:26Z","version":"f-multi-sink","versionExact":"f-multi-sink"}, {"path":"github.com/hashicorp/go-immutable-radix","checksumSHA1":"Cas2nprG6pWzf05A2F/OlnjUu2Y=","revision":"8aac2701530899b64bdea735a1de8da899815220","revisionTime":"2017-07-25T22:12:15Z"}, {"path":"github.com/hashicorp/go-memdb","checksumSHA1":"FMAvwDar2bQyYAW4XMFhAt0J5xA=","revision":"20ff6434c1cc49b80963d45bf5c6aa89c78d8d57","revisionTime":"2017-08-31T20:15:40Z"}, {"path":"github.com/hashicorp/go-msgpack/codec","checksumSHA1":"CKGYNUDKre3Z2g4hHNVfp5nTcfA=","revision":"23165f7bc3c2dda1891434ebb9da1511a7bafc1c","revisionTime":"2019-09-27T12:33:13Z","version":"upstream-08f7b40","versionExact":"upstream-08f7b40"}, From 3c0082f13278668f45439ffad583e32bcb38306a Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Thu, 24 Oct 2019 16:58:12 -0400 Subject: [PATCH 11/34] use intercepting loggers for rpchandlers --- nomad/rpc.go | 4 ++-- nomad/server.go | 2 +- nomad/server_setup_oss.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/nomad/rpc.go b/nomad/rpc.go index 29dfe78c8..41d47fe73 100644 --- a/nomad/rpc.go +++ b/nomad/rpc.go @@ -54,11 +54,11 @@ type rpcHandler struct { } func newRpcHandler(s *Server) *rpcHandler { - logger := s.logger.Named("rpc") + logger := s.logger.NamedIntercept("rpc") return &rpcHandler{ Server: s, logger: logger, - gologger: logger.StandardLogger(&log.StandardLoggerOptions{InferLevels: true}), + gologger: logger.StandardLoggerIntercept(&log.StandardLoggerOptions{InferLevels: true}), } } diff --git a/nomad/server.go b/nomad/server.go index ac5edcf98..c7f195b23 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -1106,7 +1106,7 @@ func (s *Server) setupRaft() error { s.raftTransport = trans // Make sure we set the Logger. 
- logger := s.logger.StandardLogger(&log.StandardLoggerOptions{InferLevels: true}) + logger := s.logger.StandardLoggerIntercept(&log.StandardLoggerOptions{InferLevels: true}) s.config.RaftConfig.Logger = logger s.config.RaftConfig.LogOutput = nil diff --git a/nomad/server_setup_oss.go b/nomad/server_setup_oss.go index b73d1fa45..7436d9eb6 100644 --- a/nomad/server_setup_oss.go +++ b/nomad/server_setup_oss.go @@ -12,7 +12,7 @@ type EnterpriseState struct{} func (s *Server) setupEnterprise(config *Config) error { // Set up the OSS version of autopilot apDelegate := &AutopilotDelegate{s} - s.autopilot = autopilot.NewAutopilot(s.logger.StandardLogger(&log.StandardLoggerOptions{InferLevels: true}), apDelegate, config.AutopilotInterval, config.ServerHealthInterval) + s.autopilot = autopilot.NewAutopilot(s.logger.StandardLoggerIntercept(&log.StandardLoggerOptions{InferLevels: true}), apDelegate, config.AutopilotInterval, config.ServerHealthInterval) return nil } From 735530ca4f578ab05325c8f0da91a93f150d83ad Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Fri, 25 Oct 2019 10:32:20 -0400 Subject: [PATCH 12/34] client monitor endpoint tests --- api/agent_test.go | 68 ++++++--- nomad/client_monitor_endpoint.go | 3 +- nomad/client_monitor_endpoint_test.go | 189 ++++++++++++++++++++++++++ 3 files changed, 240 insertions(+), 20 deletions(-) create mode 100644 nomad/client_monitor_endpoint_test.go diff --git a/api/agent_test.go b/api/agent_test.go index bc22d374c..9d731c399 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -1,14 +1,16 @@ package api import ( + "fmt" "reflect" "sort" + "strings" "testing" "time" + "github.com/kr/pretty" "github.com/stretchr/testify/require" - "github.com/hashicorp/go-uuid" "github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/assert" ) @@ -262,17 +264,47 @@ func TestAgent_Health(t *testing.T) { assert.True(health.Server.Ok) } -func TestAgent_Monitor(t *testing.T) { +func TestAgent_MonitorWithNode(t *testing.T) { t.Parallel() - c, s := makeClient(t, nil, nil) + rpcPort := 0 + c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { + rpcPort = c.Ports.RPC + c.Client = &testutil.ClientConfig{ + Enabled: true, + } + }) defer s.Stop() + require.NoError(t, c.Agent().SetServers([]string{fmt.Sprintf("127.0.0.1:%d", rpcPort)})) + agent := c.Agent() + index := uint64(0) + var node *NodeListStub + // grab a node + testutil.WaitForResult(func() (bool, error) { + nodes, qm, err := c.Nodes().List(&QueryOptions{WaitIndex: index}) + if err != nil { + return false, err + } + index = qm.LastIndex + if len(nodes) != 1 { + return false, fmt.Errorf("expected 1 node but found: %s", pretty.Sprint(nodes)) + } + if nodes[0].Status != "ready" { + return false, fmt.Errorf("node not ready: %s", nodes[0].Status) + } + node = nodes[0] + return true, nil + }, func(err error) { + t.Fatalf("err: %v", err) + }) + doneCh := make(chan struct{}) q := &QueryOptions{ Params: map[string]string{ "log-level": "debug", + "node-id": node.ID, }, } @@ -283,34 +315,32 @@ func TestAgent_Monitor(t *testing.T) { } // make a request to generate some logs - _, err = agent.Region() + _, err = agent.NodeName() require.NoError(t, err) - // Wait for the first log message and validate it + // Wait for a log message +OUTER: for { select { case log := <-logCh: - if log == " " { - return + if strings.Contains(log, "[DEBUG]") { + break OUTER } - require.Contains(t, log, "[DEBUG]") - case <-time.After(10 * time.Second): - require.Fail(t, "failed 
to get a log message") + case <-time.After(2 * time.Second): + require.Fail(t, "failed to get a DEBUG log message") } } } -func TestAgent_MonitorWithNode(t *testing.T) { +func TestAgent_Monitor(t *testing.T) { t.Parallel() c, s := makeClient(t, nil, nil) defer s.Stop() agent := c.Agent() - id, _ := uuid.GenerateUUID() q := &QueryOptions{ Params: map[string]string{ "log-level": "debug", - "node-id": id, }, } @@ -326,16 +356,16 @@ func TestAgent_MonitorWithNode(t *testing.T) { _, err = agent.Region() require.NoError(t, err) - // Wait for the first log message and validate it + // Wait for a log message +OUTER: for { select { case log := <-logCh: - if log == " " { - return + if strings.Contains(log, "[DEBUG]") { + break OUTER } - require.Contains(t, log, "[DEBUG]") - case <-time.After(10 * time.Second): - require.Fail(t, "failed to get a log message") + case <-time.After(2 * time.Second): + require.Fail(t, "failed to get a DEBUG log message") } } } diff --git a/nomad/client_monitor_endpoint.go b/nomad/client_monitor_endpoint.go index 6eedd75d1..dada37069 100644 --- a/nomad/client_monitor_endpoint.go +++ b/nomad/client_monitor_endpoint.go @@ -133,6 +133,7 @@ func (m *Monitor) monitor(conn io.ReadWriteCloser) { // NodeID was empty, so monitor this current server stopCh := make(chan struct{}) ctx, cancel := context.WithCancel(context.Background()) + defer close(stopCh) defer cancel() monitor := monitor.New(512, m.srv.logger, &log.LoggerOptions{ @@ -142,7 +143,7 @@ func (m *Monitor) monitor(conn io.ReadWriteCloser) { go func() { if _, err := conn.Read(nil); err != nil { - close(stopCh) + // One end of the pipe closed, exit cancel() return } diff --git a/nomad/client_monitor_endpoint_test.go b/nomad/client_monitor_endpoint_test.go new file mode 100644 index 000000000..6045c9caa --- /dev/null +++ b/nomad/client_monitor_endpoint_test.go @@ -0,0 +1,189 @@ +package nomad + +import ( + "fmt" + "io" + "net" + "strings" + "testing" + "time" + + "github.com/hashicorp/nomad/client" + "github.com/hashicorp/nomad/client/config" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/testutil" + "github.com/stretchr/testify/require" + "github.com/ugorji/go/codec" +) + +func TestMonitor_Monitor_Remote_Server(t *testing.T) { + t.Parallel() + require := require.New(t) + + // start server and client + s1 := TestServer(t, nil) + defer s1.Shutdown() + s2 := TestServer(t, func(c *Config) { + c.DevDisableBootstrap = true + }) + defer s2.Shutdown() + TestJoin(t, s1, s2) + testutil.WaitForLeader(t, s1.RPC) + testutil.WaitForLeader(t, s2.RPC) + + c, cleanup := client.TestClient(t, func(c *config.Config) { + c.Servers = []string{s2.GetConfig().RPCAddr.String()} + }) + defer cleanup() + + testutil.WaitForResult(func() (bool, error) { + nodes := s2.connectedNodes() + return len(nodes) == 1, nil + }, func(err error) { + t.Fatalf("should have a clients") + }) + + // No node ID to monitor the remote server + req := cstructs.MonitorRequest{ + LogLevel: "debug", + NodeID: c.NodeID(), + } + + handler, err := s1.StreamingRpcHandler("Agent.Monitor") + require.Nil(err) + + // create pipe + p1, p2 := net.Pipe() + defer p1.Close() + defer p2.Close() + + errCh := make(chan error) + streamMsg := make(chan *cstructs.StreamErrWrapper) + + go handler(p2) + + // Start decoder + go func() { + decoder := codec.NewDecoder(p1, structs.MsgpackHandle) + for { + var msg cstructs.StreamErrWrapper + if err := decoder.Decode(&msg); err != nil { + if err == io.EOF || 
strings.Contains(err.Error(), "closed") { + return + } + errCh <- fmt.Errorf("error decoding: %v", err) + } + + streamMsg <- &msg + } + }() + + // send request + encoder := codec.NewEncoder(p1, structs.MsgpackHandle) + require.Nil(encoder.Encode(req)) + + timeout := time.After(1 * time.Second) + expected := "[DEBUG]" + received := "" + +OUTER: + for { + select { + case <-timeout: + t.Fatal("timeout waiting for logs") + case err := <-errCh: + t.Fatal(err) + case msg := <-streamMsg: + if msg.Error != nil { + t.Fatalf("Got error: %v", msg.Error.Error()) + } + + received += string(msg.Payload) + if strings.Contains(received, expected) { + require.Nil(p2.Close()) + break OUTER + } + } + } +} + +func TestMonitor_MonitorServer(t *testing.T) { + t.Parallel() + require := require.New(t) + + // start server and client + s := TestServer(t, nil) + defer s.Shutdown() + testutil.WaitForLeader(t, s.RPC) + + // No node ID to monitor the remote server + req := cstructs.MonitorRequest{ + LogLevel: "debug", + } + + handler, err := s.StreamingRpcHandler("Agent.Monitor") + require.Nil(err) + + // create pipe + p1, p2 := net.Pipe() + defer p1.Close() + defer p2.Close() + + errCh := make(chan error) + streamMsg := make(chan *cstructs.StreamErrWrapper) + + go handler(p2) + + // Start decoder + go func() { + decoder := codec.NewDecoder(p1, structs.MsgpackHandle) + for { + var msg cstructs.StreamErrWrapper + if err := decoder.Decode(&msg); err != nil { + if err == io.EOF || strings.Contains(err.Error(), "closed") { + return + } + errCh <- fmt.Errorf("error decoding: %v", err) + } + + streamMsg <- &msg + } + }() + + // send request + encoder := codec.NewEncoder(p1, structs.MsgpackHandle) + require.Nil(encoder.Encode(req)) + + timeout := time.After(1 * time.Second) + expected := "[DEBUG]" + received := "" + + // send logs + go func() { + for { + s.logger.Debug("test log") + time.Sleep(100 * time.Millisecond) + } + }() + +OUTER: + for { + select { + case <-timeout: + t.Fatal("timeout waiting for logs") + case err := <-errCh: + t.Fatal(err) + case msg := <-streamMsg: + if msg.Error != nil { + t.Fatalf("Got error: %v", msg.Error.Error()) + } + + received += string(msg.Payload) + if strings.Contains(received, expected) { + require.Nil(p2.Close()) + break OUTER + } + } + } +} From cd60628b318faa0e69922f9f372797ed2ce3ba56 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Fri, 25 Oct 2019 10:51:18 -0400 Subject: [PATCH 13/34] rpc acl tests for both monitor endpoints --- api/agent_test.go | 1 - client/monitor_endpoint_test.go | 108 +++++++++++++++++++++++++ nomad/client_monitor_endpoint_test.go | 110 +++++++++++++++++++++++++- 3 files changed, 217 insertions(+), 2 deletions(-) diff --git a/api/agent_test.go b/api/agent_test.go index 9d731c399..3514b146f 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -345,7 +345,6 @@ func TestAgent_Monitor(t *testing.T) { } doneCh := make(chan struct{}) - // todo need to create or stub a nodeid? 
logCh, err := agent.Monitor(doneCh, q) defer close(doneCh) if err != nil { diff --git a/client/monitor_endpoint_test.go b/client/monitor_endpoint_test.go index ca95cf731..4b71d674e 100644 --- a/client/monitor_endpoint_test.go +++ b/client/monitor_endpoint_test.go @@ -8,9 +8,11 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/client/config" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/nomad" + "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/require" @@ -93,3 +95,109 @@ OUTER: } } } + +func TestMonitor_Monitor_ACL(t *testing.T) { + t.Parallel() + require := require.New(t) + + // start server + s, root := nomad.TestACLServer(t, nil) + defer s.Shutdown() + testutil.WaitForLeader(t, s.RPC) + + policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityReadFS}) + tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad) + + policyGood := mock.NamespacePolicy(structs.DefaultNamespace, "", + []string{acl.NamespaceCapabilityReadLogs, acl.NamespaceCapabilityReadFS}) + tokenGood := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid", policyGood) + + cases := []struct { + Name string + Token string + ExpectedErr string + }{ + { + Name: "bad token", + Token: tokenBad.SecretID, + ExpectedErr: structs.ErrPermissionDenied.Error(), + }, + { + Name: "good token", + Token: tokenGood.SecretID, + ExpectedErr: "Unknown log level", + }, + { + Name: "root token", + Token: root.SecretID, + ExpectedErr: "Unknown log level", + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + req := &cstructs.MonitorRequest{ + LogLevel: "unknown", + QueryOptions: structs.QueryOptions{ + Namespace: structs.DefaultNamespace, + Region: "global", + AuthToken: tc.Token, + }, + } + + handler, err := s.StreamingRpcHandler("Agent.Monitor") + require.Nil(err) + + // create pipe + p1, p2 := net.Pipe() + defer p1.Close() + defer p2.Close() + + errCh := make(chan error) + streamMsg := make(chan *cstructs.StreamErrWrapper) + + go handler(p2) + + // Start decoder + go func() { + decoder := codec.NewDecoder(p1, structs.MsgpackHandle) + for { + var msg cstructs.StreamErrWrapper + if err := decoder.Decode(&msg); err != nil { + if err == io.EOF || strings.Contains(err.Error(), "closed") { + return + } + errCh <- fmt.Errorf("error decoding: %v", err) + } + + streamMsg <- &msg + } + }() + + // send request + encoder := codec.NewEncoder(p1, structs.MsgpackHandle) + require.Nil(encoder.Encode(req)) + + timeout := time.After(5 * time.Second) + OUTER: + for { + select { + case <-timeout: + t.Fatal("timeout") + case err := <-errCh: + t.Fatal(err) + case msg := <-streamMsg: + if msg.Error == nil { + continue + } + + if strings.Contains(msg.Error.Error(), tc.ExpectedErr) { + break OUTER + } else { + t.Fatalf("Bad error: %v", msg.Error) + } + } + } + }) + } +} diff --git a/nomad/client_monitor_endpoint_test.go b/nomad/client_monitor_endpoint_test.go index 6045c9caa..27496f739 100644 --- a/nomad/client_monitor_endpoint_test.go +++ b/nomad/client_monitor_endpoint_test.go @@ -8,9 +8,11 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/client" "github.com/hashicorp/nomad/client/config" cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" 
"github.com/stretchr/testify/require" @@ -112,7 +114,7 @@ func TestMonitor_MonitorServer(t *testing.T) { t.Parallel() require := require.New(t) - // start server and client + // start server s := TestServer(t, nil) defer s.Shutdown() testutil.WaitForLeader(t, s.RPC) @@ -187,3 +189,109 @@ OUTER: } } } + +func TestMonitor_Monitor_ACL(t *testing.T) { + t.Parallel() + require := require.New(t) + + // start server + s, root := TestACLServer(t, nil) + defer s.Shutdown() + testutil.WaitForLeader(t, s.RPC) + + policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityReadFS}) + tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad) + + policyGood := mock.NamespacePolicy(structs.DefaultNamespace, "", + []string{acl.NamespaceCapabilityReadLogs, acl.NamespaceCapabilityReadFS}) + tokenGood := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid", policyGood) + + cases := []struct { + Name string + Token string + ExpectedErr string + }{ + { + Name: "bad token", + Token: tokenBad.SecretID, + ExpectedErr: structs.ErrPermissionDenied.Error(), + }, + { + Name: "good token", + Token: tokenGood.SecretID, + ExpectedErr: "Unknown log level", + }, + { + Name: "root token", + Token: root.SecretID, + ExpectedErr: "Unknown log level", + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + req := &cstructs.MonitorRequest{ + LogLevel: "unknown", + QueryOptions: structs.QueryOptions{ + Namespace: structs.DefaultNamespace, + Region: "global", + AuthToken: tc.Token, + }, + } + + handler, err := s.StreamingRpcHandler("Agent.Monitor") + require.Nil(err) + + // create pipe + p1, p2 := net.Pipe() + defer p1.Close() + defer p2.Close() + + errCh := make(chan error) + streamMsg := make(chan *cstructs.StreamErrWrapper) + + go handler(p2) + + // Start decoder + go func() { + decoder := codec.NewDecoder(p1, structs.MsgpackHandle) + for { + var msg cstructs.StreamErrWrapper + if err := decoder.Decode(&msg); err != nil { + if err == io.EOF || strings.Contains(err.Error(), "closed") { + return + } + errCh <- fmt.Errorf("error decoding: %v", err) + } + + streamMsg <- &msg + } + }() + + // send request + encoder := codec.NewEncoder(p1, structs.MsgpackHandle) + require.Nil(encoder.Encode(req)) + + timeout := time.After(5 * time.Second) + OUTER: + for { + select { + case <-timeout: + t.Fatal("timeout") + case err := <-errCh: + t.Fatal(err) + case msg := <-streamMsg: + if msg.Error == nil { + continue + } + + if strings.Contains(msg.Error.Error(), tc.ExpectedErr) { + break OUTER + } else { + t.Fatalf("Bad error: %v", msg.Error) + } + } + } + }) + } +} From c8d60dd6f9f1532a879de324c4fea74556f35158 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Fri, 25 Oct 2019 10:58:54 -0400 Subject: [PATCH 14/34] only look up rpchandler for node if we have nodeid fix some comments and nomad monitor -h output --- command/agent/agent_endpoint.go | 9 ++++----- command/agent/agent_endpoint_test.go | 7 ++----- command/agent/command.go | 1 - command/agent/testagent.go | 2 +- command/agent_monitor.go | 14 +++++++++++++- 5 files changed, 20 insertions(+), 13 deletions(-) diff --git a/command/agent/agent_endpoint.go b/command/agent/agent_endpoint.go index d5fe6a0df..044eadb16 100644 --- a/command/agent/agent_endpoint.go +++ b/command/agent/agent_endpoint.go @@ -192,13 +192,12 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( } s.parse(resp, req, &args.QueryOptions.Region, &args.QueryOptions) - // Determine the 
handler to use - useLocalClient, useClientRPC, useServerRPC := s.rpcHandlerForNode(nodeID) - // Make the RPC var handler structs.StreamingRpcHandler var handlerErr error if nodeID != "" { + // Determine the handler to use + useLocalClient, useClientRPC, useServerRPC := s.rpcHandlerForNode(nodeID) if useLocalClient { handler, handlerErr = s.agent.Client().StreamingRpcHandler("Agent.Monitor") } else if useClientRPC { @@ -229,10 +228,10 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( // Create an ouput that gets flushed on every write output := ioutils.NewWriteFlusher(resp) - // Create a channel that decodes the results + // create an error channel to handle errors errCh := make(chan HTTPCodedError, 2) - // stream the response + // stream response go func() { defer cancel() diff --git a/command/agent/agent_endpoint_test.go b/command/agent/agent_endpoint_test.go index 9c4cadb5e..0a4cb1f01 100644 --- a/command/agent/agent_endpoint_test.go +++ b/command/agent/agent_endpoint_test.go @@ -279,8 +279,7 @@ func TestHTTP_AgentMonitor(t *testing.T) { require.NoError(t, err) }() - // send the same log a few times until monitor sink is - // fully set up + // send the same log until monitor sink is set up maxLogAttempts := 10 tried := 0 testutil.WaitForResult(func() (bool, error) { @@ -315,8 +314,7 @@ func TestHTTP_AgentMonitor(t *testing.T) { require.NoError(t, err) }() - // send the same log a few times until monitor sink is - // fully set up + // send the same log until monitor sink is set up maxLogAttempts := 10 tried := 0 out := "" @@ -370,7 +368,6 @@ func TestHTTP_AgentForceLeave(t *testing.T) { httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("PUT", "/v1/agent/force-leave?node=foo", nil) - require.Nil(t, err) if err != nil { t.Fatalf("err: %v", err) } diff --git a/command/agent/command.go b/command/agent/command.go index c0567bda8..da3e40e90 100644 --- a/command/agent/command.go +++ b/command/agent/command.go @@ -382,7 +382,6 @@ func (c *Command) setupLoggers(config *Config) (*gatedwriter.Writer, io.Writer) Writer: &cli.UiWriter{Ui: c.Ui}, } - // TODO can this be killed c.logFilter = LevelFilter() c.logFilter.MinLevel = logutils.LogLevel(strings.ToUpper(config.LogLevel)) c.logFilter.Writer = logGate diff --git a/command/agent/testagent.go b/command/agent/testagent.go index 1dc2e0d82..a36a46116 100644 --- a/command/agent/testagent.go +++ b/command/agent/testagent.go @@ -206,7 +206,7 @@ RETRY: func (a *TestAgent) start() (*Agent, error) { if a.LogOutput == nil { - a.LogOutput = io.MultiWriter(testlog.NewWriter(a.T)) + a.LogOutput = testlog.NewWriter(a.T) } inm := metrics.NewInmemSink(10*time.Second, time.Minute) diff --git a/command/agent_monitor.go b/command/agent_monitor.go index daeb35040..19e13abcb 100644 --- a/command/agent_monitor.go +++ b/command/agent_monitor.go @@ -28,7 +28,19 @@ Usage: nomad monitor [options] General Options: - ` + generalOptionsUsage() + ` + generalOptionsUsage() + ` + +Monitor Specific Options: + + -log-level + Sets the log level to monitor (default: INFO) + + -node-id + Sets the specific node to monitor + + -log-json + Sets log output to JSON format + ` return strings.TrimSpace(helpText) } From 92d6a30f86275ec4c3dfb2ed355417e582565b0f Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Fri, 25 Oct 2019 14:25:19 -0400 Subject: [PATCH 15/34] agent:read acl policy for monitor --- client/monitor_endpoint.go | 5 ++--- client/monitor_endpoint_test.go | 14 
+++++++++----- command/agent/agent_endpoint.go | 2 +- command/agent/monitor/monitor.go | 2 +- nomad/client_monitor_endpoint.go | 5 ++--- nomad/client_monitor_endpoint_test.go | 3 +-- 6 files changed, 16 insertions(+), 15 deletions(-) diff --git a/client/monitor_endpoint.go b/client/monitor_endpoint.go index 181bfad84..b02a5c4b6 100644 --- a/client/monitor_endpoint.go +++ b/client/monitor_endpoint.go @@ -7,7 +7,6 @@ import ( "strings" "time" - "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/command/agent/monitor" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" @@ -43,10 +42,10 @@ func (m *Monitor) monitor(conn io.ReadWriteCloser) { } // Check acl - if aclObj, err := m.c.ResolveToken(args.QueryOptions.AuthToken); err != nil { + if aclObj, err := m.c.ResolveToken(args.AuthToken); err != nil { handleStreamResultError(err, helper.Int64ToPtr(403), encoder) return - } else if aclObj != nil && !aclObj.AllowNsOp(args.Namespace, acl.NamespaceCapabilityReadFS) { + } else if aclObj != nil && !aclObj.AllowAgentRead() { handleStreamResultError(structs.ErrPermissionDenied, helper.Int64ToPtr(403), encoder) return } diff --git a/client/monitor_endpoint_test.go b/client/monitor_endpoint_test.go index 4b71d674e..38acd6fe6 100644 --- a/client/monitor_endpoint_test.go +++ b/client/monitor_endpoint_test.go @@ -86,7 +86,6 @@ OUTER: if msg.Error != nil { t.Fatalf("Got error: %v", msg.Error.Error()) } - received += string(msg.Payload) if strings.Contains(received, expected) { require.Nil(p2.Close()) @@ -105,11 +104,16 @@ func TestMonitor_Monitor_ACL(t *testing.T) { defer s.Shutdown() testutil.WaitForLeader(t, s.RPC) - policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityReadFS}) + c, cleanup := TestClient(t, func(c *config.Config) { + c.ACLEnabled = true + c.Servers = []string{s.GetConfig().RPCAddr.String()} + }) + defer cleanup() + + policyBad := mock.NodePolicy(acl.PolicyDeny) tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad) - policyGood := mock.NamespacePolicy(structs.DefaultNamespace, "", - []string{acl.NamespaceCapabilityReadLogs, acl.NamespaceCapabilityReadFS}) + policyGood := mock.AgentPolicy(acl.PolicyRead) tokenGood := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid", policyGood) cases := []struct { @@ -145,7 +149,7 @@ func TestMonitor_Monitor_ACL(t *testing.T) { }, } - handler, err := s.StreamingRpcHandler("Agent.Monitor") + handler, err := c.StreamingRpcHandler("Agent.Monitor") require.Nil(err) // create pipe diff --git a/command/agent/agent_endpoint.go b/command/agent/agent_endpoint.go index 044eadb16..c48feb50f 100644 --- a/command/agent/agent_endpoint.go +++ b/command/agent/agent_endpoint.go @@ -225,7 +225,7 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( httpPipe.Close() }() - // Create an ouput that gets flushed on every write + // Create an output that gets flushed on every write output := ioutils.NewWriteFlusher(resp) // create an error channel to handle errors diff --git a/command/agent/monitor/monitor.go b/command/agent/monitor/monitor.go index d93c02b9e..f648c8631 100644 --- a/command/agent/monitor/monitor.go +++ b/command/agent/monitor/monitor.go @@ -54,7 +54,7 @@ func (d *Monitor) Start(stopCh <-chan struct{}) <-chan []byte { return logCh } -// Write attemps to send latest log to logCh +// Write attempts to send latest log to logCh // it drops the log if channel is unavailable to receive func (d *Monitor) Write(p []byte) (n int, err error) { d.Lock() 
diff --git a/nomad/client_monitor_endpoint.go b/nomad/client_monitor_endpoint.go index dada37069..0f9a182ec 100644 --- a/nomad/client_monitor_endpoint.go +++ b/nomad/client_monitor_endpoint.go @@ -9,7 +9,6 @@ import ( "strings" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/nomad/acl" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/command/agent/monitor" "github.com/hashicorp/nomad/helper" @@ -43,8 +42,8 @@ func (m *Monitor) monitor(conn io.ReadWriteCloser) { if aclObj, err := m.srv.ResolveToken(args.AuthToken); err != nil { handleStreamResultError(err, nil, encoder) return - } else if aclObj != nil && !aclObj.AllowNsOp(args.Namespace, acl.NamespaceCapabilityReadFS) { - handleStreamResultError(structs.ErrPermissionDenied, nil, encoder) + } else if aclObj != nil && !aclObj.AllowAgentRead() { + handleStreamResultError(structs.ErrPermissionDenied, helper.Int64ToPtr(403), encoder) return } diff --git a/nomad/client_monitor_endpoint_test.go b/nomad/client_monitor_endpoint_test.go index 27496f739..27e4cb84f 100644 --- a/nomad/client_monitor_endpoint_test.go +++ b/nomad/client_monitor_endpoint_test.go @@ -202,8 +202,7 @@ func TestMonitor_Monitor_ACL(t *testing.T) { policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityReadFS}) tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad) - policyGood := mock.NamespacePolicy(structs.DefaultNamespace, "", - []string{acl.NamespaceCapabilityReadLogs, acl.NamespaceCapabilityReadFS}) + policyGood := mock.AgentPolicy(acl.PolicyRead) tokenGood := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid", policyGood) cases := []struct { From 9bb606a0ae00f857229002268dd48b1b32a3dfed Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Mon, 28 Oct 2019 08:25:47 -0400 Subject: [PATCH 16/34] update go-hclog dep remove duplicate lock --- command/agent/monitor/monitor.go | 2 - command/agent/monitor/monitor_test.go | 4 +- .../github.com/NYTimes/gziphandler/README.md | 8 +- vendor/github.com/NYTimes/gziphandler/go.mod | 5 - vendor/github.com/NYTimes/gziphandler/go.sum | 7 - vendor/github.com/NYTimes/gziphandler/gzip.go | 225 ++++-------------- .../hashicorp/go-hclog/intlogger.go | 15 +- vendor/vendor.json | 4 +- 8 files changed, 70 insertions(+), 200 deletions(-) delete mode 100644 vendor/github.com/NYTimes/gziphandler/go.mod delete mode 100644 vendor/github.com/NYTimes/gziphandler/go.sum diff --git a/command/agent/monitor/monitor.go b/command/agent/monitor/monitor.go index f648c8631..1d063ae1d 100644 --- a/command/agent/monitor/monitor.go +++ b/command/agent/monitor/monitor.go @@ -66,8 +66,6 @@ func (d *Monitor) Write(p []byte) (n int, err error) { select { case d.logCh <- bytes: default: - d.Lock() - defer d.Unlock() d.droppedCount++ if d.droppedCount > 10 { d.logger.Warn("Monitor dropped %d logs during monitor request", d.droppedCount) diff --git a/command/agent/monitor/monitor_test.go b/command/agent/monitor/monitor_test.go index 21be76b47..b513db44e 100644 --- a/command/agent/monitor/monitor_test.go +++ b/command/agent/monitor/monitor_test.go @@ -54,9 +54,9 @@ func TestMonitor_DroppedMessages(t *testing.T) { m.Start(doneCh) - for i := 0; i <= 6; i++ { + for i := 0; i <= 9; i++ { logger.Debug("test message") } - assert.Equal(t, 1, m.droppedCount) + assert.Greater(t, m.droppedCount, 0) } diff --git a/vendor/github.com/NYTimes/gziphandler/README.md b/vendor/github.com/NYTimes/gziphandler/README.md index 6259acaca..6d7246070 100644 --- 
a/vendor/github.com/NYTimes/gziphandler/README.md +++ b/vendor/github.com/NYTimes/gziphandler/README.md @@ -6,10 +6,6 @@ response body, for clients which support it. Although it's usually simpler to leave that to a reverse proxy (like nginx or Varnish), this package is useful when that's undesirable. -## Install -```bash -go get -u github.com/NYTimes/gziphandler -``` ## Usage @@ -52,5 +48,5 @@ The docs can be found at [godoc.org][docs], as usual. -[docs]: https://godoc.org/github.com/NYTimes/gziphandler -[license]: https://github.com/NYTimes/gziphandler/blob/master/LICENSE +[docs]: https://godoc.org/github.com/nytimes/gziphandler +[license]: https://github.com/nytimes/gziphandler/blob/master/LICENSE.md diff --git a/vendor/github.com/NYTimes/gziphandler/go.mod b/vendor/github.com/NYTimes/gziphandler/go.mod deleted file mode 100644 index 801901274..000000000 --- a/vendor/github.com/NYTimes/gziphandler/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/NYTimes/gziphandler - -go 1.11 - -require github.com/stretchr/testify v1.3.0 diff --git a/vendor/github.com/NYTimes/gziphandler/go.sum b/vendor/github.com/NYTimes/gziphandler/go.sum deleted file mode 100644 index 4347755af..000000000 --- a/vendor/github.com/NYTimes/gziphandler/go.sum +++ /dev/null @@ -1,7 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/NYTimes/gziphandler/gzip.go b/vendor/github.com/NYTimes/gziphandler/gzip.go index c112bbdf8..b6af9115a 100644 --- a/vendor/github.com/NYTimes/gziphandler/gzip.go +++ b/vendor/github.com/NYTimes/gziphandler/gzip.go @@ -1,11 +1,10 @@ -package gziphandler // import "github.com/NYTimes/gziphandler" +package gziphandler import ( "bufio" "compress/gzip" "fmt" "io" - "mime" "net" "net/http" "strconv" @@ -29,11 +28,9 @@ const ( // The examples seem to indicate that it is. DefaultQValue = 1.0 - // DefaultMinSize is the default minimum size until we enable gzip compression. - // 1500 bytes is the MTU size for the internet since that is the largest size allowed at the network layer. - // If you take a file that is 1300 bytes and compress it to 800 bytes, it’s still transmitted in that same 1500 byte packet regardless, so you’ve gained nothing. - // That being the case, you should restrict the gzip compression to files with a size greater than a single packet, 1400 bytes (1.4KB) is a safe value. - DefaultMinSize = 1400 + // DefaultMinSize defines the minimum size to reach to enable compression. + // It's 512 bytes. + DefaultMinSize = 512 ) // gzipWriterPools stores a sync.Pool for each compression level for reuse of @@ -83,71 +80,44 @@ type GzipResponseWriter struct { minSize int // Specifed the minimum response size to gzip. If the response length is bigger than this value, it is compressed. buf []byte // Holds the first part of the write before reaching the minSize or the end of the write. - ignore bool // If true, then we immediately passthru writes to the underlying ResponseWriter. 
- contentTypes []parsedContentType // Only compress if the response is one of these content-types. All are accepted if empty. -} - -type GzipResponseWriterWithCloseNotify struct { - *GzipResponseWriter -} - -func (w GzipResponseWriterWithCloseNotify) CloseNotify() <-chan bool { - return w.ResponseWriter.(http.CloseNotifier).CloseNotify() + contentTypes []string // Only compress if the response is one of these content-types. All are accepted if empty. } // Write appends data to the gzip writer. func (w *GzipResponseWriter) Write(b []byte) (int, error) { - // GZIP responseWriter is initialized. Use the GZIP responseWriter. - if w.gw != nil { - return w.gw.Write(b) + // If content type is not set. + if _, ok := w.Header()[contentType]; !ok { + // It infer it from the uncompressed body. + w.Header().Set(contentType, http.DetectContentType(b)) } - // If we have already decided not to use GZIP, immediately passthrough. - if w.ignore { - return w.ResponseWriter.Write(b) + // GZIP responseWriter is initialized. Use the GZIP responseWriter. + if w.gw != nil { + n, err := w.gw.Write(b) + return n, err } // Save the write into a buffer for later use in GZIP responseWriter (if content is long enough) or at close with regular responseWriter. // On the first write, w.buf changes from nil to a valid slice w.buf = append(w.buf, b...) - var ( - cl, _ = strconv.Atoi(w.Header().Get(contentLength)) - ct = w.Header().Get(contentType) - ce = w.Header().Get(contentEncoding) - ) - // Only continue if they didn't already choose an encoding or a known unhandled content length or type. - if ce == "" && (cl == 0 || cl >= w.minSize) && (ct == "" || handleContentType(w.contentTypes, ct)) { - // If the current buffer is less than minSize and a Content-Length isn't set, then wait until we have more data. - if len(w.buf) < w.minSize && cl == 0 { - return len(b), nil - } - // If the Content-Length is larger than minSize or the current buffer is larger than minSize, then continue. - if cl >= w.minSize || len(w.buf) >= w.minSize { - // If a Content-Type wasn't specified, infer it from the current buffer. - if ct == "" { - ct = http.DetectContentType(w.buf) - w.Header().Set(contentType, ct) - } - // If the Content-Type is acceptable to GZIP, initialize the GZIP writer. - if handleContentType(w.contentTypes, ct) { - if err := w.startGzip(); err != nil { - return 0, err - } - return len(b), nil - } + // If the global writes are bigger than the minSize and we're about to write + // a response containing a content type we want to handle, enable + // compression. + if len(w.buf) >= w.minSize && handleContentType(w.contentTypes, w) && w.Header().Get(contentEncoding) == "" { + err := w.startGzip() + if err != nil { + return 0, err } } - // If we got here, we should not GZIP this response. - if err := w.startPlain(); err != nil { - return 0, err - } + return len(b), nil } -// startGzip initializes a GZIP writer and writes the buffer. +// startGzip initialize any GZIP specific informations. func (w *GzipResponseWriter) startGzip() error { + // Set the GZIP header. w.Header().Set(contentEncoding, "gzip") @@ -159,57 +129,28 @@ func (w *GzipResponseWriter) startGzip() error { // Write the header to gzip response. if w.code != 0 { w.ResponseWriter.WriteHeader(w.code) - // Ensure that no other WriteHeader's happen - w.code = 0 } - // Initialize and flush the buffer into the gzip response if there are any bytes. 
- // If there aren't any, we shouldn't initialize it yet because on Close it will - // write the gzip header even if nothing was ever written. - if len(w.buf) > 0 { - // Initialize the GZIP response. - w.init() - n, err := w.gw.Write(w.buf) + // Initialize the GZIP response. + w.init() - // This should never happen (per io.Writer docs), but if the write didn't - // accept the entire buffer but returned no specific error, we have no clue - // what's going on, so abort just to be safe. - if err == nil && n < len(w.buf) { - err = io.ErrShortWrite - } - return err - } - return nil -} + // Flush the buffer into the gzip response. + n, err := w.gw.Write(w.buf) -// startPlain writes to sent bytes and buffer the underlying ResponseWriter without gzip. -func (w *GzipResponseWriter) startPlain() error { - if w.code != 0 { - w.ResponseWriter.WriteHeader(w.code) - // Ensure that no other WriteHeader's happen - w.code = 0 - } - w.ignore = true - // If Write was never called then don't call Write on the underlying ResponseWriter. - if w.buf == nil { - return nil - } - n, err := w.ResponseWriter.Write(w.buf) - w.buf = nil // This should never happen (per io.Writer docs), but if the write didn't // accept the entire buffer but returned no specific error, we have no clue // what's going on, so abort just to be safe. if err == nil && n < len(w.buf) { - err = io.ErrShortWrite + return io.ErrShortWrite } + + w.buf = nil return err } // WriteHeader just saves the response code until close or GZIP effective writes. func (w *GzipResponseWriter) WriteHeader(code int) { - if w.code == 0 { - w.code = code - } + w.code = code } // init graps a new gzip writer from the gzipWriterPool and writes the correct @@ -224,18 +165,19 @@ func (w *GzipResponseWriter) init() { // Close will close the gzip.Writer and will put it back in the gzipWriterPool. func (w *GzipResponseWriter) Close() error { - if w.ignore { - return nil - } - if w.gw == nil { - // GZIP not triggered yet, write out regular response. - err := w.startPlain() - // Returns the error if any at write. - if err != nil { - err = fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", err.Error()) + // Gzip not trigged yet, write out regular response. + if w.code != 0 { + w.ResponseWriter.WriteHeader(w.code) } - return err + if w.buf != nil { + _, writeErr := w.ResponseWriter.Write(w.buf) + // Returns the error if any at write. + if writeErr != nil { + return fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", writeErr.Error()) + } + } + return nil } err := w.gw.Close() @@ -248,14 +190,6 @@ func (w *GzipResponseWriter) Close() error { // http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter // an http.Flusher. func (w *GzipResponseWriter) Flush() { - if w.gw == nil && !w.ignore { - // Only flush once startGzip or startPlain has been called. - // - // Flush is thus a no-op until we're certain whether a plain - // or gzipped response will be served. 
- return - } - if w.gw != nil { w.gw.Flush() } @@ -322,6 +256,7 @@ func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Add(vary, acceptEncoding) + if acceptsGzip(r) { gw := &GzipResponseWriter{ ResponseWriter: w, @@ -331,13 +266,7 @@ func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error } defer gw.Close() - if _, ok := w.(http.CloseNotifier); ok { - gwcn := GzipResponseWriterWithCloseNotify{gw} - h.ServeHTTP(gwcn, r) - } else { - h.ServeHTTP(gw, r) - } - + h.ServeHTTP(gw, r) } else { h.ServeHTTP(w, r) } @@ -345,40 +274,11 @@ func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error }, nil } -// Parsed representation of one of the inputs to ContentTypes. -// See https://golang.org/pkg/mime/#ParseMediaType -type parsedContentType struct { - mediaType string - params map[string]string -} - -// equals returns whether this content type matches another content type. -func (pct parsedContentType) equals(mediaType string, params map[string]string) bool { - if pct.mediaType != mediaType { - return false - } - // if pct has no params, don't care about other's params - if len(pct.params) == 0 { - return true - } - - // if pct has any params, they must be identical to other's. - if len(pct.params) != len(params) { - return false - } - for k, v := range pct.params { - if w, ok := params[k]; !ok || v != w { - return false - } - } - return true -} - // Used for functional configuration. type config struct { minSize int level int - contentTypes []parsedContentType + contentTypes []string } func (c *config) validate() error { @@ -407,32 +307,11 @@ func CompressionLevel(level int) option { } } -// ContentTypes specifies a list of content types to compare -// the Content-Type header to before compressing. If none -// match, the response will be returned as-is. -// -// Content types are compared in a case-insensitive, whitespace-ignored -// manner. -// -// A MIME type without any other directive will match a content type -// that has the same MIME type, regardless of that content type's other -// directives. I.e., "text/html" will match both "text/html" and -// "text/html; charset=utf-8". -// -// A MIME type with any other directive will only match a content type -// that has the same MIME type and other directives. I.e., -// "text/html; charset=utf-8" will only match "text/html; charset=utf-8". -// -// By default, responses are gzipped regardless of -// Content-Type. func ContentTypes(types []string) option { return func(c *config) { - c.contentTypes = []parsedContentType{} + c.contentTypes = []string{} for _, v := range types { - mediaType, params, err := mime.ParseMediaType(v) - if err == nil { - c.contentTypes = append(c.contentTypes, parsedContentType{mediaType, params}) - } + c.contentTypes = append(c.contentTypes, strings.ToLower(v)) } } } @@ -453,19 +332,15 @@ func acceptsGzip(r *http.Request) bool { } // returns true if we've been configured to compress the specific content type. -func handleContentType(contentTypes []parsedContentType, ct string) bool { +func handleContentType(contentTypes []string, w http.ResponseWriter) bool { // If contentTypes is empty we handle all content types. 
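Since several hunks above re-vendor the older gziphandler, a quick reminder of how its options compose may help. GzipHandlerWithOpts, ContentTypes, and CompressionLevel are all visible in the code at this revision; the handler wiring around them is only a sketch.

package main

import (
	"compress/gzip"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/v1/agent/health", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{"ok":true}`))
	})

	// Build a middleware that only compresses JSON responses. In this older
	// revision, content types are matched as exact lowercased strings.
	wrap, err := gziphandler.GzipHandlerWithOpts(
		gziphandler.ContentTypes([]string{"application/json"}),
		gziphandler.CompressionLevel(gzip.BestSpeed),
	)
	if err != nil {
		panic(err)
	}

	http.ListenAndServe(":8080", wrap(mux))
}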
if len(contentTypes) == 0 { return true } - mediaType, params, err := mime.ParseMediaType(ct) - if err != nil { - return false - } - + ct := strings.ToLower(w.Header().Get(contentType)) for _, c := range contentTypes { - if c.equals(mediaType, params) { + if c == ct { return true } } diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go index 91b038738..5882b8701 100644 --- a/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -9,6 +9,7 @@ import ( "log" "os" "reflect" + "regexp" "runtime" "sort" "strconv" @@ -167,6 +168,8 @@ func trimCallerPath(path string) string { return path[idx+1:] } +var logImplFile = regexp.MustCompile(`github.com/hashicorp/go-hclog/.+logger.go$`) + // Non-JSON logging format function func (l *intLogger) log(t time.Time, name string, level Level, msg string, args ...interface{}) { l.writer.WriteString(t.Format(l.timeFormat)) @@ -179,8 +182,18 @@ func (l *intLogger) log(t time.Time, name string, level Level, msg string, args l.writer.WriteString("[?????]") } + offset := 3 if l.caller { - if _, file, line, ok := runtime.Caller(3); ok { + // Check if the caller is inside our package and inside + // a logger implementation file + if _, file, _, ok := runtime.Caller(3); ok { + match := logImplFile.MatchString(file) + if match { + offset = 4 + } + } + + if _, file, line, ok := runtime.Caller(offset); ok { l.writer.WriteByte(' ') l.writer.WriteString(trimCallerPath(file)) l.writer.WriteByte(':') diff --git a/vendor/vendor.json b/vendor/vendor.json index 32d11e9f3..4f46cd656 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -20,7 +20,7 @@ {"path":"github.com/Microsoft/go-winio/pkg/guid","checksumSHA1":"/ykkyb7gmtZC68n7T24xwbmlCBc=","origin":"github.com/endocrimes/go-winio/pkg/guid","revision":"fb47a8b419480a700368c176bc1d5d7e3393b98d","revisionTime":"2019-06-20T17:03:19Z","version":"dani/safe-relisten","versionExact":"dani/safe-relisten"}, {"path":"github.com/NVIDIA/gpu-monitoring-tools","checksumSHA1":"kF1vk+8Xvb3nGBiw9+qbUc0SZ4M=","revision":"86f2a9fac6c5b597dc494420005144b8ef7ec9fb","revisionTime":"2018-08-29T22:20:09Z"}, {"path":"github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml","checksumSHA1":"P8FATSSgpe5A17FyPrGpsX95Xw8=","revision":"86f2a9fac6c5b597dc494420005144b8ef7ec9fb","revisionTime":"2018-08-29T22:20:09Z"}, - {"path":"github.com/NYTimes/gziphandler","checksumSHA1":"Ylaw7hBEShLk8L5U89e7l6OKWKo=","revision":"dd0439581c7657cb652dfe5c71d7d48baf39541d","revisionTime":"2019-02-21T23:16:47Z","version":"master","versionExact":"master"}, + {"path":"github.com/NYTimes/gziphandler","checksumSHA1":"jktW57+vJsziNVPeXMCoujTzdW4=","revision":"97ae7fbaf81620fe97840685304a78a306a39c64","revisionTime":"2017-09-16T00:36:49Z"}, {"path":"github.com/Nvveen/Gotty","checksumSHA1":"Aqy8/FoAIidY/DeQ5oTYSZ4YFVc=","revision":"cd527374f1e5bff4938207604a14f2e38a9cf512","revisionTime":"2012-06-04T00:48:16Z"}, {"path":"github.com/StackExchange/wmi","checksumSHA1":"qtjd74+bErubh+qyv3s+lWmn9wc=","revision":"ea383cf3ba6ec950874b8486cd72356d007c768f","revisionTime":"2017-04-10T19:29:09Z"}, {"path":"github.com/agext/levenshtein","checksumSHA1":"jQh1fnoKPKMURvKkpdRjN695nAQ=","revision":"5f10fee965225ac1eecdc234c09daf5cd9e7f7b6","revisionTime":"2017-02-17T06:30:20Z"}, @@ -209,7 +209,7 @@ {"path":"github.com/hashicorp/go-envparse","checksumSHA1":"FKmqR4DC3nCXtnT9pe02z5CLNWo=","revision":"310ca1881b22af3522e3a8638c0b426629886196","revisionTime":"2018-01-19T21:58:41Z"}, 
{"path":"github.com/hashicorp/go-getter","checksumSHA1":"d4brua17AGQqMNtngK4xKOUwboY=","revision":"f5101da0117392c6e7960c934f05a2fd689a5b5f","revisionTime":"2019-08-22T19:45:07Z"}, {"path":"github.com/hashicorp/go-getter/helper/url","checksumSHA1":"9J+kDr29yDrwsdu2ULzewmqGjpA=","revision":"b345bfcec894fb7ff3fdf9b21baf2f56ea423d98","revisionTime":"2018-04-10T17:49:45Z"}, - {"path":"github.com/hashicorp/go-hclog","checksumSHA1":"t9zfE6XfBVzoruZdfV3vDPtoIa8=","revision":"8a936e0aa8de099a865e59f9a20744221f93b469","revisionTime":"2019-10-24T20:40:26Z","version":"f-multi-sink","versionExact":"f-multi-sink"}, + {"path":"github.com/hashicorp/go-hclog","checksumSHA1":"CYpA7Nmx/oTmWKIXtvO0uRhIyGk=","revision":"234833755cb25ae46996d0ef823326f492f89243","revisionTime":"2019-10-25T21:19:05Z","version":"f-multi-sink","versionExact":"f-multi-sink"}, {"path":"github.com/hashicorp/go-immutable-radix","checksumSHA1":"Cas2nprG6pWzf05A2F/OlnjUu2Y=","revision":"8aac2701530899b64bdea735a1de8da899815220","revisionTime":"2017-07-25T22:12:15Z"}, {"path":"github.com/hashicorp/go-memdb","checksumSHA1":"FMAvwDar2bQyYAW4XMFhAt0J5xA=","revision":"20ff6434c1cc49b80963d45bf5c6aa89c78d8d57","revisionTime":"2017-08-31T20:15:40Z"}, {"path":"github.com/hashicorp/go-msgpack/codec","checksumSHA1":"CKGYNUDKre3Z2g4hHNVfp5nTcfA=","revision":"23165f7bc3c2dda1891434ebb9da1511a7bafc1c","revisionTime":"2019-09-27T12:33:13Z","version":"upstream-08f7b40","versionExact":"upstream-08f7b40"}, From a72bd5cf65c4816e2749836bcf2a0491c0f90596 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Wed, 30 Oct 2019 09:02:29 -0400 Subject: [PATCH 17/34] use channel instead of empty string to determine close --- api/agent.go | 11 +++++------ command/agent_monitor.go | 4 ++-- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/api/agent.go b/api/agent.go index 6e381e4ba..b7e5e5a10 100644 --- a/api/agent.go +++ b/api/agent.go @@ -255,26 +255,25 @@ func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (chan string, e logCh := make(chan string, 64) go func() { defer resp.Body.Close() - + defer close(logCh) scanner := bufio.NewScanner(resp.Body) + + LOOP: for { select { case <-stopCh: return default: } + if scanner.Scan() { - // An empty string signals to the caller that - // the scan is done, so make sure we only emit - // that when the scanner says it's done, not if - // we happen to ingest an empty line. 
if text := scanner.Text(); text != "" { logCh <- text } else { logCh <- " " } } else { - logCh <- "" + break LOOP } } }() diff --git a/command/agent_monitor.go b/command/agent_monitor.go index 19e13abcb..9b42562cd 100644 --- a/command/agent_monitor.go +++ b/command/agent_monitor.go @@ -101,8 +101,8 @@ func (c *MonitorCommand) Run(args []string) int { OUTER: for { select { - case log := <-logCh: - if log == "" { + case log, ok := <-logCh: + if !ok { break OUTER } c.Ui.Output(log) From 1176fc0227589510f48c5e702a0b109912f175c0 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Wed, 30 Oct 2019 09:28:24 -0400 Subject: [PATCH 18/34] address feedback, use agent_endpoint instead of monitor --- api/agent_test.go | 15 +++++++++------ client/{monitor_endpoint.go => agent_endpoint.go} | 8 ++++---- ...or_endpoint_test.go => agent_endpoint_test.go} | 0 client/rpc.go | 2 +- ...nitor_endpoint.go => client_agent_endpoint.go} | 6 +++--- ...oint_test.go => client_agent_endpoint_test.go} | 0 nomad/server.go | 6 +++--- 7 files changed, 20 insertions(+), 17 deletions(-) rename client/{monitor_endpoint.go => agent_endpoint.go} (94%) rename client/{monitor_endpoint_test.go => agent_endpoint_test.go} (100%) rename nomad/{client_monitor_endpoint.go => client_agent_endpoint.go} (97%) rename nomad/{client_monitor_endpoint_test.go => client_agent_endpoint_test.go} (100%) diff --git a/api/agent_test.go b/api/agent_test.go index 3514b146f..f847b3ae1 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -264,6 +264,9 @@ func TestAgent_Health(t *testing.T) { assert.True(health.Server.Ok) } +// TestAgent_MonitorWithNode tests the Monitor endpoint +// passing in a log level and node ie, which tests monitor +// functionality for a specific client node func TestAgent_MonitorWithNode(t *testing.T) { t.Parallel() rpcPort := 0 @@ -309,10 +312,8 @@ func TestAgent_MonitorWithNode(t *testing.T) { } logCh, err := agent.Monitor(doneCh, q) + require.NoError(t, err) defer close(doneCh) - if err != nil { - t.Fatalf("err: %v", err) - } // make a request to generate some logs _, err = agent.NodeName() @@ -331,6 +332,10 @@ OUTER: } } } + +// TestAgent_Monitor tests the Monitor endpoint +// passing in only a log level, which tests the servers +// monitor functionality func TestAgent_Monitor(t *testing.T) { t.Parallel() c, s := makeClient(t, nil, nil) @@ -346,10 +351,8 @@ func TestAgent_Monitor(t *testing.T) { doneCh := make(chan struct{}) logCh, err := agent.Monitor(doneCh, q) + require.NoError(t, err) defer close(doneCh) - if err != nil { - t.Fatalf("err: %v", err) - } // make a request to generate some logs _, err = agent.Region() diff --git a/client/monitor_endpoint.go b/client/agent_endpoint.go similarity index 94% rename from client/monitor_endpoint.go rename to client/agent_endpoint.go index b02a5c4b6..e452837bd 100644 --- a/client/monitor_endpoint.go +++ b/client/agent_endpoint.go @@ -17,17 +17,17 @@ import ( cstructs "github.com/hashicorp/nomad/client/structs" ) -type Monitor struct { +type Agent struct { c *Client } -func NewMonitorEndpoint(c *Client) *Monitor { - m := &Monitor{c: c} +func NewMonitorEndpoint(c *Client) *Agent { + m := &Agent{c: c} m.c.streamingRpcs.Register("Agent.Monitor", m.monitor) return m } -func (m *Monitor) monitor(conn io.ReadWriteCloser) { +func (m *Agent) monitor(conn io.ReadWriteCloser) { defer metrics.MeasureSince([]string{"client", "monitor", "monitor"}, time.Now()) defer conn.Close() diff --git a/client/monitor_endpoint_test.go 
b/client/agent_endpoint_test.go similarity index 100% rename from client/monitor_endpoint_test.go rename to client/agent_endpoint_test.go diff --git a/client/rpc.go b/client/rpc.go index 35c3e92b8..6d15d831e 100644 --- a/client/rpc.go +++ b/client/rpc.go @@ -23,7 +23,7 @@ type rpcEndpoints struct { ClientStats *ClientStats FileSystem *FileSystem Allocations *Allocations - Monitor *Monitor + Monitor *Agent } // ClientRPC is used to make a local, client only RPC call diff --git a/nomad/client_monitor_endpoint.go b/nomad/client_agent_endpoint.go similarity index 97% rename from nomad/client_monitor_endpoint.go rename to nomad/client_agent_endpoint.go index 0f9a182ec..12a0d0857 100644 --- a/nomad/client_monitor_endpoint.go +++ b/nomad/client_agent_endpoint.go @@ -17,15 +17,15 @@ import ( "github.com/ugorji/go/codec" ) -type Monitor struct { +type Agent struct { srv *Server } -func (m *Monitor) register() { +func (m *Agent) register() { m.srv.streamingRpcs.Register("Agent.Monitor", m.monitor) } -func (m *Monitor) monitor(conn io.ReadWriteCloser) { +func (m *Agent) monitor(conn io.ReadWriteCloser) { defer conn.Close() // Decode args diff --git a/nomad/client_monitor_endpoint_test.go b/nomad/client_agent_endpoint_test.go similarity index 100% rename from nomad/client_monitor_endpoint_test.go rename to nomad/client_agent_endpoint_test.go diff --git a/nomad/server.go b/nomad/server.go index c7f195b23..e6c00c0e7 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -252,7 +252,7 @@ type endpoints struct { // Client endpoints ClientStats *ClientStats FileSystem *FileSystem - Monitor *Monitor + Agent *Agent ClientAllocations *ClientAllocations } @@ -1046,8 +1046,8 @@ func (s *Server) setupRpcServer(server *rpc.Server, ctx *RPCContext) { s.staticEndpoints.FileSystem = &FileSystem{srv: s, logger: s.logger.Named("client_fs")} s.staticEndpoints.FileSystem.register() - s.staticEndpoints.Monitor = &Monitor{srv: s} - s.staticEndpoints.Monitor.register() + s.staticEndpoints.Agent = &Agent{srv: s} + s.staticEndpoints.Agent.register() } // Register the static handlers From 6bf8617d02b46c608aea94b1008a65ecae05a6f5 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Wed, 30 Oct 2019 09:36:39 -0400 Subject: [PATCH 19/34] rename function, initialize log level better underscores instead of dashes for query params --- api/agent_test.go | 6 +++--- client/agent_endpoint.go | 6 ++---- client/rpc.go | 4 ++-- command/agent/agent_endpoint.go | 6 +++--- command/agent/agent_endpoint_test.go | 6 +++--- command/agent_monitor.go | 6 +++--- nomad/client_agent_endpoint.go | 4 +--- 7 files changed, 17 insertions(+), 21 deletions(-) diff --git a/api/agent_test.go b/api/agent_test.go index f847b3ae1..82a8e93f8 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -306,8 +306,8 @@ func TestAgent_MonitorWithNode(t *testing.T) { doneCh := make(chan struct{}) q := &QueryOptions{ Params: map[string]string{ - "log-level": "debug", - "node-id": node.ID, + "log_level": "debug", + "node_id": node.ID, }, } @@ -345,7 +345,7 @@ func TestAgent_Monitor(t *testing.T) { q := &QueryOptions{ Params: map[string]string{ - "log-level": "debug", + "log_level": "debug", }, } diff --git a/client/agent_endpoint.go b/client/agent_endpoint.go index e452837bd..e1b6e31ec 100644 --- a/client/agent_endpoint.go +++ b/client/agent_endpoint.go @@ -21,7 +21,7 @@ type Agent struct { c *Client } -func NewMonitorEndpoint(c *Client) *Agent { +func NewAgentEndpoint(c *Client) *Agent { m := &Agent{c: c} 
m.c.streamingRpcs.Register("Agent.Monitor", m.monitor) return m @@ -50,11 +50,9 @@ func (m *Agent) monitor(conn io.ReadWriteCloser) { return } - var logLevel log.Level + logLevel := log.LevelFromString(args.LogLevel) if args.LogLevel == "" { logLevel = log.LevelFromString("INFO") - } else { - logLevel = log.LevelFromString(args.LogLevel) } if logLevel == log.NoLevel { diff --git a/client/rpc.go b/client/rpc.go index 6d15d831e..3388f6483 100644 --- a/client/rpc.go +++ b/client/rpc.go @@ -23,7 +23,7 @@ type rpcEndpoints struct { ClientStats *ClientStats FileSystem *FileSystem Allocations *Allocations - Monitor *Agent + Agent *Agent } // ClientRPC is used to make a local, client only RPC call @@ -219,7 +219,7 @@ func (c *Client) setupClientRpc() { c.endpoints.ClientStats = &ClientStats{c} c.endpoints.FileSystem = NewFileSystemEndpoint(c) c.endpoints.Allocations = NewAllocationsEndpoint(c) - c.endpoints.Monitor = NewMonitorEndpoint(c) + c.endpoints.Agent = NewAgentEndpoint(c) // Create the RPC Server c.rpcServer = rpc.NewServer() diff --git a/command/agent/agent_endpoint.go b/command/agent/agent_endpoint.go index c48feb50f..ceb16d8eb 100644 --- a/command/agent/agent_endpoint.go +++ b/command/agent/agent_endpoint.go @@ -166,7 +166,7 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( } // Get the provided loglevel. - logLevel := req.URL.Query().Get("log-level") + logLevel := req.URL.Query().Get("log_level") if logLevel == "" { logLevel = "INFO" } @@ -176,9 +176,9 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( } // Determine if we are targeting a server or client - nodeID := req.URL.Query().Get("node-id") + nodeID := req.URL.Query().Get("node_id") - logJSONStr := req.URL.Query().Get("log-json") + logJSONStr := req.URL.Query().Get("log_json") logJSON, err := strconv.ParseBool(logJSONStr) if err != nil { logJSON = false diff --git a/command/agent/agent_endpoint_test.go b/command/agent/agent_endpoint_test.go index 0a4cb1f01..767801048 100644 --- a/command/agent/agent_endpoint_test.go +++ b/command/agent/agent_endpoint_test.go @@ -256,7 +256,7 @@ func TestHTTP_AgentMonitor(t *testing.T) { httpTest(t, nil, func(s *TestAgent) { { - req, err := http.NewRequest("GET", "/v1/agent/monitor?log-level=unknown", nil) + req, err := http.NewRequest("GET", "/v1/agent/monitor?log_level=unknown", nil) require.Nil(t, err) resp := newClosableRecorder() @@ -269,7 +269,7 @@ func TestHTTP_AgentMonitor(t *testing.T) { // check for a specific log { - req, err := http.NewRequest("GET", "/v1/agent/monitor?log-level=warn", nil) + req, err := http.NewRequest("GET", "/v1/agent/monitor?log_level=warn", nil) require.Nil(t, err) resp := newClosableRecorder() defer resp.Close() @@ -304,7 +304,7 @@ func TestHTTP_AgentMonitor(t *testing.T) { // stream logs for a given node { - req, err := http.NewRequest("GET", "/v1/agent/monitor?log-level=warn&node-id="+s.client.NodeID(), nil) + req, err := http.NewRequest("GET", "/v1/agent/monitor?log_level=warn&node_id="+s.client.NodeID(), nil) require.Nil(t, err) resp := newClosableRecorder() defer resp.Close() diff --git a/command/agent_monitor.go b/command/agent_monitor.go index 9b42562cd..c09c200da 100644 --- a/command/agent_monitor.go +++ b/command/agent_monitor.go @@ -80,9 +80,9 @@ func (c *MonitorCommand) Run(args []string) int { } params := map[string]string{ - "log-level": logLevel, - "node-id": nodeID, - "log-json": strconv.FormatBool(logJSON), + "log_level": logLevel, + "node_id": nodeID, + "log_json": 
strconv.FormatBool(logJSON), } query := &api.QueryOptions{ diff --git a/nomad/client_agent_endpoint.go b/nomad/client_agent_endpoint.go index 12a0d0857..6c9370653 100644 --- a/nomad/client_agent_endpoint.go +++ b/nomad/client_agent_endpoint.go @@ -47,11 +47,9 @@ func (m *Agent) monitor(conn io.ReadWriteCloser) { return } - var logLevel log.Level + logLevel := log.LevelFromString(args.LogLevel) if args.LogLevel == "" { logLevel = log.LevelFromString("INFO") - } else { - logLevel = log.LevelFromString(args.LogLevel) } if logLevel == log.NoLevel { From 873969cf414ecc43eec7acb2b0b35582eb064df4 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Wed, 30 Oct 2019 09:55:41 -0400 Subject: [PATCH 20/34] return 400 if invalid log_json param is given Addresses feedback around monitor implementation subselect on stopCh to prevent blocking forever. Set up a separate goroutine to check every 3 seconds for dropped messages. rename returned ch to avoid confusion --- command/agent/agent_endpoint.go | 10 ++-- command/agent/agent_endpoint_test.go | 14 +++++ command/agent/monitor/monitor.go | 78 +++++++++++++++++++++------ command/agent/monitor/monitor_test.go | 37 ++++++++++--- 4 files changed, 113 insertions(+), 26 deletions(-) diff --git a/command/agent/agent_endpoint.go b/command/agent/agent_endpoint.go index ceb16d8eb..8d19d3c2d 100644 --- a/command/agent/agent_endpoint.go +++ b/command/agent/agent_endpoint.go @@ -178,10 +178,14 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( // Determine if we are targeting a server or client nodeID := req.URL.Query().Get("node_id") + logJSON := false logJSONStr := req.URL.Query().Get("log_json") - logJSON, err := strconv.ParseBool(logJSONStr) - if err != nil { - logJSON = false + if logJSONStr != "" { + parsed, err := strconv.ParseBool(logJSONStr) + if err != nil { + return nil, CodedError(400, fmt.Sprintf("Unknown option for log json: %v", err)) + } + logJSON = parsed } // Build the request and parse the ACL token diff --git a/command/agent/agent_endpoint_test.go b/command/agent/agent_endpoint_test.go index 767801048..feeb8a828 100644 --- a/command/agent/agent_endpoint_test.go +++ b/command/agent/agent_endpoint_test.go @@ -255,6 +255,20 @@ func TestHTTP_AgentMonitor(t *testing.T) { t.Parallel() httpTest(t, nil, func(s *TestAgent) { + // invalid log_json + { + req, err := http.NewRequest("GET", "/v1/agent/monitor?log_json=no", nil) + require.Nil(t, err) + resp := newClosableRecorder() + + // Make the request + _, err = s.Server.AgentMonitor(resp, req) + if err.(HTTPCodedError).Code() != 400 { + t.Fatalf("expected 400 response, got: %v", resp.Code) + } + } + + // unknown log_level { req, err := http.NewRequest("GET", "/v1/agent/monitor?log_level=unknown", nil) require.Nil(t, err) diff --git a/command/agent/monitor/monitor.go b/command/agent/monitor/monitor.go index 1d063ae1d..3605bb992 100644 --- a/command/agent/monitor/monitor.go +++ b/command/agent/monitor/monitor.go @@ -1,27 +1,34 @@ package monitor import ( + "fmt" "sync" + "time" log "github.com/hashicorp/go-hclog" ) +// Monitor provides a mechanism to stream logs using go-hclog +// InterceptLogger and SinkAdapter. It allows streaming of logs +// at a different log level than what is set on the logger. 
type Monitor struct { sync.Mutex - sink log.SinkAdapter - logger log.InterceptLogger - logCh chan []byte - index int - droppedCount int - bufSize int + sink log.SinkAdapter + logger log.InterceptLogger + logCh chan []byte + droppedCount int + bufSize int + droppedDuration time.Duration } +// New creates a new Monitor. Start must be called in order to actually start +// streaming logs func New(buf int, logger log.InterceptLogger, opts *log.LoggerOptions) *Monitor { sw := &Monitor{ - logger: logger, - logCh: make(chan []byte, buf), - index: 0, - bufSize: buf, + logger: logger, + logCh: make(chan []byte, buf), + bufSize: buf, + droppedDuration: 3 * time.Second, } opts.Output = sw @@ -31,15 +38,25 @@ func New(buf int, logger log.InterceptLogger, opts *log.LoggerOptions) *Monitor return sw } +// Start registers a sink on the monitors logger and starts sending +// received log messages over the returned channel. A non-nil +// sopCh can be used to deregister the sink and stop log streaming func (d *Monitor) Start(stopCh <-chan struct{}) <-chan []byte { d.logger.RegisterSink(d.sink) - logCh := make(chan []byte, d.bufSize) + streamCh := make(chan []byte, d.bufSize) go func() { + defer close(streamCh) for { select { case log := <-d.logCh: - logCh <- log + select { + case <-stopCh: + d.logger.DeregisterSink(d.sink) + close(d.logCh) + return + case streamCh <- log: + } case <-stopCh: d.Lock() defer d.Unlock() @@ -51,7 +68,38 @@ func (d *Monitor) Start(stopCh <-chan struct{}) <-chan []byte { } }() - return logCh + go func() { + // loop and check for dropped messages + LOOP: + for { + select { + case <-stopCh: + break LOOP + case <-time.After(d.droppedDuration): + d.Lock() + defer d.Unlock() + + if d.droppedCount > 0 { + dropped := fmt.Sprintf("[WARN] Monitor dropped %d logs during monitor request\n", d.droppedCount) + select { + case d.logCh <- []byte(dropped): + default: + // Make room for dropped message + select { + case <-d.logCh: + d.droppedCount++ + dropped = fmt.Sprintf("[WARN] Monitor dropped %d logs during monitor request\n", d.droppedCount) + default: + } + d.logCh <- []byte(dropped) + } + d.droppedCount = 0 + } + } + } + }() + + return streamCh } // Write attempts to send latest log to logCh @@ -67,10 +115,6 @@ func (d *Monitor) Write(p []byte) (n int, err error) { case d.logCh <- bytes: default: d.droppedCount++ - if d.droppedCount > 10 { - d.logger.Warn("Monitor dropped %d logs during monitor request", d.droppedCount) - d.droppedCount = 0 - } } return } diff --git a/command/agent/monitor/monitor_test.go b/command/agent/monitor/monitor_test.go index b513db44e..bb6cedaf2 100644 --- a/command/agent/monitor/monitor_test.go +++ b/command/agent/monitor/monitor_test.go @@ -1,11 +1,11 @@ package monitor import ( + "fmt" + "strings" "testing" "time" - "github.com/stretchr/testify/assert" - log "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/require" ) @@ -38,6 +38,7 @@ func TestMonitor_Start(t *testing.T) { logger.Debug("test log") } +// Ensure number of dropped messages are logged func TestMonitor_DroppedMessages(t *testing.T) { t.Parallel() @@ -48,15 +49,39 @@ func TestMonitor_DroppedMessages(t *testing.T) { m := New(5, logger, &log.LoggerOptions{ Level: log.Debug, }) + m.droppedDuration = 5 * time.Millisecond doneCh := make(chan struct{}) defer close(doneCh) - m.Start(doneCh) + logCh := m.Start(doneCh) - for i := 0; i <= 9; i++ { - logger.Debug("test message") + for i := 0; i <= 100; i++ { + logger.Debug(fmt.Sprintf("test message %d", i)) } - assert.Greater(t, m.droppedCount, 0) + 
received := "" + + passed := make(chan struct{}) + go func() { + for { + select { + case recv := <-logCh: + received += string(recv) + if strings.Contains(received, "[WARN] Monitor dropped 90 logs during monitor request") { + close(passed) + } + } + } + }() + +TEST: + for { + select { + case <-passed: + break TEST + case <-time.After(1 * time.Second): + require.Fail(t, "expected to see warn dropped messages") + } + } } From 58117c03c43410bb49cdba44bdf3ebc9d69a0f26 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Wed, 30 Oct 2019 16:56:24 -0400 Subject: [PATCH 21/34] fix deadlock issue, switch to frames envelope --- api/agent.go | 20 +++++++++++++------- api/agent_test.go | 4 ++-- command/agent/monitor/monitor.go | 3 --- command/agent_monitor.go | 6 +++--- 4 files changed, 18 insertions(+), 15 deletions(-) diff --git a/api/agent.go b/api/agent.go index b7e5e5a10..d3622acbc 100644 --- a/api/agent.go +++ b/api/agent.go @@ -238,9 +238,13 @@ func (a *Agent) Health() (*AgentHealthResponse, error) { return nil, fmt.Errorf("unable to unmarshal response with status %d: %v", resp.StatusCode, err) } +type MonitorFrame struct { + Data []byte `json:",omitempty"` +} + // Monitor returns a channel which will receive streaming logs from the agent // Providing a non-nil stopCh can be used to close the connection and stop log streaming -func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { +func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (<-chan *MonitorFrame, error) { r, err := a.client.newRequest("GET", "/v1/agent/monitor") if err != nil { return nil, err @@ -252,10 +256,10 @@ func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (chan string, e return nil, err } - logCh := make(chan string, 64) + frames := make(chan *MonitorFrame, 10) go func() { defer resp.Body.Close() - defer close(logCh) + defer close(frames) scanner := bufio.NewScanner(resp.Body) LOOP: @@ -267,10 +271,12 @@ func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (chan string, e } if scanner.Scan() { - if text := scanner.Text(); text != "" { - logCh <- text + var frame MonitorFrame + if bytes := scanner.Bytes(); len(bytes) > 0 { + frame.Data = bytes + frames <- &frame } else { - logCh <- " " + frames <- &frame } } else { break LOOP @@ -278,7 +284,7 @@ func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (chan string, e } }() - return logCh, nil + return frames, nil } // joinResponse is used to decode the response we get while diff --git a/api/agent_test.go b/api/agent_test.go index 82a8e93f8..4a0304f09 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -324,7 +324,7 @@ OUTER: for { select { case log := <-logCh: - if strings.Contains(log, "[DEBUG]") { + if strings.Contains(string(log.Data), "[DEBUG]") { break OUTER } case <-time.After(2 * time.Second): @@ -363,7 +363,7 @@ OUTER: for { select { case log := <-logCh: - if strings.Contains(log, "[DEBUG]") { + if strings.Contains(string(log.Data), "[DEBUG]") { break OUTER } case <-time.After(2 * time.Second): diff --git a/command/agent/monitor/monitor.go b/command/agent/monitor/monitor.go index 3605bb992..471a2c451 100644 --- a/command/agent/monitor/monitor.go +++ b/command/agent/monitor/monitor.go @@ -105,9 +105,6 @@ func (d *Monitor) Start(stopCh <-chan struct{}) <-chan []byte { // Write attempts to send latest log to logCh // it drops the log if channel is unavailable to receive func (d *Monitor) Write(p []byte) (n int, err error) { - d.Lock() - defer 
d.Unlock() - bytes := make([]byte, len(p)) copy(bytes, p) diff --git a/command/agent_monitor.go b/command/agent_monitor.go index c09c200da..a369fbb48 100644 --- a/command/agent_monitor.go +++ b/command/agent_monitor.go @@ -89,7 +89,7 @@ func (c *MonitorCommand) Run(args []string) int { Params: params, } eventDoneCh := make(chan struct{}) - logCh, err := client.Agent().Monitor(eventDoneCh, query) + frames, err := client.Agent().Monitor(eventDoneCh, query) if err != nil { c.Ui.Error(fmt.Sprintf("Error starting monitor: %s", err)) c.Ui.Error(commandErrorText(c)) @@ -101,11 +101,11 @@ func (c *MonitorCommand) Run(args []string) int { OUTER: for { select { - case log, ok := <-logCh: + case frame, ok := <-frames: if !ok { break OUTER } - c.Ui.Output(log) + c.Ui.Output(string(frame.Data)) } } From f8eaf1f5af63b090de8b950f3c5a523463f2fb54 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Thu, 31 Oct 2019 09:59:24 -0400 Subject: [PATCH 22/34] lock in sub select rm redundant lock wip to use framing wip switch to stream frames --- api/agent.go | 41 +++++++++++--------- client/agent_endpoint.go | 72 +++++++++++++++++++++++++++++++---- client/agent_endpoint_test.go | 12 +++++- client/structs/structs.go | 3 ++ command/agent_monitor.go | 45 ++++++++++++---------- 5 files changed, 124 insertions(+), 49 deletions(-) diff --git a/api/agent.go b/api/agent.go index d3622acbc..bccb2c19f 100644 --- a/api/agent.go +++ b/api/agent.go @@ -1,7 +1,6 @@ package api import ( - "bufio" "encoding/json" "fmt" "net/url" @@ -244,25 +243,28 @@ type MonitorFrame struct { // Monitor returns a channel which will receive streaming logs from the agent // Providing a non-nil stopCh can be used to close the connection and stop log streaming -func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (<-chan *MonitorFrame, error) { +func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) { + + errCh := make(chan error, 1) r, err := a.client.newRequest("GET", "/v1/agent/monitor") if err != nil { - return nil, err + errCh <- err + return nil, errCh } r.setQueryOptions(q) _, resp, err := requireOK(a.client.doRequest(r)) if err != nil { - return nil, err + errCh <- err + return nil, errCh } - frames := make(chan *MonitorFrame, 10) + frames := make(chan *StreamFrame, 10) go func() { defer resp.Body.Close() - defer close(frames) - scanner := bufio.NewScanner(resp.Body) - LOOP: + dec := json.NewDecoder(resp.Body) + for { select { case <-stopCh: @@ -270,17 +272,20 @@ func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (<-chan *Monito default: } - if scanner.Scan() { - var frame MonitorFrame - if bytes := scanner.Bytes(); len(bytes) > 0 { - frame.Data = bytes - frames <- &frame - } else { - frames <- &frame - } - } else { - break LOOP + // Decode the next frame + var frame StreamFrame + if err := dec.Decode(&frame); err != nil { + close(frames) + errCh <- err + return } + + // Discard heartbeat frame + if frame.IsHeartbeat() { + continue + } + + frames <- &frame } }() diff --git a/client/agent_endpoint.go b/client/agent_endpoint.go index e1b6e31ec..e15e74da4 100644 --- a/client/agent_endpoint.go +++ b/client/agent_endpoint.go @@ -1,6 +1,7 @@ package client import ( + "bytes" "context" "errors" "io" @@ -14,6 +15,7 @@ import ( metrics "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" + sframer "github.com/hashicorp/nomad/client/lib/streamframer" cstructs "github.com/hashicorp/nomad/client/structs" ) @@ -27,6 +29,10 @@ 
func NewAgentEndpoint(c *Client) *Agent { return m } +type monitorFrame struct { + Data []byte `json:",omitempty"` +} + func (m *Agent) monitor(conn io.ReadWriteCloser) { defer metrics.MeasureSince([]string{"client", "monitor", "monitor"}, time.Now()) defer conn.Close() @@ -70,6 +76,17 @@ func (m *Agent) monitor(conn io.ReadWriteCloser) { Level: logLevel, }) + frames := make(chan *sframer.StreamFrame, streamFramesBuffer) + errCh := make(chan error) + var buf bytes.Buffer + frameCodec := codec.NewEncoder(&buf, structs.JsonHandle) + + // framer := sframer.NewStreamFramer(frames, 1*time.Second, 200*time.Millisecond, 64*1024) + framer := sframer.NewStreamFramer(frames, 1*time.Second, 200*time.Millisecond, 1024) + framer.Run() + defer framer.Destroy() + + // goroutine to detect remote side closing go func() { if _, err := conn.Read(nil); err != nil { // One end of the pipe explicitly closed, exit @@ -83,14 +100,59 @@ func (m *Agent) monitor(conn io.ReadWriteCloser) { }() logCh := monitor.Start(stopCh) + initialOffset := int64(0) + + // receive logs and build frames + go func() { + defer framer.Destroy() + LOOP: + for { + select { + case log := <-logCh: + if err := framer.Send("", "log", log, initialOffset); err != nil { + select { + case errCh <- err: + case <-ctx.Done(): + } + break LOOP + } + case <-ctx.Done(): + break LOOP + } + } + }() var streamErr error OUTER: for { select { - case log := <-logCh: + case frame, ok := <-frames: + if !ok { + // frame may have been closed when an error + // occurred. Check once more for an error. + select { + case streamErr = <-errCh: + // There was a pending error! + default: + // No error, continue on + } + + break OUTER + } + var resp cstructs.StreamErrWrapper - resp.Payload = log + if args.PlainText { + resp.Payload = frame.Data + } else { + if err := frameCodec.Encode(frame); err != nil { + streamErr = err + break OUTER + } + + resp.Payload = buf.Bytes() + buf.Reset() + } + if err := encoder.Encode(resp); err != nil { streamErr = err break OUTER @@ -106,11 +168,5 @@ OUTER: if streamErr == io.EOF || strings.Contains(streamErr.Error(), "closed") { return } - - // Attempt to send the error - encoder.Encode(&cstructs.StreamErrWrapper{ - Error: cstructs.NewRpcError(streamErr, helper.Int64ToPtr(500)), - }) - return } } diff --git a/client/agent_endpoint_test.go b/client/agent_endpoint_test.go index 38acd6fe6..14f53cc6a 100644 --- a/client/agent_endpoint_test.go +++ b/client/agent_endpoint_test.go @@ -1,6 +1,7 @@ package client import ( + "encoding/json" "fmt" "io" "net" @@ -10,11 +11,13 @@ import ( "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/client/config" + sframer "github.com/hashicorp/nomad/client/lib/streamframer" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/nomad" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/ugorji/go/codec" ) @@ -71,7 +74,7 @@ func TestMonitor_Monitor(t *testing.T) { encoder := codec.NewEncoder(p1, structs.MsgpackHandle) require.Nil(encoder.Encode(req)) - timeout := time.After(1 * time.Second) + timeout := time.After(5 * time.Second) expected := "[DEBUG]" received := "" @@ -86,7 +89,12 @@ OUTER: if msg.Error != nil { t.Fatalf("Got error: %v", msg.Error.Error()) } - received += string(msg.Payload) + + var frame sframer.StreamFrame + err := json.Unmarshal(msg.Payload, &frame) + assert.NoError(t, err) + + received += 
string(frame.Data) if strings.Contains(received, expected) { require.Nil(p2.Close()) break OUTER diff --git a/client/structs/structs.go b/client/structs/structs.go index 350e9de69..6a84e2d63 100644 --- a/client/structs/structs.go +++ b/client/structs/structs.go @@ -44,6 +44,9 @@ type MonitorRequest struct { // NodeID is the node we want to track the logs of NodeID string + // PlainText disables base64 encoding. + PlainText bool + structs.QueryOptions } diff --git a/command/agent_monitor.go b/command/agent_monitor.go index a369fbb48..ba5dbd0df 100644 --- a/command/agent_monitor.go +++ b/command/agent_monitor.go @@ -2,11 +2,13 @@ package command import ( "fmt" + "io" "os" "os/signal" "strconv" "strings" "syscall" + "time" "github.com/hashicorp/nomad/api" "github.com/mitchellh/cli" @@ -88,38 +90,39 @@ func (c *MonitorCommand) Run(args []string) int { query := &api.QueryOptions{ Params: params, } + eventDoneCh := make(chan struct{}) - frames, err := client.Agent().Monitor(eventDoneCh, query) - if err != nil { + frames, errCh := client.Agent().Monitor(eventDoneCh, query) + select { + case err := <-errCh: c.Ui.Error(fmt.Sprintf("Error starting monitor: %s", err)) c.Ui.Error(commandErrorText(c)) return 1 + default: } - go func() { - defer close(eventDoneCh) - OUTER: - for { - select { - case frame, ok := <-frames: - if !ok { - break OUTER - } - c.Ui.Output(string(frame.Data)) - } - } + // Create a reader + var r io.ReadCloser + frameReader := api.NewFrameReader(frames, errCh, eventDoneCh) + frameReader.SetUnblockTime(500 * time.Millisecond) + r = frameReader - }() + defer r.Close() signalCh := make(chan os.Signal, 1) signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) - select { - case <-eventDoneCh: - c.Ui.Error("Remote side ended the monitor! This usually means that the\n" + - "remote side has exited or crashed.") + go func() { + <-signalCh + // End the streaming + r.Close() + }() + + _, err = io.Copy(os.Stdout, r) + if err != nil { + c.Ui.Error(fmt.Sprintf("error monitoring logs: %s", err)) return 1 - case <-signalCh: - return 0 } + + return 0 } From 9a96c10d4c240fb2d78de73fc3b9b3c15fa299b3 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Fri, 1 Nov 2019 10:33:28 -0400 Subject: [PATCH 23/34] moving endpoints over to frames --- api/agent.go | 2 +- api/agent_test.go | 25 ++++++---- command/agent/agent_endpoint_test.go | 7 +-- command/agent_monitor_test.go | 2 + nomad/client_agent_endpoint.go | 71 ++++++++++++++++++++++++---- nomad/client_agent_endpoint_test.go | 17 +++++-- 6 files changed, 97 insertions(+), 27 deletions(-) diff --git a/api/agent.go b/api/agent.go index bccb2c19f..936ac1e4c 100644 --- a/api/agent.go +++ b/api/agent.go @@ -289,7 +289,7 @@ func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (<-chan *Stream } }() - return frames, nil + return frames, errCh } // joinResponse is used to decode the response we get while diff --git a/api/agent_test.go b/api/agent_test.go index 4a0304f09..079ba44c1 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -1,6 +1,7 @@ package api import ( + "bytes" "fmt" "reflect" "sort" @@ -311,22 +312,24 @@ func TestAgent_MonitorWithNode(t *testing.T) { }, } - logCh, err := agent.Monitor(doneCh, q) - require.NoError(t, err) + frames, errCh := agent.Monitor(doneCh, q) defer close(doneCh) // make a request to generate some logs - _, err = agent.NodeName() + _, err := agent.NodeName() require.NoError(t, err) // Wait for a log message + var result bytes.Buffer OUTER: for { select { - case log := 
<-logCh: - if strings.Contains(string(log.Data), "[DEBUG]") { + case f := <-frames: + if strings.Contains(string(f.Data), "[DEBUG]") { break OUTER } + case err := <-errCh: + t.Errorf("Error: %v", err) case <-time.After(2 * time.Second): require.Fail(t, "failed to get a DEBUG log message") } @@ -350,22 +353,26 @@ func TestAgent_Monitor(t *testing.T) { } doneCh := make(chan struct{}) - logCh, err := agent.Monitor(doneCh, q) - require.NoError(t, err) + frames, errCh := agent.Monitor(doneCh, q) defer close(doneCh) // make a request to generate some logs - _, err = agent.Region() + _, err := agent.Region() require.NoError(t, err) // Wait for a log message OUTER: for { select { - case log := <-logCh: + case log := <-frames: + if log == nil { + continue + } if strings.Contains(string(log.Data), "[DEBUG]") { break OUTER } + case err := <-errCh: + t.Fatalf("error: %v", err) case <-time.After(2 * time.Second): require.Fail(t, "failed to get a DEBUG log message") } diff --git a/command/agent/agent_endpoint_test.go b/command/agent/agent_endpoint_test.go index feeb8a828..504fc1634 100644 --- a/command/agent/agent_endpoint_test.go +++ b/command/agent/agent_endpoint_test.go @@ -298,15 +298,13 @@ func TestHTTP_AgentMonitor(t *testing.T) { tried := 0 testutil.WaitForResult(func() (bool, error) { if tried < maxLogAttempts { - s.Server.logger.Debug("log that should not be sent") s.Server.logger.Warn("log that should be sent") tried++ } got := resp.Body.String() - want := "[WARN] http: log that should be sent" + want := `{"Data":"` if strings.Contains(got, want) { - require.NotContains(t, resp.Body.String(), "[DEBUG]") return true, nil } @@ -344,9 +342,8 @@ func TestHTTP_AgentMonitor(t *testing.T) { } out += string(output) - want := "[WARN] http: log that should be sent" + want := `{"Data":"` if strings.Contains(out, want) { - require.NotContains(t, resp.Body.String(), "[DEBUG]") return true, nil } diff --git a/command/agent_monitor_test.go b/command/agent_monitor_test.go index eb49e92c3..24109b280 100644 --- a/command/agent_monitor_test.go +++ b/command/agent_monitor_test.go @@ -14,6 +14,8 @@ func TestMonitorCommand_Implements(t *testing.T) { func TestMonitorCommand_Fails(t *testing.T) { t.Parallel() + srv, _, _ := testServer(t, false, nil) + defer srv.Shutdown() ui := new(cli.MockUi) cmd := &MonitorCommand{Meta: Meta{Ui: ui}} diff --git a/nomad/client_agent_endpoint.go b/nomad/client_agent_endpoint.go index 6c9370653..d3d3d4d55 100644 --- a/nomad/client_agent_endpoint.go +++ b/nomad/client_agent_endpoint.go @@ -1,14 +1,17 @@ package nomad import ( + "bytes" "context" "errors" "fmt" "io" "net" "strings" + "time" log "github.com/hashicorp/go-hclog" + sframer "github.com/hashicorp/nomad/client/lib/streamframer" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/command/agent/monitor" "github.com/hashicorp/nomad/helper" @@ -138,9 +141,20 @@ func (m *Agent) monitor(conn io.ReadWriteCloser) { JSONFormat: args.LogJSON, }) + frames := make(chan *sframer.StreamFrame, 32) + errCh := make(chan error) + var buf bytes.Buffer + frameCodec := codec.NewEncoder(&buf, structs.JsonHandle) + + // framer := sframer.NewStreamFramer(frames, 1*time.Second, 200*time.Millisecond, 64*1024) + framer := sframer.NewStreamFramer(frames, 1*time.Second, 200*time.Millisecond, 1024) + framer.Run() + defer framer.Destroy() + + // goroutine to detect remote side closing go func() { if _, err := conn.Read(nil); err != nil { - // One end of the pipe closed, exit + // One end of the pipe explicitly closed, exit 
cancel() return } @@ -151,14 +165,59 @@ func (m *Agent) monitor(conn io.ReadWriteCloser) { }() logCh := monitor.Start(stopCh) + initialOffset := int64(0) + + // receive logs and build frames + go func() { + defer framer.Destroy() + LOOP: + for { + select { + case log := <-logCh: + if err := framer.Send("", "log", log, initialOffset); err != nil { + select { + case errCh <- err: + case <-ctx.Done(): + } + break LOOP + } + case <-ctx.Done(): + break LOOP + } + } + }() var streamErr error OUTER: for { select { - case log := <-logCh: + case frame, ok := <-frames: + if !ok { + // frame may have been closed when an error + // occurred. Check once more for an error. + select { + case streamErr = <-errCh: + // There was a pending error! + default: + // No error, continue on + } + + break OUTER + } + var resp cstructs.StreamErrWrapper - resp.Payload = log + if args.PlainText { + resp.Payload = frame.Data + } else { + if err := frameCodec.Encode(frame); err != nil { + streamErr = err + break OUTER + } + + resp.Payload = buf.Bytes() + buf.Reset() + } + if err := encoder.Encode(resp); err != nil { streamErr = err break OUTER @@ -174,11 +233,5 @@ OUTER: if streamErr == io.EOF || strings.Contains(streamErr.Error(), "closed") { return } - - // Attempt to send the error - encoder.Encode(&cstructs.StreamErrWrapper{ - Error: cstructs.NewRpcError(streamErr, helper.Int64ToPtr(500)), - }) - return } } diff --git a/nomad/client_agent_endpoint_test.go b/nomad/client_agent_endpoint_test.go index 27e4cb84f..201e21fb9 100644 --- a/nomad/client_agent_endpoint_test.go +++ b/nomad/client_agent_endpoint_test.go @@ -1,6 +1,7 @@ package nomad import ( + "encoding/json" "fmt" "io" "net" @@ -11,10 +12,12 @@ import ( "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/client" "github.com/hashicorp/nomad/client/config" + sframer "github.com/hashicorp/nomad/client/lib/streamframer" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/ugorji/go/codec" ) @@ -85,7 +88,7 @@ func TestMonitor_Monitor_Remote_Server(t *testing.T) { encoder := codec.NewEncoder(p1, structs.MsgpackHandle) require.Nil(encoder.Encode(req)) - timeout := time.After(1 * time.Second) + timeout := time.After(3 * time.Second) expected := "[DEBUG]" received := "" @@ -101,7 +104,11 @@ OUTER: t.Fatalf("Got error: %v", msg.Error.Error()) } - received += string(msg.Payload) + var frame sframer.StreamFrame + err := json.Unmarshal(msg.Payload, &frame) + assert.NoError(t, err) + + received += string(frame.Data) if strings.Contains(received, expected) { require.Nil(p2.Close()) break OUTER @@ -181,7 +188,11 @@ OUTER: t.Fatalf("Got error: %v", msg.Error.Error()) } - received += string(msg.Payload) + var frame sframer.StreamFrame + err := json.Unmarshal(msg.Payload, &frame) + assert.NoError(t, err) + + received += string(frame.Data) if strings.Contains(received, expected) { require.Nil(p2.Close()) break OUTER From f74bd99b2a1338bef12bffe91a9d7f9cbb4c1962 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Fri, 1 Nov 2019 11:06:49 -0400 Subject: [PATCH 24/34] monitor command takes no args rm extra new line fix lint errors return after close fix, simplify test --- api/agent.go | 1 - api/agent_test.go | 2 -- client/agent_endpoint.go | 4 ---- command/agent/monitor/monitor_test.go | 24 +++++++++++++----------- 
command/agent_monitor.go | 7 +++++++ 5 files changed, 20 insertions(+), 18 deletions(-) diff --git a/api/agent.go b/api/agent.go index 936ac1e4c..fdd5ff891 100644 --- a/api/agent.go +++ b/api/agent.go @@ -244,7 +244,6 @@ type MonitorFrame struct { // Monitor returns a channel which will receive streaming logs from the agent // Providing a non-nil stopCh can be used to close the connection and stop log streaming func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) { - errCh := make(chan error, 1) r, err := a.client.newRequest("GET", "/v1/agent/monitor") if err != nil { diff --git a/api/agent_test.go b/api/agent_test.go index 079ba44c1..d01cfdf27 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -1,7 +1,6 @@ package api import ( - "bytes" "fmt" "reflect" "sort" @@ -320,7 +319,6 @@ func TestAgent_MonitorWithNode(t *testing.T) { require.NoError(t, err) // Wait for a log message - var result bytes.Buffer OUTER: for { select { diff --git a/client/agent_endpoint.go b/client/agent_endpoint.go index e15e74da4..6ad62bae4 100644 --- a/client/agent_endpoint.go +++ b/client/agent_endpoint.go @@ -29,10 +29,6 @@ func NewAgentEndpoint(c *Client) *Agent { return m } -type monitorFrame struct { - Data []byte `json:",omitempty"` -} - func (m *Agent) monitor(conn io.ReadWriteCloser) { defer metrics.MeasureSince([]string{"client", "monitor", "monitor"}, time.Now()) defer conn.Close() diff --git a/command/agent/monitor/monitor_test.go b/command/agent/monitor/monitor_test.go index bb6cedaf2..be7deae2c 100644 --- a/command/agent/monitor/monitor_test.go +++ b/command/agent/monitor/monitor_test.go @@ -22,20 +22,23 @@ func TestMonitor_Start(t *testing.T) { }) closeCh := make(chan struct{}) - defer close(closeCh) logCh := m.Start(closeCh) + go func() { - for { - select { - case log := <-logCh: - require.Contains(t, string(log), "[DEBUG] test log") - case <-time.After(1 * time.Second): - t.Fatal("Expected to receive from log channel") - } - } + logger.Debug("test log") + time.Sleep(10 * time.Millisecond) }() - logger.Debug("test log") + + for { + select { + case log := <-logCh: + require.Contains(t, string(log), "[DEBUG] test log") + return + case <-time.After(3 * time.Second): + t.Fatal("Expected to receive from log channel") + } + } } // Ensure number of dropped messages are logged @@ -61,7 +64,6 @@ func TestMonitor_DroppedMessages(t *testing.T) { } received := "" - passed := make(chan struct{}) go func() { for { diff --git a/command/agent_monitor.go b/command/agent_monitor.go index ba5dbd0df..3fc86d48c 100644 --- a/command/agent_monitor.go +++ b/command/agent_monitor.go @@ -74,6 +74,13 @@ func (c *MonitorCommand) Run(args []string) int { return 1 } + args = flags.Args() + if l := len(args); l != 0 { + c.Ui.Error("This command takes no arguments") + c.Ui.Error(commandErrorText(c)) + return 1 + } + client, err := c.Meta.Client() if err != nil { c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) From 8423ccf89093ea3af9d079b33036b6657f930b6e Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Fri, 1 Nov 2019 14:54:25 -0400 Subject: [PATCH 25/34] allow more time for streaming message remove unused struct --- api/agent.go | 4 ---- command/agent/monitor/monitor_test.go | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/api/agent.go b/api/agent.go index fdd5ff891..2ec8ff233 100644 --- a/api/agent.go +++ b/api/agent.go @@ -237,10 +237,6 @@ func (a *Agent) Health() (*AgentHealthResponse, error) { 
return nil, fmt.Errorf("unable to unmarshal response with status %d: %v", resp.StatusCode, err) } -type MonitorFrame struct { - Data []byte `json:",omitempty"` -} - // Monitor returns a channel which will receive streaming logs from the agent // Providing a non-nil stopCh can be used to close the connection and stop log streaming func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) { diff --git a/command/agent/monitor/monitor_test.go b/command/agent/monitor/monitor_test.go index be7deae2c..92a8563cc 100644 --- a/command/agent/monitor/monitor_test.go +++ b/command/agent/monitor/monitor_test.go @@ -82,7 +82,7 @@ TEST: select { case <-passed: break TEST - case <-time.After(1 * time.Second): + case <-time.After(2 * time.Second): require.Fail(t, "expected to see warn dropped messages") } } From 676800f03afd8491dff1e26d956fac216bf83fd4 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Mon, 4 Nov 2019 09:04:47 -0500 Subject: [PATCH 26/34] address feedback --- command/agent/monitor/monitor_test.go | 2 +- command/agent_monitor.go | 7 +++---- nomad/client_agent_endpoint.go | 3 +-- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/command/agent/monitor/monitor_test.go b/command/agent/monitor/monitor_test.go index 92a8563cc..f422171c1 100644 --- a/command/agent/monitor/monitor_test.go +++ b/command/agent/monitor/monitor_test.go @@ -82,7 +82,7 @@ TEST: select { case <-passed: break TEST - case <-time.After(2 * time.Second): + case <-time.After(3 * time.Second): require.Fail(t, "expected to see warn dropped messages") } } diff --git a/command/agent_monitor.go b/command/agent_monitor.go index 3fc86d48c..b5ef2c28c 100644 --- a/command/agent_monitor.go +++ b/command/agent_monitor.go @@ -22,8 +22,7 @@ func (c *MonitorCommand) Help() string { helpText := ` Usage: nomad monitor [options] - Shows recent log messages of a nomad agent, and attaches to the agent, - outputting log messagse as they occur in real time. The monitor lets you + Stream log messages of a nomad agent. The monitor command lets you listen for log levels that may be filtered out of the Nomad agent. 
For example your agent may only be logging at INFO level, but with the monitor command you can set -log-level DEBUG @@ -47,7 +46,7 @@ Monitor Specific Options: } func (c *MonitorCommand) Synopsis() string { - return "stream logs from a nomad agent" + return "stream logs from a Nomad agent" } func (c *MonitorCommand) Name() string { return "monitor" } @@ -68,7 +67,7 @@ func (c *MonitorCommand) Run(args []string) int { flags.Usage = func() { c.Ui.Output(c.Help()) } flags.StringVar(&logLevel, "log-level", "", "") flags.StringVar(&nodeID, "node-id", "", "") - flags.BoolVar(&logJSON, "log-json", false, "") + flags.BoolVar(&logJSON, "json", false, "") if err := flags.Parse(args); err != nil { return 1 diff --git a/nomad/client_agent_endpoint.go b/nomad/client_agent_endpoint.go index d3d3d4d55..6c66c2233 100644 --- a/nomad/client_agent_endpoint.go +++ b/nomad/client_agent_endpoint.go @@ -41,7 +41,7 @@ func (m *Agent) monitor(conn io.ReadWriteCloser) { return } - // Check node read permissions + // Check agent read permissions if aclObj, err := m.srv.ResolveToken(args.AuthToken); err != nil { handleStreamResultError(err, nil, encoder) return @@ -146,7 +146,6 @@ func (m *Agent) monitor(conn io.ReadWriteCloser) { var buf bytes.Buffer frameCodec := codec.NewEncoder(&buf, structs.JsonHandle) - // framer := sframer.NewStreamFramer(frames, 1*time.Second, 200*time.Millisecond, 64*1024) framer := sframer.NewStreamFramer(frames, 1*time.Second, 200*time.Millisecond, 1024) framer.Run() defer framer.Destroy() From dc977dc8e6ff6201e6ee4933574d4f88557de2b8 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Mon, 4 Nov 2019 09:25:19 -0500 Subject: [PATCH 27/34] move forwarded monitor request into helper --- client/agent_endpoint.go | 1 - nomad/client_agent_endpoint.go | 136 +++++++++++++++++---------------- 2 files changed, 70 insertions(+), 67 deletions(-) diff --git a/client/agent_endpoint.go b/client/agent_endpoint.go index 6ad62bae4..a95a5345d 100644 --- a/client/agent_endpoint.go +++ b/client/agent_endpoint.go @@ -77,7 +77,6 @@ func (m *Agent) monitor(conn io.ReadWriteCloser) { var buf bytes.Buffer frameCodec := codec.NewEncoder(&buf, structs.JsonHandle) - // framer := sframer.NewStreamFramer(frames, 1*time.Second, 200*time.Millisecond, 64*1024) framer := sframer.NewStreamFramer(frames, 1*time.Second, 200*time.Millisecond, 1024) framer.Run() defer framer.Destroy() diff --git a/nomad/client_agent_endpoint.go b/nomad/client_agent_endpoint.go index 6c66c2233..a3da1aa39 100644 --- a/nomad/client_agent_endpoint.go +++ b/nomad/client_agent_endpoint.go @@ -62,72 +62,7 @@ func (m *Agent) monitor(conn io.ReadWriteCloser) { // Targeting a client so forward the request if args.NodeID != "" { - nodeID := args.NodeID - - snap, err := m.srv.State().Snapshot() - if err != nil { - handleStreamResultError(err, nil, encoder) - return - } - - node, err := snap.NodeByID(nil, nodeID) - if err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) - return - } - - if node == nil { - err := fmt.Errorf("Unknown node %q", nodeID) - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) - return - } - - if err := nodeSupportsRpc(node); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) - return - } - - // Get the Connection to the client either by fowarding to another server - // or creating direct stream - var clientConn net.Conn - state, ok := m.srv.getNodeConn(nodeID) - if !ok { - // Determine the server that has a connection to the node 
- srv, err := m.srv.serverWithNodeConn(nodeID, m.srv.Region()) - if err != nil { - var code *int64 - if structs.IsErrNoNodeConn(err) { - code = helper.Int64ToPtr(404) - } - handleStreamResultError(err, code, encoder) - return - } - conn, err := m.srv.streamingRpc(srv, "Agent.Monitor") - if err != nil { - handleStreamResultError(err, nil, encoder) - return - } - - clientConn = conn - } else { - stream, err := NodeStreamingRpc(state.Session, "Agent.Monitor") - if err != nil { - handleStreamResultError(err, nil, encoder) - return - } - clientConn = stream - } - defer clientConn.Close() - - // Send the Request - outEncoder := codec.NewEncoder(clientConn, structs.MsgpackHandle) - if err := outEncoder.Encode(args); err != nil { - handleStreamResultError(err, nil, encoder) - return - } - - structs.Bridge(conn, clientConn) - return + m.forwardMonitor(conn, args, encoder, decoder) } // NodeID was empty, so monitor this current server @@ -234,3 +169,72 @@ OUTER: } } } + +func (m *Agent) forwardMonitor(conn io.ReadWriteCloser, args cstructs.MonitorRequest, encoder *codec.Encoder, decoder *codec.Decoder) { + nodeID := args.NodeID + + snap, err := m.srv.State().Snapshot() + if err != nil { + handleStreamResultError(err, nil, encoder) + return + } + + node, err := snap.NodeByID(nil, nodeID) + if err != nil { + handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + return + } + + if node == nil { + err := fmt.Errorf("Unknown node %q", nodeID) + handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + return + } + + if err := nodeSupportsRpc(node); err != nil { + handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + return + } + + // Get the Connection to the client either by fowarding to another server + // or creating direct stream + var clientConn net.Conn + state, ok := m.srv.getNodeConn(nodeID) + if !ok { + // Determine the server that has a connection to the node + srv, err := m.srv.serverWithNodeConn(nodeID, m.srv.Region()) + if err != nil { + var code *int64 + if structs.IsErrNoNodeConn(err) { + code = helper.Int64ToPtr(404) + } + handleStreamResultError(err, code, encoder) + return + } + conn, err := m.srv.streamingRpc(srv, "Agent.Monitor") + if err != nil { + handleStreamResultError(err, nil, encoder) + return + } + + clientConn = conn + } else { + stream, err := NodeStreamingRpc(state.Session, "Agent.Monitor") + if err != nil { + handleStreamResultError(err, nil, encoder) + return + } + clientConn = stream + } + defer clientConn.Close() + + // Send the Request + outEncoder := codec.NewEncoder(clientConn, structs.MsgpackHandle) + if err := outEncoder.Encode(args); err != nil { + handleStreamResultError(err, nil, encoder) + return + } + + structs.Bridge(conn, clientConn) + return +} From 4f618eb1e3048795e90b904b446ec62260dfb0eb Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Mon, 4 Nov 2019 10:07:03 -0500 Subject: [PATCH 28/34] simplify assert message --- command/agent/monitor/monitor_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/command/agent/monitor/monitor_test.go b/command/agent/monitor/monitor_test.go index f422171c1..2aa91d1bd 100644 --- a/command/agent/monitor/monitor_test.go +++ b/command/agent/monitor/monitor_test.go @@ -70,7 +70,7 @@ func TestMonitor_DroppedMessages(t *testing.T) { select { case recv := <-logCh: received += string(recv) - if strings.Contains(received, "[WARN] Monitor dropped 90 logs during monitor request") { + if strings.Contains(received, "[WARN] Monitor dropped") 
{ close(passed) } } @@ -82,7 +82,7 @@ TEST: select { case <-passed: break TEST - case <-time.After(3 * time.Second): + case <-time.After(2 * time.Second): require.Fail(t, "expected to see warn dropped messages") } } From bb2a7f43385fe469e587348ebdb31ea0b15674ed Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Mon, 4 Nov 2019 14:17:15 -0500 Subject: [PATCH 29/34] address feedback, fix gauge metric name --- api/agent.go | 1 + client/agent_endpoint.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/api/agent.go b/api/agent.go index 2ec8ff233..d372b8062 100644 --- a/api/agent.go +++ b/api/agent.go @@ -263,6 +263,7 @@ func (a *Agent) Monitor(stopCh <-chan struct{}, q *QueryOptions) (<-chan *Stream for { select { case <-stopCh: + close(frames) return default: } diff --git a/client/agent_endpoint.go b/client/agent_endpoint.go index a95a5345d..c6c441d75 100644 --- a/client/agent_endpoint.go +++ b/client/agent_endpoint.go @@ -30,7 +30,7 @@ func NewAgentEndpoint(c *Client) *Agent { } func (m *Agent) monitor(conn io.ReadWriteCloser) { - defer metrics.MeasureSince([]string{"client", "monitor", "monitor"}, time.Now()) + defer metrics.MeasureSince([]string{"client", "agent", "monitor"}, time.Now()) defer conn.Close() // Decode arguments From 33ba36acbdd42c3c5e3c225fed7caf0f0c2d4fde Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Mon, 4 Nov 2019 14:32:53 -0500 Subject: [PATCH 30/34] log-json -> json fix typo command/agent/monitor/monitor.go Co-Authored-By: Chris Baker <1675087+cgbaker@users.noreply.github.com> Update command/agent/monitor/monitor.go Co-Authored-By: Chris Baker <1675087+cgbaker@users.noreply.github.com> address feedback, lock to prevent send on closed channel fix lock/unlock for dropped messages --- client/agent_endpoint.go | 7 ++----- command/agent/monitor/monitor.go | 34 +++++++++++++++++++++++--------- command/agent_monitor.go | 2 +- nomad/client_agent_endpoint.go | 7 ++----- 4 files changed, 30 insertions(+), 20 deletions(-) diff --git a/client/agent_endpoint.go b/client/agent_endpoint.go index c6c441d75..d22291cef 100644 --- a/client/agent_endpoint.go +++ b/client/agent_endpoint.go @@ -5,7 +5,6 @@ import ( "context" "errors" "io" - "strings" "time" "github.com/hashicorp/nomad/command/agent/monitor" @@ -159,9 +158,7 @@ OUTER: } if streamErr != nil { - // Nothing to do as conn is closed - if streamErr == io.EOF || strings.Contains(streamErr.Error(), "closed") { - return - } + handleStreamResultError(streamErr, helper.Int64ToPtr(500), encoder) + return } } diff --git a/command/agent/monitor/monitor.go b/command/agent/monitor/monitor.go index 471a2c451..60397b544 100644 --- a/command/agent/monitor/monitor.go +++ b/command/agent/monitor/monitor.go @@ -12,12 +12,22 @@ import ( // InterceptLogger and SinkAdapter. It allows streaming of logs // at a different log level than what is set on the logger. type Monitor struct { + // sync.Mutex protects droppedCount and logCh sync.Mutex - sink log.SinkAdapter - logger log.InterceptLogger - logCh chan []byte - droppedCount int - bufSize int + + sink log.SinkAdapter + + // logger is the logger we will be monitoring + logger log.InterceptLogger + logCh chan []byte + // droppedCount is the current count of messages + // that were dropped from the logCh buffer. + // only access under lock + droppedCount int + bufSize int + // droppedDuration is the amount of time we should + // wait to check for dropped messages. 
Defaults + // to 3 seconds droppedDuration time.Duration } @@ -38,9 +48,9 @@ func New(buf int, logger log.InterceptLogger, opts *log.LoggerOptions) *Monitor return sw } -// Start registers a sink on the monitors logger and starts sending +// Start registers a sink on the monitor's logger and starts sending // received log messages over the returned channel. A non-nil -// sopCh can be used to deregister the sink and stop log streaming +// stopCh can be used to de-register the sink and stop log streaming func (d *Monitor) Start(stopCh <-chan struct{}) <-chan []byte { d.logger.RegisterSink(d.sink) @@ -77,14 +87,16 @@ func (d *Monitor) Start(stopCh <-chan struct{}) <-chan []byte { break LOOP case <-time.After(d.droppedDuration): d.Lock() - defer d.Unlock() + // Check if there have been any dropped messages. if d.droppedCount > 0 { dropped := fmt.Sprintf("[WARN] Monitor dropped %d logs during monitor request\n", d.droppedCount) select { + // Try sending dropped message count to logCh in case + // there is room in the buffer now. case d.logCh <- []byte(dropped): default: - // Make room for dropped message + // Drop a log message to make room for "Monitor dropped.." message select { case <-d.logCh: d.droppedCount++ @@ -95,6 +107,7 @@ func (d *Monitor) Start(stopCh <-chan struct{}) <-chan []byte { } d.droppedCount = 0 } + d.Unlock() } } }() @@ -105,6 +118,9 @@ func (d *Monitor) Start(stopCh <-chan struct{}) <-chan []byte { // Write attempts to send latest log to logCh // it drops the log if channel is unavailable to receive func (d *Monitor) Write(p []byte) (n int, err error) { + d.Lock() + defer d.Unlock() + bytes := make([]byte, len(p)) copy(bytes, p) diff --git a/command/agent_monitor.go b/command/agent_monitor.go index b5ef2c28c..4927c5784 100644 --- a/command/agent_monitor.go +++ b/command/agent_monitor.go @@ -39,7 +39,7 @@ Monitor Specific Options: -node-id Sets the specific node to monitor - -log-json + -json Sets log output to JSON format ` return strings.TrimSpace(helpText) diff --git a/nomad/client_agent_endpoint.go b/nomad/client_agent_endpoint.go index a3da1aa39..3c4c15a53 100644 --- a/nomad/client_agent_endpoint.go +++ b/nomad/client_agent_endpoint.go @@ -7,7 +7,6 @@ import ( "fmt" "io" "net" - "strings" "time" log "github.com/hashicorp/go-hclog" @@ -163,10 +162,8 @@ OUTER: } if streamErr != nil { - // Nothing to do as conn is closed - if streamErr == io.EOF || strings.Contains(streamErr.Error(), "closed") { - return - } + handleStreamResultError(streamErr, helper.Int64ToPtr(500), encoder) + return } } From 79411c5e0ef765adb5cf4f8e3ad3e57dd514eab1 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Tue, 5 Nov 2019 09:16:51 -0500 Subject: [PATCH 31/34] coordinate closing of doneCh, use interface to simplify callers comments --- client/agent_endpoint.go | 5 +-- command/agent/monitor/monitor.go | 60 +++++++++++++++++++++------ command/agent/monitor/monitor_test.go | 9 ++-- nomad/client_agent_endpoint.go | 5 +-- 4 files changed, 56 insertions(+), 23 deletions(-) diff --git a/client/agent_endpoint.go b/client/agent_endpoint.go index d22291cef..19df4732a 100644 --- a/client/agent_endpoint.go +++ b/client/agent_endpoint.go @@ -61,9 +61,7 @@ func (m *Agent) monitor(conn io.ReadWriteCloser) { return } - stopCh := make(chan struct{}) ctx, cancel := context.WithCancel(context.Background()) - defer close(stopCh) defer cancel() monitor := monitor.New(512, m.c.logger, &log.LoggerOptions{ @@ -93,7 +91,8 @@ func (m *Agent) monitor(conn io.ReadWriteCloser) { 
 		}
 	}()
-	logCh := monitor.Start(stopCh)
+	logCh := monitor.Start()
+	defer monitor.Stop()
 	initialOffset := int64(0)

 	// receive logs and build frames
diff --git a/command/agent/monitor/monitor.go b/command/agent/monitor/monitor.go
index 60397b544..d85719712 100644
--- a/command/agent/monitor/monitor.go
+++ b/command/agent/monitor/monitor.go
@@ -11,15 +11,32 @@ import (
 // Monitor provides a mechanism to stream logs using go-hclog
 // InterceptLogger and SinkAdapter. It allows streaming of logs
 // at a different log level than what is set on the logger.
-type Monitor struct {
-	// sync.Mutex protects droppedCount and logCh
+type Monitor interface {
+	// Start returns a channel of log messages which are sent
+	// every time a log message occurs
+	Start() <-chan []byte
+
+	// Stop de-registers the sink from the InterceptLogger
+	// and closes the log channels
+	Stop()
+}
+
+// monitor implements the Monitor interface
+type monitor struct {
+	// protects droppedCount and logCh
 	sync.Mutex

 	sink log.SinkAdapter

 	// logger is the logger we will be monitoring
 	logger log.InterceptLogger
-	logCh  chan []byte
+
+	// logCh is a buffered chan where we send logs when streaming
+	logCh chan []byte
+
+	// doneCh coordinates the shutdown of logCh
+	doneCh chan struct{}
+
 	// droppedCount is the current count of messages
 	// that were dropped from the logCh buffer.
 	// only access under lock
@@ -33,10 +50,15 @@ type Monitor struct {
 // New creates a new Monitor. Start must be called in order to actually start
 // streaming logs
-func New(buf int, logger log.InterceptLogger, opts *log.LoggerOptions) *Monitor {
-	sw := &Monitor{
+func New(buf int, logger log.InterceptLogger, opts *log.LoggerOptions) Monitor {
+	return new(buf, logger, opts)
+}
+
+func new(buf int, logger log.InterceptLogger, opts *log.LoggerOptions) *monitor {
+	sw := &monitor{
 		logger:          logger,
 		logCh:           make(chan []byte, buf),
+		doneCh:          make(chan struct{}, 1),
 		bufSize:         buf,
 		droppedDuration: 3 * time.Second,
 	}
@@ -48,10 +70,14 @@ func New(buf int, logger log.InterceptLogger, opts *log.LoggerOptions) *Monitor
 	return sw
 }

+// Stop stops the monitoring process
+func (d *monitor) Stop() {
+	close(d.doneCh)
+}
+
 // Start registers a sink on the monitor's logger and starts sending
-// received log messages over the returned channel. A non-nil
-// stopCh can be used to de-register the sink and stop log streaming
-func (d *Monitor) Start(stopCh <-chan struct{}) <-chan []byte {
+// received log messages over the returned channel.
+func (d *monitor) Start() <-chan []byte {
 	d.logger.RegisterSink(d.sink)

 	streamCh := make(chan []byte, d.bufSize)
@@ -61,13 +87,13 @@ func (d *Monitor) Start(stopCh <-chan struct{}) <-chan []byte {
 		select {
 		case log := <-d.logCh:
 			select {
-			case <-stopCh:
+			case <-d.doneCh:
 				d.logger.DeregisterSink(d.sink)
 				close(d.logCh)
 				return
 			case streamCh <- log:
 			}
-		case <-stopCh:
+		case <-d.doneCh:
 			d.Lock()
 			defer d.Unlock()

@@ -83,7 +109,7 @@ func (d *Monitor) Start(stopCh <-chan struct{}) <-chan []byte {
 	LOOP:
 		for {
 			select {
-			case <-stopCh:
+			case <-d.doneCh:
 				break LOOP
 			case <-time.After(d.droppedDuration):
 				d.Lock()
@@ -92,6 +118,8 @@ func (d *Monitor) Start(stopCh <-chan struct{}) <-chan []byte {
 				if d.droppedCount > 0 {
 					dropped := fmt.Sprintf("[WARN] Monitor dropped %d logs during monitor request\n", d.droppedCount)
 					select {
+					case <-d.doneCh:
+						break LOOP
 					// Try sending dropped message count to logCh in case
 					// there is room in the buffer now.
case d.logCh <- []byte(dropped): @@ -107,6 +135,7 @@ func (d *Monitor) Start(stopCh <-chan struct{}) <-chan []byte { } d.droppedCount = 0 } + // unlock after handling dropped message d.Unlock() } } @@ -117,10 +146,17 @@ func (d *Monitor) Start(stopCh <-chan struct{}) <-chan []byte { // Write attempts to send latest log to logCh // it drops the log if channel is unavailable to receive -func (d *Monitor) Write(p []byte) (n int, err error) { +func (d *monitor) Write(p []byte) (n int, err error) { d.Lock() defer d.Unlock() + // ensure logCh is still open + select { + case <-d.doneCh: + return + default: + } + bytes := make([]byte, len(p)) copy(bytes, p) diff --git a/command/agent/monitor/monitor_test.go b/command/agent/monitor/monitor_test.go index 2aa91d1bd..697b5bdc5 100644 --- a/command/agent/monitor/monitor_test.go +++ b/command/agent/monitor/monitor_test.go @@ -21,9 +21,8 @@ func TestMonitor_Start(t *testing.T) { Level: log.Debug, }) - closeCh := make(chan struct{}) - - logCh := m.Start(closeCh) + logCh := m.Start() + defer m.Stop() go func() { logger.Debug("test log") @@ -49,7 +48,7 @@ func TestMonitor_DroppedMessages(t *testing.T) { Level: log.Warn, }) - m := New(5, logger, &log.LoggerOptions{ + m := new(5, logger, &log.LoggerOptions{ Level: log.Debug, }) m.droppedDuration = 5 * time.Millisecond @@ -57,7 +56,7 @@ func TestMonitor_DroppedMessages(t *testing.T) { doneCh := make(chan struct{}) defer close(doneCh) - logCh := m.Start(doneCh) + logCh := m.Start() for i := 0; i <= 100; i++ { logger.Debug(fmt.Sprintf("test message %d", i)) diff --git a/nomad/client_agent_endpoint.go b/nomad/client_agent_endpoint.go index 3c4c15a53..bb9281441 100644 --- a/nomad/client_agent_endpoint.go +++ b/nomad/client_agent_endpoint.go @@ -65,9 +65,7 @@ func (m *Agent) monitor(conn io.ReadWriteCloser) { } // NodeID was empty, so monitor this current server - stopCh := make(chan struct{}) ctx, cancel := context.WithCancel(context.Background()) - defer close(stopCh) defer cancel() monitor := monitor.New(512, m.srv.logger, &log.LoggerOptions{ @@ -97,7 +95,8 @@ func (m *Agent) monitor(conn io.ReadWriteCloser) { } }() - logCh := monitor.Start(stopCh) + logCh := monitor.Start() + defer monitor.Stop() initialOffset := int64(0) // receive logs and build frames From 158517972bc7a8d9081ea6e970fd091aec14b22f Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Tue, 5 Nov 2019 10:34:02 -0500 Subject: [PATCH 32/34] wireup plain=true|false query param --- command/agent/agent_endpoint.go | 18 ++++++++++++--- command/agent/agent_endpoint_test.go | 33 ++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 3 deletions(-) diff --git a/command/agent/agent_endpoint.go b/command/agent/agent_endpoint.go index 8d19d3c2d..56c3631b7 100644 --- a/command/agent/agent_endpoint.go +++ b/command/agent/agent_endpoint.go @@ -188,12 +188,24 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( logJSON = parsed } + plainText := false + plainTextStr := req.URL.Query().Get("plain") + if plainTextStr != "" { + parsed, err := strconv.ParseBool(plainTextStr) + if err != nil { + return nil, CodedError(400, fmt.Sprintf("Unknown option for plain: %v", err)) + } + plainText = parsed + } + // Build the request and parse the ACL token args := cstructs.MonitorRequest{ - NodeID: nodeID, - LogLevel: logLevel, - LogJSON: logJSON, + NodeID: nodeID, + LogLevel: logLevel, + LogJSON: logJSON, + PlainText: plainText, } + s.parse(resp, req, &args.QueryOptions.Region, 
&args.QueryOptions) // Make the RPC diff --git a/command/agent/agent_endpoint_test.go b/command/agent/agent_endpoint_test.go index 504fc1634..572742fe0 100644 --- a/command/agent/agent_endpoint_test.go +++ b/command/agent/agent_endpoint_test.go @@ -314,6 +314,39 @@ func TestHTTP_AgentMonitor(t *testing.T) { }) } + // plain param set to true + { + req, err := http.NewRequest("GET", "/v1/agent/monitor?log_level=debug&plain=true", nil) + require.Nil(t, err) + resp := newClosableRecorder() + defer resp.Close() + + go func() { + _, err = s.Server.AgentMonitor(resp, req) + require.NoError(t, err) + }() + + // send the same log until monitor sink is set up + maxLogAttempts := 10 + tried := 0 + testutil.WaitForResult(func() (bool, error) { + if tried < maxLogAttempts { + s.Server.logger.Debug("log that should be sent") + tried++ + } + + got := resp.Body.String() + want := `[DEBUG] http: log that should be sent` + if strings.Contains(got, want) { + return true, nil + } + + return false, fmt.Errorf("missing expected log, got: %v, want: %v", got, want) + }, func(err error) { + require.Fail(t, err.Error()) + }) + } + // stream logs for a given node { req, err := http.NewRequest("GET", "/v1/agent/monitor?log_level=warn&node_id="+s.client.NodeID(), nil) From 8ccb770b00d3865bb4202f2f1ef7019d889e4261 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Tue, 5 Nov 2019 11:01:50 -0500 Subject: [PATCH 33/34] simplify logch goroutine --- command/agent/monitor/monitor.go | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/command/agent/monitor/monitor.go b/command/agent/monitor/monitor.go index d85719712..2c28a09f6 100644 --- a/command/agent/monitor/monitor.go +++ b/command/agent/monitor/monitor.go @@ -81,29 +81,32 @@ func (d *monitor) Start() <-chan []byte { d.logger.RegisterSink(d.sink) streamCh := make(chan []byte, d.bufSize) + + // run a go routine that listens for streamed + // log messages and sends them to streamCh go func() { - defer close(streamCh) + defer func() { + d.logger.DeregisterSink(d.sink) + close(streamCh) + }() + for { select { case log := <-d.logCh: select { case <-d.doneCh: - d.logger.DeregisterSink(d.sink) - close(d.logCh) return case streamCh <- log: } case <-d.doneCh: - d.Lock() - defer d.Unlock() - - d.logger.DeregisterSink(d.sink) - close(d.logCh) return } } }() + // run a go routine that periodically checks for + // dropped messages and makes room on the logCh + // to add a dropped message count warning go func() { // loop and check for dropped messages LOOP: From 03f0aff0103e9508cc35040157e6cb2a6b6cba65 Mon Sep 17 00:00:00 2001 From: Drew Bailey <2614075+drewbailey@users.noreply.github.com> Date: Tue, 5 Nov 2019 11:12:55 -0500 Subject: [PATCH 34/34] unlock before returning, no need for label comment, trigger build return length written --- command/agent/monitor/monitor.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/command/agent/monitor/monitor.go b/command/agent/monitor/monitor.go index 2c28a09f6..d788110c1 100644 --- a/command/agent/monitor/monitor.go +++ b/command/agent/monitor/monitor.go @@ -70,14 +70,16 @@ func new(buf int, logger log.InterceptLogger, opts *log.LoggerOptions) *monitor return sw } -// Stop stops the monitoring process +// Stop deregisters the sink and stops the monitoring process func (d *monitor) Stop() { + d.logger.DeregisterSink(d.sink) close(d.doneCh) } // Start registers a sink on the monitor's logger and starts sending // received log 
messages over the returned channel. func (d *monitor) Start() <-chan []byte { + // register our sink with the logger d.logger.RegisterSink(d.sink) streamCh := make(chan []byte, d.bufSize) @@ -85,10 +87,7 @@ func (d *monitor) Start() <-chan []byte { // run a go routine that listens for streamed // log messages and sends them to streamCh go func() { - defer func() { - d.logger.DeregisterSink(d.sink) - close(streamCh) - }() + defer close(streamCh) for { select { @@ -109,11 +108,10 @@ func (d *monitor) Start() <-chan []byte { // to add a dropped message count warning go func() { // loop and check for dropped messages - LOOP: for { select { case <-d.doneCh: - break LOOP + return case <-time.After(d.droppedDuration): d.Lock() @@ -122,7 +120,8 @@ func (d *monitor) Start() <-chan []byte { dropped := fmt.Sprintf("[WARN] Monitor dropped %d logs during monitor request\n", d.droppedCount) select { case <-d.doneCh: - break LOOP + d.Unlock() + return // Try sending dropped message count to logCh in case // there is room in the buffer now. case d.logCh <- []byte(dropped): @@ -168,5 +167,6 @@ func (d *monitor) Write(p []byte) (n int, err error) { default: d.droppedCount++ } - return + + return len(p), nil }
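
Taken together, the series leaves `command/agent/monitor` with a small interface: `New` builds a monitor around an intercept logger, `Start` registers the sink and returns a channel of rendered log lines, and `Stop` de-registers the sink and tears the stream down. Below is a minimal sketch of a standalone caller of that interface, assuming the `github.com/hashicorp/nomad/command/agent/monitor` and `github.com/hashicorp/go-hclog` import paths used in the diffs above; the `main` package, logger name, buffer size, and timing values are illustrative only and not part of the patches.

```go
package main

import (
	"fmt"
	"time"

	hclog "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/nomad/command/agent/monitor"
)

func main() {
	// An InterceptLogger is required so the monitor can register its sink.
	logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name:  "example-agent",
		Level: hclog.Warn,
	})

	// Buffer up to 512 log lines and stream at Debug level, independent of
	// the Warn level configured on the logger itself.
	m := monitor.New(512, logger, &hclog.LoggerOptions{
		Level: hclog.Debug,
	})

	logCh := m.Start()
	defer m.Stop()

	// Emit a few log lines in the background; they are delivered over logCh
	// even though the logger would normally suppress Debug output.
	go func() {
		for i := 0; i < 5; i++ {
			logger.Debug("example log line", "i", i)
			time.Sleep(10 * time.Millisecond)
		}
	}()

	// Drain the stream until it closes or a deadline passes; the deferred
	// Stop() closes the channel returned by Start().
	timeout := time.After(500 * time.Millisecond)
	for {
		select {
		case b, ok := <-logCh:
			if !ok {
				return
			}
			fmt.Print(string(b))
		case <-timeout:
			return
		}
	}
}
```

The same Start/Stop pairing is what the client and server `monitor` RPC handlers in the later patches rely on: the caller defers `Stop()`, and the dropped-message bookkeeping inside the monitor takes care of signalling when the buffered channel overflows.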