From cc11d9a56357f2bded20fa7dd36475e2807254d8 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Wed, 10 May 2017 17:39:45 -0700 Subject: [PATCH 01/36] Add new gc_max_allocs tuneable More than gc_max_allocs may be running on a node, but terminal allocs will be garbage collected to try to keep the total number below the limit. --- client/client.go | 19 ++- client/config/config.go | 5 + client/gc.go | 94 ++++++++--- client/gc_test.go | 152 ++++++++++++++---- client/task_runner_test.go | 5 +- command/agent/agent.go | 1 + command/agent/config-test-fixtures/basic.hcl | 1 + command/agent/config.go | 15 +- command/agent/config_parse.go | 1 + .../docs/agent/configuration/client.html.md | 6 + 10 files changed, 234 insertions(+), 65 deletions(-) diff --git a/client/client.go b/client/client.go index 59460bf11..c9beebae4 100644 --- a/client/client.go +++ b/client/client.go @@ -240,13 +240,15 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic // Add the garbage collector gcConfig := &GCConfig{ + MaxAllocs: cfg.GCMaxAllocs, DiskUsageThreshold: cfg.GCDiskUsageThreshold, InodeUsageThreshold: cfg.GCInodeUsageThreshold, Interval: cfg.GCInterval, ParallelDestroys: cfg.GCParallelDestroys, ReservedDiskMB: cfg.Node.Reserved.DiskMB, } - c.garbageCollector = NewAllocGarbageCollector(logger, statsCollector, gcConfig) + c.garbageCollector = NewAllocGarbageCollector(logger, statsCollector, c, gcConfig) + go c.garbageCollector.Run() // Setup the node if err := c.setupNode(); err != nil { @@ -482,17 +484,13 @@ func (c *Client) RPC(method string, args interface{}, reply interface{}) error { // Stats is used to return statistics for debugging and insight // for various sub-systems func (c *Client) Stats() map[string]map[string]string { - c.allocLock.RLock() - numAllocs := len(c.allocs) - c.allocLock.RUnlock() - c.heartbeatLock.Lock() defer c.heartbeatLock.Unlock() stats := map[string]map[string]string{ "client": map[string]string{ "node_id": c.Node().ID, "known_servers": c.servers.all().String(), - "num_allocations": strconv.Itoa(numAllocs), + "num_allocations": strconv.Itoa(c.NumAllocs()), "last_heartbeat": fmt.Sprintf("%v", time.Since(c.lastHeartbeat)), "heartbeat_ttl": fmt.Sprintf("%v", c.heartbeatTTL), }, @@ -722,6 +720,15 @@ func (c *Client) getAllocRunners() map[string]*AllocRunner { return runners } +// NumAllocs returns the number of allocs this client has. Used to +// fulfill the AllocCounter interface for the GC. +func (c *Client) NumAllocs() int { + c.allocLock.RLock() + n := len(c.allocs) + c.allocLock.RUnlock() + return n +} + // nodeID restores, or generates if necessary, a unique node ID and SecretID. // The node ID is, if available, a persistent unique ID. The secret ID is a // high-entropy random UUID. diff --git a/client/config/config.go b/client/config/config.go index 24e115f35..8d137948b 100644 --- a/client/config/config.go +++ b/client/config/config.go @@ -171,6 +171,10 @@ type Config struct { // beyond which the Nomad client triggers GC of the terminal allocations GCInodeUsageThreshold float64 + // GCMaxAllocs is the maximum number of allocations a node can have + // before garbage collection is triggered. 
+ GCMaxAllocs int + // LogLevel is the level of the logs to putout LogLevel string @@ -205,6 +209,7 @@ func DefaultConfig() *Config { GCParallelDestroys: 2, GCDiskUsageThreshold: 80, GCInodeUsageThreshold: 70, + GCMaxAllocs: 200, } } diff --git a/client/gc.go b/client/gc.go index a07db1415..d6b58d777 100644 --- a/client/gc.go +++ b/client/gc.go @@ -18,6 +18,9 @@ const ( // GCConfig allows changing the behaviour of the garbage collector type GCConfig struct { + // MaxAllocs is the maximum number of allocations to track before a GC + // is triggered. + MaxAllocs int DiskUsageThreshold float64 InodeUsageThreshold float64 Interval time.Duration @@ -25,10 +28,17 @@ type GCConfig struct { ParallelDestroys int } +// AllocCounter is used by AllocGarbageCollector to discover how many +// allocations a node has and is generally fulfilled by the Client. +type AllocCounter interface { + NumAllocs() int +} + // AllocGarbageCollector garbage collects terminated allocations on a node type AllocGarbageCollector struct { allocRunners *IndexedGCAllocPQ statsCollector stats.NodeStatsCollector + allocCounter AllocCounter config *GCConfig logger *log.Logger destroyCh chan struct{} @@ -36,8 +46,9 @@ type AllocGarbageCollector struct { } // NewAllocGarbageCollector returns a garbage collector for terminated -// allocations on a node. -func NewAllocGarbageCollector(logger *log.Logger, statsCollector stats.NodeStatsCollector, config *GCConfig) *AllocGarbageCollector { +// allocations on a node. Must call Run() in a goroutine enable periodic +// garbage collection. +func NewAllocGarbageCollector(logger *log.Logger, statsCollector stats.NodeStatsCollector, ac AllocCounter, config *GCConfig) *AllocGarbageCollector { // Require at least 1 to make progress if config.ParallelDestroys <= 0 { logger.Printf("[WARN] client: garbage collector defaulting parallism to 1 due to invalid input value of %d", config.ParallelDestroys) @@ -47,17 +58,18 @@ func NewAllocGarbageCollector(logger *log.Logger, statsCollector stats.NodeStats gc := &AllocGarbageCollector{ allocRunners: NewIndexedGCAllocPQ(), statsCollector: statsCollector, + allocCounter: ac, config: config, logger: logger, destroyCh: make(chan struct{}, config.ParallelDestroys), shutdownCh: make(chan struct{}), } - go gc.run() return gc } -func (a *AllocGarbageCollector) run() { +// Run the periodic garbage collector. 
+func (a *AllocGarbageCollector) Run() { ticker := time.NewTicker(a.config.Interval) for { select { @@ -100,23 +112,33 @@ func (a *AllocGarbageCollector) keepUsageBelowThreshold() error { break } - if diskStats.UsedPercent <= a.config.DiskUsageThreshold && - diskStats.InodesUsedPercent <= a.config.InodeUsageThreshold { + reason := "" + + switch { + case diskStats.UsedPercent > a.config.DiskUsageThreshold: + reason = fmt.Sprintf("disk usage of %.0f is over gc threshold of %.0f", + diskStats.UsedPercent, a.config.DiskUsageThreshold) + case diskStats.InodesUsedPercent > a.config.InodeUsageThreshold: + reason = fmt.Sprintf("inode usage of %.0f is over gc threshold of %.0f", + diskStats.InodesUsedPercent, a.config.InodeUsageThreshold) + case a.numAllocs() > a.config.MaxAllocs: + reason = fmt.Sprintf("number of allocations is over the limit (%d)", a.config.MaxAllocs) + } + + // No reason to gc, exit + if reason == "" { break } // Collect an allocation gcAlloc := a.allocRunners.Pop() if gcAlloc == nil { + a.logger.Printf("[WARN] client: garbage collection due to %s skipped because no terminal allocations", reason) break } - ar := gcAlloc.allocRunner - alloc := ar.Alloc() - a.logger.Printf("[INFO] client: garbage collecting allocation %v", alloc.ID) - // Destroy the alloc runner and wait until it exits - a.destroyAllocRunner(ar) + a.destroyAllocRunner(gcAlloc.allocRunner, reason) } return nil } @@ -124,7 +146,13 @@ func (a *AllocGarbageCollector) keepUsageBelowThreshold() error { // destroyAllocRunner is used to destroy an allocation runner. It will acquire a // lock to restrict parallelism and then destroy the alloc runner, returning // once the allocation has been destroyed. -func (a *AllocGarbageCollector) destroyAllocRunner(ar *AllocRunner) { +func (a *AllocGarbageCollector) destroyAllocRunner(ar *AllocRunner, reason string) { + id := "" + if alloc := ar.Alloc(); alloc != nil { + id = alloc.ID + } + a.logger.Printf("[INFO] client: garbage collecting allocation %s due to %s", id, reason) + // Acquire the destroy lock select { case <-a.shutdownCh: @@ -155,11 +183,7 @@ func (a *AllocGarbageCollector) Collect(allocID string) error { if err != nil { return fmt.Errorf("unable to collect allocation %q: %v", allocID, err) } - - ar := gcAlloc.allocRunner - a.logger.Printf("[INFO] client: garbage collecting allocation %q", ar.Alloc().ID) - - a.destroyAllocRunner(ar) + a.destroyAllocRunner(gcAlloc.allocRunner, "forced collection") return nil } @@ -177,9 +201,7 @@ func (a *AllocGarbageCollector) CollectAll() error { break } - ar := gcAlloc.allocRunner - a.logger.Printf("[INFO] client: garbage collecting alloc runner for alloc %q", ar.Alloc().ID) - go a.destroyAllocRunner(ar) + go a.destroyAllocRunner(gcAlloc.allocRunner, "forced full collection") } return nil } @@ -187,6 +209,26 @@ func (a *AllocGarbageCollector) CollectAll() error { // MakeRoomFor garbage collects enough number of allocations in the terminal // state to make room for new allocations func (a *AllocGarbageCollector) MakeRoomFor(allocations []*structs.Allocation) error { + // GC allocs until below the max limit + the new allocations + max := a.config.MaxAllocs - len(allocations) + for a.numAllocs() > max { + select { + case <-a.shutdownCh: + return nil + default: + } + + gcAlloc := a.allocRunners.Pop() + if gcAlloc == nil { + // It's fine if we can't lower below the limit here as + // we'll keep trying to drop below the limit with each + // periodic gc + break + } + + // Destroy the alloc runner and wait until it exits + 
a.destroyAllocRunner(gcAlloc.allocRunner, "new allocations") + } totalResource := &structs.Resources{} for _, alloc := range allocations { if err := totalResource.Add(alloc.Resources); err != nil { @@ -244,10 +286,9 @@ func (a *AllocGarbageCollector) MakeRoomFor(allocations []*structs.Allocation) e ar := gcAlloc.allocRunner alloc := ar.Alloc() - a.logger.Printf("[INFO] client: garbage collecting allocation %v", alloc.ID) // Destroy the alloc runner and wait until it exits - a.destroyAllocRunner(ar) + a.destroyAllocRunner(ar, fmt.Sprintf("freeing %d MB for new allocations", alloc.Resources.DiskMB)) // Call stats collect again diskCleared += alloc.Resources.DiskMB @@ -261,8 +302,7 @@ func (a *AllocGarbageCollector) MarkForCollection(ar *AllocRunner) error { return fmt.Errorf("nil allocation runner inserted for garbage collection") } if ar.Alloc() == nil { - a.logger.Printf("[INFO] client: alloc is nil, so garbage collecting") - a.destroyAllocRunner(ar) + a.destroyAllocRunner(ar, "alloc is nil") } a.logger.Printf("[INFO] client: marking allocation %v for GC", ar.Alloc().ID) @@ -281,6 +321,12 @@ func (a *AllocGarbageCollector) Remove(ar *AllocRunner) { } } +// numAllocs returns the total number of allocs tracked by the client as well +// as those marked for GC. +func (a *AllocGarbageCollector) numAllocs() int { + return a.allocRunners.Length() + a.allocCounter.NumAllocs() +} + // GCAlloc wraps an allocation runner and an index enabling it to be used within // a PQ type GCAlloc struct { diff --git a/client/gc_test.go b/client/gc_test.go index f4fdedfbf..e132a8e9f 100644 --- a/client/gc_test.go +++ b/client/gc_test.go @@ -1,8 +1,6 @@ package client import ( - "log" - "os" "testing" "time" @@ -11,11 +9,14 @@ import ( "github.com/hashicorp/nomad/nomad/structs" ) -var gcConfig = GCConfig{ - DiskUsageThreshold: 80, - InodeUsageThreshold: 70, - Interval: 1 * time.Minute, - ReservedDiskMB: 0, +func gcConfig() *GCConfig { + return &GCConfig{ + DiskUsageThreshold: 80, + InodeUsageThreshold: 70, + Interval: 1 * time.Minute, + ReservedDiskMB: 0, + MaxAllocs: 100, + } } func TestIndexedGCAllocPQ(t *testing.T) { @@ -57,6 +58,15 @@ func TestIndexedGCAllocPQ(t *testing.T) { } } +// MockAllocCounter implements AllocCounter interface. 
+type MockAllocCounter struct { + allocs int +} + +func (m *MockAllocCounter) NumAllocs() int { + return m.allocs +} + type MockStatsCollector struct { availableValues []uint64 usedPercents []float64 @@ -90,8 +100,8 @@ func (m *MockStatsCollector) Stats() *stats.HostStats { } func TestAllocGarbageCollector_MarkForCollection(t *testing.T) { - logger := log.New(os.Stdout, "", 0) - gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &gcConfig) + logger := testLogger() + gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig()) _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) if err := gc.MarkForCollection(ar1); err != nil { @@ -105,8 +115,8 @@ func TestAllocGarbageCollector_MarkForCollection(t *testing.T) { } func TestAllocGarbageCollector_Collect(t *testing.T) { - logger := log.New(os.Stdout, "", 0) - gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &gcConfig) + logger := testLogger() + gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig()) _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) _, ar2 := testAllocRunnerFromAlloc(mock.Alloc(), false) @@ -131,8 +141,8 @@ func TestAllocGarbageCollector_Collect(t *testing.T) { } func TestAllocGarbageCollector_CollectAll(t *testing.T) { - logger := log.New(os.Stdout, "", 0) - gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &gcConfig) + logger := testLogger() + gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig()) _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) _, ar2 := testAllocRunnerFromAlloc(mock.Alloc(), false) @@ -153,10 +163,11 @@ func TestAllocGarbageCollector_CollectAll(t *testing.T) { } func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T) { - logger := log.New(os.Stdout, "", 0) + logger := testLogger() statsCollector := &MockStatsCollector{} - gcConfig.ReservedDiskMB = 20 - gc := NewAllocGarbageCollector(logger, statsCollector, &gcConfig) + conf := gcConfig() + conf.ReservedDiskMB = 20 + gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf) _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) close(ar1.waitCh) @@ -190,10 +201,11 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T) } func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) { - logger := log.New(os.Stdout, "", 0) + logger := testLogger() statsCollector := &MockStatsCollector{} - gcConfig.ReservedDiskMB = 20 - gc := NewAllocGarbageCollector(logger, statsCollector, &gcConfig) + conf := gcConfig() + conf.ReservedDiskMB = 20 + gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf) _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) close(ar1.waitCh) @@ -228,10 +240,11 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) { } func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) { - logger := log.New(os.Stdout, "", 0) + logger := testLogger() statsCollector := &MockStatsCollector{} - gcConfig.ReservedDiskMB = 20 - gc := NewAllocGarbageCollector(logger, statsCollector, &gcConfig) + conf := gcConfig() + conf.ReservedDiskMB = 20 + gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf) _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) close(ar1.waitCh) @@ -262,10 +275,11 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) { } func 
TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T) { - logger := log.New(os.Stdout, "", 0) + logger := testLogger() statsCollector := &MockStatsCollector{} - gcConfig.ReservedDiskMB = 20 - gc := NewAllocGarbageCollector(logger, statsCollector, &gcConfig) + conf := gcConfig() + conf.ReservedDiskMB = 20 + gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf) _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) close(ar1.waitCh) @@ -294,11 +308,49 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T) } } +func TestAllocGarbageCollector_MakeRoomForAllocations_MaxAllocs(t *testing.T) { + const ( + liveAllocs = 3 + maxAllocs = 6 + gcAllocs = 4 + gcAllocsLeft = 1 + ) + + logger := testLogger() + statsCollector := &MockStatsCollector{ + availableValues: []uint64{10 * 1024 * MB}, + usedPercents: []float64{0}, + inodePercents: []float64{0}, + } + allocCounter := &MockAllocCounter{allocs: liveAllocs} + conf := gcConfig() + conf.MaxAllocs = maxAllocs + gc := NewAllocGarbageCollector(logger, statsCollector, allocCounter, conf) + + for i := 0; i < gcAllocs; i++ { + _, ar := testAllocRunnerFromAlloc(mock.Alloc(), false) + close(ar.waitCh) + if err := gc.MarkForCollection(ar); err != nil { + t.Fatalf("error marking alloc for gc: %v", err) + } + } + + if err := gc.MakeRoomFor([]*structs.Allocation{mock.Alloc(), mock.Alloc()}); err != nil { + t.Fatalf("error making room for 2 new allocs: %v", err) + } + + // There should be gcAllocsLeft alloc runners left to be collected + if n := len(gc.allocRunners.index); n != gcAllocsLeft { + t.Fatalf("expected %d remaining GC-able alloc runners but found %d", gcAllocsLeft, n) + } +} + func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) { - logger := log.New(os.Stdout, "", 0) + logger := testLogger() statsCollector := &MockStatsCollector{} - gcConfig.ReservedDiskMB = 20 - gc := NewAllocGarbageCollector(logger, statsCollector, &gcConfig) + conf := gcConfig() + conf.ReservedDiskMB = 20 + gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf) _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) close(ar1.waitCh) @@ -329,10 +381,11 @@ func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) { } func TestAllocGarbageCollector_UsedPercentThreshold(t *testing.T) { - logger := log.New(os.Stdout, "", 0) + logger := testLogger() statsCollector := &MockStatsCollector{} - gcConfig.ReservedDiskMB = 20 - gc := NewAllocGarbageCollector(logger, statsCollector, &gcConfig) + conf := gcConfig() + conf.ReservedDiskMB = 20 + gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf) _, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false) close(ar1.waitCh) @@ -363,3 +416,40 @@ func TestAllocGarbageCollector_UsedPercentThreshold(t *testing.T) { t.Fatalf("gcAlloc: %v", gcAlloc) } } + +func TestAllocGarbageCollector_MaxAllocsThreshold(t *testing.T) { + const ( + liveAllocs = 3 + maxAllocs = 6 + gcAllocs = 4 + gcAllocsLeft = 1 + ) + + logger := testLogger() + statsCollector := &MockStatsCollector{ + availableValues: []uint64{1000}, + usedPercents: []float64{0}, + inodePercents: []float64{0}, + } + allocCounter := &MockAllocCounter{allocs: liveAllocs} + conf := gcConfig() + conf.MaxAllocs = 4 + gc := NewAllocGarbageCollector(logger, statsCollector, allocCounter, conf) + + for i := 0; i < gcAllocs; i++ { + _, ar := testAllocRunnerFromAlloc(mock.Alloc(), false) + close(ar.waitCh) + if err := gc.MarkForCollection(ar); err != nil { + 
t.Fatalf("error marking alloc for gc: %v", err) + } + } + + if err := gc.keepUsageBelowThreshold(); err != nil { + t.Fatalf("error gc'ing: %v", err) + } + + // We should have gc'd down to MaxAllocs + if n := len(gc.allocRunners.index); n != gcAllocsLeft { + t.Fatalf("expected remaining gc allocs (%d) to equal %d", n, gcAllocsLeft) + } +} diff --git a/client/task_runner_test.go b/client/task_runner_test.go index cfac5ea42..51117d8d0 100644 --- a/client/task_runner_test.go +++ b/client/task_runner_test.go @@ -31,7 +31,10 @@ func testLogger() *log.Logger { } func prefixedTestLogger(prefix string) *log.Logger { - return log.New(os.Stderr, prefix, log.LstdFlags) + if testing.Verbose() { + return log.New(os.Stderr, prefix, log.LstdFlags) + } + return log.New(ioutil.Discard, "", 0) } type MockTaskStateUpdater struct { diff --git a/command/agent/agent.go b/command/agent/agent.go index d09ee4147..45912712f 100644 --- a/command/agent/agent.go +++ b/command/agent/agent.go @@ -321,6 +321,7 @@ func (a *Agent) clientConfig() (*clientconfig.Config, error) { conf.GCParallelDestroys = a.config.Client.GCParallelDestroys conf.GCDiskUsageThreshold = a.config.Client.GCDiskUsageThreshold conf.GCInodeUsageThreshold = a.config.Client.GCInodeUsageThreshold + conf.GCMaxAllocs = a.config.Client.GCMaxAllocs conf.NoHostUUID = a.config.Client.NoHostUUID return conf, nil diff --git a/command/agent/config-test-fixtures/basic.hcl b/command/agent/config-test-fixtures/basic.hcl index 8d4880a7d..d75ca579c 100644 --- a/command/agent/config-test-fixtures/basic.hcl +++ b/command/agent/config-test-fixtures/basic.hcl @@ -58,6 +58,7 @@ client { gc_parallel_destroys = 6 gc_disk_usage_threshold = 82 gc_inode_usage_threshold = 91 + gc_max_allocs = 200 no_host_uuid = true } server { diff --git a/command/agent/config.go b/command/agent/config.go index cccd095f1..e8ee9117d 100644 --- a/command/agent/config.go +++ b/command/agent/config.go @@ -209,14 +209,18 @@ type ClientConfig struct { // collector will allow. GCParallelDestroys int `mapstructure:"gc_parallel_destroys"` - // GCInodeUsageThreshold is the inode usage threshold beyond which the Nomad - // client triggers GC of the terminal allocations + // GCDiskUsageThreshold is the disk usage threshold given as a percent + // beyond which the Nomad client triggers GC of terminal allocations GCDiskUsageThreshold float64 `mapstructure:"gc_disk_usage_threshold"` // GCInodeUsageThreshold is the inode usage threshold beyond which the Nomad // client triggers GC of the terminal allocations GCInodeUsageThreshold float64 `mapstructure:"gc_inode_usage_threshold"` + // GCMaxAllocs is the maximum number of allocations a node can have + // before garbage collection is triggered. + GCMaxAllocs int `mapstructure:"gc_max_allocs"` + // NoHostUUID disables using the host's UUID and will force generation of a // random UUID. 
NoHostUUID bool `mapstructure:"no_host_uuid"` @@ -503,6 +507,7 @@ func DevConfig() *Config { conf.Client.GCInterval = 10 * time.Minute conf.Client.GCDiskUsageThreshold = 99 conf.Client.GCInodeUsageThreshold = 99 + conf.Client.GCMaxAllocs = 200 return conf } @@ -532,8 +537,9 @@ func DefaultConfig() *Config { Reserved: &Resources{}, GCInterval: 1 * time.Minute, GCParallelDestroys: 2, - GCInodeUsageThreshold: 70, GCDiskUsageThreshold: 80, + GCInodeUsageThreshold: 70, + GCMaxAllocs: 200, }, Server: &ServerConfig{ Enabled: false, @@ -949,6 +955,9 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig { if b.GCInodeUsageThreshold != 0 { result.GCInodeUsageThreshold = b.GCInodeUsageThreshold } + if b.GCMaxAllocs != 0 { + result.GCMaxAllocs = b.GCMaxAllocs + } if b.NoHostUUID { result.NoHostUUID = b.NoHostUUID } diff --git a/command/agent/config_parse.go b/command/agent/config_parse.go index 403f5b75b..1d0f387bf 100644 --- a/command/agent/config_parse.go +++ b/command/agent/config_parse.go @@ -346,6 +346,7 @@ func parseClient(result **ClientConfig, list *ast.ObjectList) error { "gc_disk_usage_threshold", "gc_inode_usage_threshold", "gc_parallel_destroys", + "gc_max_allocs", "no_host_uuid", } if err := checkHCLKeys(listVal, valid); err != nil { diff --git a/website/source/docs/agent/configuration/client.html.md b/website/source/docs/agent/configuration/client.html.md index 1ce6fe77f..9d29da485 100644 --- a/website/source/docs/agent/configuration/client.html.md +++ b/website/source/docs/agent/configuration/client.html.md @@ -100,6 +100,12 @@ client { - `gc_inode_usage_threshold` `(float: 70)` - Specifies the inode usage percent which Nomad tries to maintain by garbage collecting terminal allocations. +- `gc_max_allocs` `(int: 200)` - Specifies the maximum number of allocations + which a client will track before triggering a garbage collection of terminal + allocations. This will *not* limit the number of allocations a node can run at + a time, however after `gc_max_allocs` every new allocation will cause terminal + allocations to be GC'd. + - `gc_parallel_destroys` `(int: 2)` - Specifies the maximum number of parallel destroys allowed by the garbage collector. This value should be relatively low to avoid high resource usage during garbage collections. 
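To make the mechanism of this patch concrete: the garbage collector counts live allocations (via the new AllocCounter interface, fulfilled by the client) plus terminal allocations still queued in the IndexedGCAllocPQ, and pops terminal allocations until the combined total is at or below the limit. Below is a minimal, self-contained sketch of that loop; fakeClient and the plain slice standing in for the priority queue are illustrative stand-ins, not the real implementations.

```go
package main

import "fmt"

// AllocCounter mirrors the interface added by this patch: anything that can
// report how many allocations a node is currently tracking.
type AllocCounter interface {
	NumAllocs() int
}

// fakeClient is an illustrative stand-in for the Nomad client.
type fakeClient struct{ live int }

func (c *fakeClient) NumAllocs() int { return c.live }

// gcUntilBelowMax pops terminal allocations until live allocations plus
// queued terminal allocations no longer exceed maxAllocs, mirroring the
// numAllocs() > MaxAllocs check in keepUsageBelowThreshold.
func gcUntilBelowMax(counter AllocCounter, terminal []string, maxAllocs int) []string {
	for counter.NumAllocs()+len(terminal) > maxAllocs {
		if len(terminal) == 0 {
			// Nothing terminal left to collect: gc_max_allocs trims
			// terminal allocs but never stops live ones from running.
			break
		}
		var id string
		id, terminal = terminal[0], terminal[1:]
		fmt.Printf("garbage collecting allocation %s due to max allocs\n", id)
	}
	return terminal
}

func main() {
	client := &fakeClient{live: 3}
	terminal := []string{"a1", "a2", "a3", "a4"}

	// 3 live + 4 terminal = 7 > 6, so exactly one terminal alloc is GC'd.
	remaining := gcUntilBelowMax(client, terminal, 6)
	fmt.Printf("%d terminal allocations left\n", len(remaining))
}
```

This matches the behavior the documentation change above describes: exceeding gc_max_allocs only triggers collection of terminal allocations; it never caps how many allocations a node runs.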
From e3c1d35111876d4d69a78c7877b54d95c6a16f7a Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Fri, 12 May 2017 15:57:27 -0700 Subject: [PATCH 02/36] Lower default gc_max_allocs to 50 --- client/config/config.go | 2 +- command/agent/config-test-fixtures/basic.hcl | 2 +- command/agent/config.go | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/client/config/config.go b/client/config/config.go index 8d137948b..db4d8233c 100644 --- a/client/config/config.go +++ b/client/config/config.go @@ -209,7 +209,7 @@ func DefaultConfig() *Config { GCParallelDestroys: 2, GCDiskUsageThreshold: 80, GCInodeUsageThreshold: 70, - GCMaxAllocs: 200, + GCMaxAllocs: 50, } } diff --git a/command/agent/config-test-fixtures/basic.hcl b/command/agent/config-test-fixtures/basic.hcl index d75ca579c..006a4340c 100644 --- a/command/agent/config-test-fixtures/basic.hcl +++ b/command/agent/config-test-fixtures/basic.hcl @@ -58,7 +58,7 @@ client { gc_parallel_destroys = 6 gc_disk_usage_threshold = 82 gc_inode_usage_threshold = 91 - gc_max_allocs = 200 + gc_max_allocs = 50 no_host_uuid = true } server { diff --git a/command/agent/config.go b/command/agent/config.go index e8ee9117d..a96202d3d 100644 --- a/command/agent/config.go +++ b/command/agent/config.go @@ -507,7 +507,7 @@ func DevConfig() *Config { conf.Client.GCInterval = 10 * time.Minute conf.Client.GCDiskUsageThreshold = 99 conf.Client.GCInodeUsageThreshold = 99 - conf.Client.GCMaxAllocs = 200 + conf.Client.GCMaxAllocs = 50 return conf } @@ -539,7 +539,7 @@ func DefaultConfig() *Config { GCParallelDestroys: 2, GCDiskUsageThreshold: 80, GCInodeUsageThreshold: 70, - GCMaxAllocs: 200, + GCMaxAllocs: 50, }, Server: &ServerConfig{ Enabled: false, From fb72f20bb165a021242522d1d2ccdf8cb389f7a3 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Fri, 12 May 2017 16:03:22 -0700 Subject: [PATCH 03/36] gc_max_allocs should include blocked & migrating --- client/client.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/client/client.go b/client/client.go index c9beebae4..a4b751b24 100644 --- a/client/client.go +++ b/client/client.go @@ -724,7 +724,13 @@ func (c *Client) getAllocRunners() map[string]*AllocRunner { // fulfill the AllocCounter interface for the GC. 
func (c *Client) NumAllocs() int { c.allocLock.RLock() + c.blockedAllocsLock.Lock() + c.migratingAllocsLock.Lock() n := len(c.allocs) + n += len(c.blockedAllocations) + n += len(c.migratingAllocs) + c.migratingAllocsLock.Unlock() + c.blockedAllocsLock.Unlock() c.allocLock.RUnlock() return n } From 0d76cd24bb3faa77aaa369e90ac8cac6c6917e08 Mon Sep 17 00:00:00 2001 From: Ian Eyberg Date: Thu, 25 May 2017 11:49:33 -0700 Subject: [PATCH 04/36] dont throw away errors in log rotation --- client/driver/logging/rotator.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/client/driver/logging/rotator.go b/client/driver/logging/rotator.go index 5cf200ee2..7494ad09c 100644 --- a/client/driver/logging/rotator.go +++ b/client/driver/logging/rotator.go @@ -220,6 +220,7 @@ func (f *FileRotator) purgeOldFiles() { var fIndexes []int files, err := ioutil.ReadDir(f.path) if err != nil { + f.logger.Printf("[ERROR] driver.rotator: error getting directory listing: %v", err) return } // Inserting all the rotated files in a slice @@ -228,6 +229,7 @@ func (f *FileRotator) purgeOldFiles() { fileIdx := strings.TrimPrefix(fi.Name(), fmt.Sprintf("%s.", f.baseFileName)) n, err := strconv.Atoi(fileIdx) if err != nil { + f.logger.Printf("[ERROR] driver.rotator: error extracting file index: %v", err) continue } fIndexes = append(fIndexes, n) @@ -246,7 +248,10 @@ func (f *FileRotator) purgeOldFiles() { toDelete := fIndexes[0 : len(fIndexes)-f.MaxFiles] for _, fIndex := range toDelete { fname := filepath.Join(f.path, fmt.Sprintf("%s.%d", f.baseFileName, fIndex)) - os.RemoveAll(fname) + err := os.RemoveAll(fname) + if err != nil { + f.logger.Printf("[ERROR] driver.rotator: error removing file: %v", err) + } } f.oldestLogFileIdx = fIndexes[0] case <-f.doneCh: From f6ea22302c628545e146230541b5754db7615c47 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Tue, 30 May 2017 11:39:12 -0700 Subject: [PATCH 05/36] Update docs to match gc_max_allocs default --- website/source/docs/agent/configuration/client.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/agent/configuration/client.html.md b/website/source/docs/agent/configuration/client.html.md index 9d29da485..b817c154b 100644 --- a/website/source/docs/agent/configuration/client.html.md +++ b/website/source/docs/agent/configuration/client.html.md @@ -100,7 +100,7 @@ client { - `gc_inode_usage_threshold` `(float: 70)` - Specifies the inode usage percent which Nomad tries to maintain by garbage collecting terminal allocations. -- `gc_max_allocs` `(int: 200)` - Specifies the maximum number of allocations +- `gc_max_allocs` `(int: 50)` - Specifies the maximum number of allocations which a client will track before triggering a garbage collection of terminal allocations. This will *not* limit the number of allocations a node can run at a time, however after `gc_max_allocs` every new allocation will cause terminal From f2476cfa671f63a1eeaae90d9fa8667cbae0eb63 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Tue, 30 May 2017 11:39:26 -0700 Subject: [PATCH 06/36] Fix config parsing test Went overboard before I realized there's only one test case. 
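A side note on the pattern this patch adopts: wrapping each fixture in t.Run gives every case its own name in `go test` output, removes the manual `continue` bookkeeping (t.Fatalf only aborts the subtest), and pretty.Diff reports a readable field-by-field diff instead of two opaque %#v dumps. A stripped-down illustration of the pattern follows; the table and the double helper are invented for the example.

```go
package example

import (
	"reflect"
	"strings"
	"testing"

	"github.com/kr/pretty"
)

func TestSubtestPattern(t *testing.T) {
	// Hypothetical table; the real test iterates over config fixture files.
	cases := []struct {
		Name     string
		Input    int
		Expected int
	}{
		{"doubles", 2, 4},
		{"zero", 0, 0},
	}

	double := func(n int) int { return n * 2 }

	for _, tc := range cases {
		// Each case runs as a named subtest, selectable with
		// `go test -run TestSubtestPattern/doubles`.
		t.Run(tc.Name, func(t *testing.T) {
			actual := double(tc.Input)
			if !reflect.DeepEqual(actual, tc.Expected) {
				// pretty.Diff renders one line per differing field.
				t.Errorf("diff (actual vs expected):\n%s",
					strings.Join(pretty.Diff(actual, tc.Expected), "\n"))
			}
		})
	}
}
```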
--- command/agent/config_parse_test.go | 31 +++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/command/agent/config_parse_test.go b/command/agent/config_parse_test.go index 9774b0db3..adff7035b 100644 --- a/command/agent/config_parse_test.go +++ b/command/agent/config_parse_test.go @@ -3,10 +3,12 @@ package agent import ( "path/filepath" "reflect" + "strings" "testing" "time" "github.com/hashicorp/nomad/nomad/structs/config" + "github.com/kr/pretty" ) func TestConfig_Parse(t *testing.T) { @@ -75,6 +77,7 @@ func TestConfig_Parse(t *testing.T) { GCParallelDestroys: 6, GCDiskUsageThreshold: 82, GCInodeUsageThreshold: 91, + GCMaxAllocs: 50, NoHostUUID: true, }, Server: &ServerConfig{ @@ -165,22 +168,20 @@ func TestConfig_Parse(t *testing.T) { } for _, tc := range cases { - t.Logf("Testing parse: %s", tc.File) + t.Run(tc.File, func(t *testing.T) { + path, err := filepath.Abs(filepath.Join("./config-test-fixtures", tc.File)) + if err != nil { + t.Fatalf("file: %s\n\n%s", tc.File, err) + } - path, err := filepath.Abs(filepath.Join("./config-test-fixtures", tc.File)) - if err != nil { - t.Fatalf("file: %s\n\n%s", tc.File, err) - continue - } + actual, err := ParseConfigFile(path) + if (err != nil) != tc.Err { + t.Fatalf("file: %s\n\n%s", tc.File, err) + } - actual, err := ParseConfigFile(path) - if (err != nil) != tc.Err { - t.Fatalf("file: %s\n\n%s", tc.File, err) - continue - } - - if !reflect.DeepEqual(actual, tc.Result) { - t.Fatalf("file: %s\n\n%#v\n\n%#v", tc.File, actual, tc.Result) - } + if !reflect.DeepEqual(actual, tc.Result) { + t.Errorf("file: %s diff: (actual vs expected)\n\n%s", tc.File, strings.Join(pretty.Diff(actual, tc.Result), "\n")) + } + }) } } From f079b3ee7e5281198aa64885acc0cfd6c946bf37 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Tue, 16 May 2017 12:19:16 -0700 Subject: [PATCH 07/36] Update consul/api and fix tls handling Since I was already fixing consul's tls handling in #2645 I decided to update consul/api and pre-emptively fix our tls handling against the newest consul/api behavior. consul/api's handling of http.Transports has improved but would have broken how we handled tls (again). This would have made for a nasty surprise the next time we updated consul/api. --- nomad/structs/config/consul.go | 12 +++++ vendor/github.com/hashicorp/consul/api/acl.go | 35 ++++++++++++ vendor/github.com/hashicorp/consul/api/api.go | 53 +++++++++++++------ .../hashicorp/consul/api/catalog.go | 2 + .../github.com/hashicorp/consul/api/health.go | 1 + vendor/github.com/hashicorp/consul/api/kv.go | 23 ++++---- .../github.com/hashicorp/consul/api/lock.go | 29 +++++----- .../hashicorp/consul/api/semaphore.go | 29 +++++----- vendor/vendor.json | 6 +-- 9 files changed, 131 insertions(+), 59 deletions(-) diff --git a/nomad/structs/config/consul.go b/nomad/structs/config/consul.go index 17d2218cf..4360502c5 100644 --- a/nomad/structs/config/consul.go +++ b/nomad/structs/config/consul.go @@ -1,6 +1,7 @@ package config import ( + "net/http" "strings" "time" @@ -144,6 +145,8 @@ func (a *ConsulConfig) Merge(b *ConsulConfig) *ConsulConfig { // ApiConfig returns a usable Consul config that can be passed directly to // hashicorp/consul/api. NOTE: datacenter is not set func (c *ConsulConfig) ApiConfig() (*consul.Config, error) { + // Get the default config from consul to reuse things like the default + // http.Transport. 
config := consul.DefaultConfig() if c.Addr != "" { config.Address = c.Addr @@ -152,7 +155,11 @@ func (c *ConsulConfig) ApiConfig() (*consul.Config, error) { config.Token = c.Token } if c.Timeout != 0 { + if config.HttpClient == nil { + config.HttpClient = &http.Client{} + } config.HttpClient.Timeout = c.Timeout + config.HttpClient.Transport = config.Transport } if c.Auth != "" { var username, password string @@ -180,6 +187,11 @@ func (c *ConsulConfig) ApiConfig() (*consul.Config, error) { if c.VerifySSL != nil { config.TLSConfig.InsecureSkipVerify = !*c.VerifySSL } + tlsConfig, err := consul.SetupTLSConfig(&config.TLSConfig) + if err != nil { + return nil, err + } + config.Transport.TLSClientConfig = tlsConfig } return config, nil diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go index c3fb0d53a..15d1f9f5a 100644 --- a/vendor/github.com/hashicorp/consul/api/acl.go +++ b/vendor/github.com/hashicorp/consul/api/acl.go @@ -1,5 +1,9 @@ package api +import ( + "time" +) + const ( // ACLCLientType is the client type token ACLClientType = "client" @@ -18,6 +22,16 @@ type ACLEntry struct { Rules string } +// ACLReplicationStatus is used to represent the status of ACL replication. +type ACLReplicationStatus struct { + Enabled bool + Running bool + SourceDatacenter string + ReplicatedIndex uint64 + LastSuccess time.Time + LastError time.Time +} + // ACL can be used to query the ACL endpoints type ACL struct { c *Client @@ -138,3 +152,24 @@ func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { } return entries, qm, nil } + +// Replication returns the status of the ACL replication process in the datacenter +func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/replication") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries *ACLReplicationStatus + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index a2a9e89b6..2225dd3bf 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -168,6 +168,9 @@ type Config struct { // Datacenter to use. If not provided, the default agent datacenter is used. Datacenter string + // Transport is the Transport to use for the http client. + Transport *http.Transport + // HttpClient is the client to use. Default will be // used if not provided. HttpClient *http.Client @@ -237,11 +240,9 @@ func DefaultNonPooledConfig() *Config { // given function to make the transport. 
func defaultConfig(transportFn func() *http.Transport) *Config { config := &Config{ - Address: "127.0.0.1:8500", - Scheme: "http", - HttpClient: &http.Client{ - Transport: transportFn(), - }, + Address: "127.0.0.1:8500", + Scheme: "http", + Transport: transportFn(), } if addr := os.Getenv(HTTPAddrEnvName); addr != "" { @@ -364,6 +365,10 @@ func NewClient(config *Config) (*Client, error) { config.Scheme = defConfig.Scheme } + if config.Transport == nil { + config.Transport = defConfig.Transport + } + if config.HttpClient == nil { config.HttpClient = defConfig.HttpClient } @@ -392,17 +397,14 @@ func NewClient(config *Config) (*Client, error) { config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify } - tlsClientConfig, err := SetupTLSConfig(&config.TLSConfig) - - // We don't expect this to fail given that we aren't - // parsing any of the input, but we panic just in case - // since this doesn't have an error return. - if err != nil { - return nil, err + if config.HttpClient == nil { + var err error + config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig) + if err != nil { + return nil, err + } } - config.HttpClient.Transport.(*http.Transport).TLSClientConfig = tlsClientConfig - parts := strings.SplitN(config.Address, "://", 2) if len(parts) == 2 { switch parts[0] { @@ -429,6 +431,23 @@ func NewClient(config *Config) (*Client, error) { return client, nil } +// NewHttpClient returns an http client configured with the given Transport and TLS +// config. +func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) { + tlsClientConfig, err := SetupTLSConfig(&tlsConf) + + if err != nil { + return nil, err + } + + transport.TLSClientConfig = tlsClientConfig + client := &http.Client{ + Transport: transport, + } + + return client, nil +} + // request is used to help build up a request type request struct { config *Config @@ -528,11 +547,11 @@ func (r *request) toHTTP() (*http.Request, error) { // Check if we should encode the body if r.body == nil && r.obj != nil { - if b, err := encodeBody(r.obj); err != nil { + b, err := encodeBody(r.obj) + if err != nil { return nil, err - } else { - r.body = b } + r.body = b } // Create the HTTP request diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go index 36b088249..babfc9a1d 100644 --- a/vendor/github.com/hashicorp/consul/api/catalog.go +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -4,6 +4,7 @@ type Node struct { ID string Node string Address string + Datacenter string TaggedAddresses map[string]string Meta map[string]string CreateIndex uint64 @@ -14,6 +15,7 @@ type CatalogService struct { ID string Node string Address string + Datacenter string TaggedAddresses map[string]string NodeMeta map[string]string ServiceID string diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go index 8abe2393a..38c105fdb 100644 --- a/vendor/github.com/hashicorp/consul/api/health.go +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -33,6 +33,7 @@ type HealthCheck struct { Output string ServiceID string ServiceName string + ServiceTags []string } // HealthChecks is a collection of HealthCheck structs. 
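The practical upshot for callers like Nomad's ApiConfig above: TLS settings now attach to the Transport, and a custom http.Client is only built when a request timeout must be set, reusing that same Transport so connection pooling and TLS still apply. A condensed sketch of the pattern, assuming the consul/api revision vendored here (where Config.Transport exists); parameter values are illustrative.

```go
package main

import (
	"log"
	"net/http"
	"time"

	consul "github.com/hashicorp/consul/api"
)

// buildConsulClient sketches the client construction pattern this patch
// moves to: start from DefaultConfig (which supplies a default Transport),
// put the TLS config on the Transport, and only create an http.Client for
// the timeout case.
func buildConsulClient(addr string, timeout time.Duration, verifySSL bool) (*consul.Client, error) {
	config := consul.DefaultConfig()
	config.Address = addr

	// TLS now lives on the Transport rather than on a pre-built HttpClient.
	config.TLSConfig.InsecureSkipVerify = !verifySSL
	tlsConfig, err := consul.SetupTLSConfig(&config.TLSConfig)
	if err != nil {
		return nil, err
	}
	config.Transport.TLSClientConfig = tlsConfig

	// A custom http.Client is only needed to set a timeout; it must share
	// the Transport so the TLS config above is not lost.
	if timeout != 0 {
		config.HttpClient = &http.Client{
			Transport: config.Transport,
			Timeout:   timeout,
		}
	}

	return consul.NewClient(config)
}

func main() {
	client, err := buildConsulClient("127.0.0.1:8500", 5*time.Second, true)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```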
diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go index 44e06bbb4..f91bb50fc 100644 --- a/vendor/github.com/hashicorp/consul/api/kv.go +++ b/vendor/github.com/hashicorp/consul/api/kv.go @@ -49,17 +49,18 @@ type KVPairs []*KVPair type KVOp string const ( - KVSet KVOp = "set" - KVDelete KVOp = "delete" - KVDeleteCAS KVOp = "delete-cas" - KVDeleteTree KVOp = "delete-tree" - KVCAS KVOp = "cas" - KVLock KVOp = "lock" - KVUnlock KVOp = "unlock" - KVGet KVOp = "get" - KVGetTree KVOp = "get-tree" - KVCheckSession KVOp = "check-session" - KVCheckIndex KVOp = "check-index" + KVSet KVOp = "set" + KVDelete KVOp = "delete" + KVDeleteCAS KVOp = "delete-cas" + KVDeleteTree KVOp = "delete-tree" + KVCAS KVOp = "cas" + KVLock KVOp = "lock" + KVUnlock KVOp = "unlock" + KVGet KVOp = "get" + KVGetTree KVOp = "get-tree" + KVCheckSession KVOp = "check-session" + KVCheckIndex KVOp = "check-index" + KVCheckNotExists KVOp = "check-not-exists" ) // KVTxnOp defines a single operation inside a transaction. diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go index 9f9845a43..466ef5fdf 100644 --- a/vendor/github.com/hashicorp/consul/api/lock.go +++ b/vendor/github.com/hashicorp/consul/api/lock.go @@ -143,22 +143,23 @@ func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { // Check if we need to create a session first l.lockSession = l.opts.Session if l.lockSession == "" { - if s, err := l.createSession(); err != nil { + s, err := l.createSession() + if err != nil { return nil, fmt.Errorf("failed to create session: %v", err) - } else { - l.sessionRenew = make(chan struct{}) - l.lockSession = s - session := l.c.Session() - go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew) - - // If we fail to acquire the lock, cleanup the session - defer func() { - if !l.isHeld { - close(l.sessionRenew) - l.sessionRenew = nil - } - }() } + + l.sessionRenew = make(chan struct{}) + l.lockSession = s + session := l.c.Session() + go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !l.isHeld { + close(l.sessionRenew) + l.sessionRenew = nil + } + }() } // Setup the query options diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go index e6645ac1d..9ddbdc49e 100644 --- a/vendor/github.com/hashicorp/consul/api/semaphore.go +++ b/vendor/github.com/hashicorp/consul/api/semaphore.go @@ -155,22 +155,23 @@ func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { // Check if we need to create a session first s.lockSession = s.opts.Session if s.lockSession == "" { - if sess, err := s.createSession(); err != nil { + sess, err := s.createSession() + if err != nil { return nil, fmt.Errorf("failed to create session: %v", err) - } else { - s.sessionRenew = make(chan struct{}) - s.lockSession = sess - session := s.c.Session() - go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) - - // If we fail to acquire the lock, cleanup the session - defer func() { - if !s.isHeld { - close(s.sessionRenew) - s.sessionRenew = nil - } - }() } + + s.sessionRenew = make(chan struct{}) + s.lockSession = sess + session := s.c.Session() + go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !s.isHeld { + close(s.sessionRenew) + 
s.sessionRenew = nil + } + }() } // Create the contender entry diff --git a/vendor/vendor.json b/vendor/vendor.json index acc3458ad..13a807c0b 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -639,10 +639,10 @@ "revisionTime": "2017-04-17T18:01:43Z" }, { - "checksumSHA1": "k8spDLTgdEFy15C1AdBJLAW+Zng=", + "checksumSHA1": "iGAHizBUN43QlmYmn1bPLlNN9pw=", "path": "github.com/hashicorp/consul/api", - "revision": "e9ca44d0a1757ac9aecc6785904a701936c10e4a", - "revisionTime": "2017-04-17T18:01:43Z" + "revision": "ad40a855bd5003eb5d4c48f791479828d6a9393e", + "revisionTime": "2017-05-15T20:10:36Z" }, { "checksumSHA1": "Z1N3jX/5B7GbLNfNp5GTxrsJItc=", From f48dd30aef18fab1d8a9ba7703cabee48c251800 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Tue, 30 May 2017 11:59:02 -0700 Subject: [PATCH 08/36] Update consul/api and comment to custom http.Client --- nomad/structs/config/consul.go | 1 + vendor/github.com/hashicorp/consul/api/api.go | 21 +++++++++---------- vendor/vendor.json | 6 +++--- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/nomad/structs/config/consul.go b/nomad/structs/config/consul.go index 4360502c5..2d12b141d 100644 --- a/nomad/structs/config/consul.go +++ b/nomad/structs/config/consul.go @@ -155,6 +155,7 @@ func (c *ConsulConfig) ApiConfig() (*consul.Config, error) { config.Token = c.Token } if c.Timeout != 0 { + // Create a custom Client to set the timeout if config.HttpClient == nil { config.HttpClient = &http.Client{} } diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index 2225dd3bf..6f90ed5d6 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -369,10 +369,6 @@ func NewClient(config *Config) (*Client, error) { config.Transport = defConfig.Transport } - if config.HttpClient == nil { - config.HttpClient = defConfig.HttpClient - } - if config.TLSConfig.Address == "" { config.TLSConfig.Address = defConfig.TLSConfig.Address } @@ -434,17 +430,20 @@ func NewClient(config *Config) (*Client, error) { // NewHttpClient returns an http client configured with the given Transport and TLS // config. 
func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) { - tlsClientConfig, err := SetupTLSConfig(&tlsConf) - - if err != nil { - return nil, err - } - - transport.TLSClientConfig = tlsClientConfig client := &http.Client{ Transport: transport, } + if transport.TLSClientConfig == nil { + tlsClientConfig, err := SetupTLSConfig(&tlsConf) + + if err != nil { + return nil, err + } + + transport.TLSClientConfig = tlsClientConfig + } + return client, nil } diff --git a/vendor/vendor.json b/vendor/vendor.json index 13a807c0b..4c9c91f62 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -639,10 +639,10 @@ "revisionTime": "2017-04-17T18:01:43Z" }, { - "checksumSHA1": "iGAHizBUN43QlmYmn1bPLlNN9pw=", + "checksumSHA1": "RmhTKLvlDtxNPKZFnPYnfG/HzrI=", "path": "github.com/hashicorp/consul/api", - "revision": "ad40a855bd5003eb5d4c48f791479828d6a9393e", - "revisionTime": "2017-05-15T20:10:36Z" + "revision": "eea8f4ce75e8e6ff97c9913d89f687e8f8489ce6", + "revisionTime": "2017-05-30T15:52:51Z" }, { "checksumSHA1": "Z1N3jX/5B7GbLNfNp5GTxrsJItc=", From 30d52c00e516669f3c5f55a179ee6243af4f3213 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Tue, 30 May 2017 15:08:54 -0700 Subject: [PATCH 09/36] Update consul-template to fix compat with consul/api --- .../hashicorp/consul-template/child/child.go | 6 +++ .../hashicorp/consul-template/config/retry.go | 27 ++++++++++++- .../dependency/catalog_node.go | 1 + .../dependency/catalog_nodes.go | 2 + .../dependency/catalog_service.go | 2 + .../consul-template/dependency/client_set.go | 12 +++--- .../dependency/health_service.go | 2 +- .../consul-template/manager/dedup.go | 35 +++++++++++++--- .../consul-template/template/scratch.go | 2 +- .../hashicorp/consul-template/watch/view.go | 28 +++++++++++-- vendor/vendor.json | 40 +++++++++---------- 11 files changed, 120 insertions(+), 37 deletions(-) diff --git a/vendor/github.com/hashicorp/consul-template/child/child.go b/vendor/github.com/hashicorp/consul-template/child/child.go index e9a496db9..b7364b52a 100644 --- a/vendor/github.com/hashicorp/consul-template/child/child.go +++ b/vendor/github.com/hashicorp/consul-template/child/child.go @@ -14,6 +14,12 @@ import ( "time" ) +func init() { + // Seed the default rand Source with current time to produce better random + // numbers used with splay + rand.Seed(time.Now().UnixNano()) +} + var ( // ErrMissingCommand is the error returned when no command is specified // to run. diff --git a/vendor/github.com/hashicorp/consul-template/config/retry.go b/vendor/github.com/hashicorp/consul-template/config/retry.go index 57c39f57d..8215e7ee2 100644 --- a/vendor/github.com/hashicorp/consul-template/config/retry.go +++ b/vendor/github.com/hashicorp/consul-template/config/retry.go @@ -8,11 +8,14 @@ import ( const ( // DefaultRetryAttempts is the default number of maximum retry attempts. - DefaultRetryAttempts = 5 + DefaultRetryAttempts = 12 // DefaultRetryBackoff is the default base for the exponential backoff // algorithm. DefaultRetryBackoff = 250 * time.Millisecond + + // DefaultRetryMaxBackoff is the default maximum of backoff time + DefaultRetryMaxBackoff = 1 * time.Minute ) // RetryFunc is the signature of a function that supports retries. @@ -23,12 +26,17 @@ type RetryFunc func(int) (bool, time.Duration) type RetryConfig struct { // Attempts is the total number of maximum attempts to retry before letting // the error fall through. + // 0 means unlimited. Attempts *int // Backoff is the base of the exponentialbackoff. 
This number will be // multipled by the next power of 2 on each iteration. Backoff *time.Duration + // MaxBackoff is an upper limit to the sleep time between retries + // A MaxBackoff of zero means there is no limit to the exponential growth of the backoff. + MaxBackoff *time.Duration `mapstructure:"max_backoff"` + // Enabled signals if this retry is enabled. Enabled *bool } @@ -51,6 +59,8 @@ func (c *RetryConfig) Copy() *RetryConfig { o.Backoff = c.Backoff + o.MaxBackoff = c.MaxBackoff + o.Enabled = c.Enabled return &o @@ -82,6 +92,10 @@ func (c *RetryConfig) Merge(o *RetryConfig) *RetryConfig { r.Backoff = o.Backoff } + if o.MaxBackoff != nil { + r.MaxBackoff = o.MaxBackoff + } + if o.Enabled != nil { r.Enabled = o.Enabled } @@ -103,6 +117,11 @@ func (c *RetryConfig) RetryFunc() RetryFunc { base := math.Pow(2, float64(retry)) sleep := time.Duration(base) * TimeDurationVal(c.Backoff) + maxSleep := TimeDurationVal(c.MaxBackoff) + if maxSleep > 0 && maxSleep < sleep { + return true, maxSleep + } + return true, sleep } } @@ -117,6 +136,10 @@ func (c *RetryConfig) Finalize() { c.Backoff = TimeDuration(DefaultRetryBackoff) } + if c.MaxBackoff == nil { + c.MaxBackoff = TimeDuration(DefaultRetryMaxBackoff) + } + if c.Enabled == nil { c.Enabled = Bool(true) } @@ -131,10 +154,12 @@ func (c *RetryConfig) GoString() string { return fmt.Sprintf("&RetryConfig{"+ "Attempts:%s, "+ "Backoff:%s, "+ + "MaxBackoff:%s, "+ "Enabled:%s"+ "}", IntGoString(c.Attempts), TimeDurationGoString(c.Backoff), + TimeDurationGoString(c.MaxBackoff), BoolGoString(c.Enabled), ) } diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go index 25554efce..115837bad 100644 --- a/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go @@ -128,6 +128,7 @@ func (d *CatalogNodeQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interf ID: node.Node.ID, Node: node.Node.Node, Address: node.Node.Address, + Datacenter: node.Node.Datacenter, TaggedAddresses: node.Node.TaggedAddresses, Meta: node.Node.Meta, }, diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go index 99f8179ba..845aae929 100644 --- a/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go @@ -28,6 +28,7 @@ type Node struct { ID string Node string Address string + Datacenter string TaggedAddresses map[string]string Meta map[string]string } @@ -86,6 +87,7 @@ func (d *CatalogNodesQuery) Fetch(clients *ClientSet, opts *QueryOptions) (inter ID: node.ID, Node: node.Node, Address: node.Address, + Datacenter: node.Datacenter, TaggedAddresses: node.TaggedAddresses, Meta: node.Meta, }) diff --git a/vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go b/vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go index 80dcee98f..e12739bc6 100644 --- a/vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go +++ b/vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go @@ -27,6 +27,7 @@ type CatalogService struct { ID string Node string Address string + Datacenter string TaggedAddresses map[string]string NodeMeta map[string]string ServiceID string @@ -101,6 +102,7 @@ func (d *CatalogServiceQuery) Fetch(clients *ClientSet, opts 
*QueryOptions) (int ID: s.ID, Node: s.Node, Address: s.Address, + Datacenter: s.Datacenter, TaggedAddresses: s.TaggedAddresses, NodeMeta: s.NodeMeta, ServiceID: s.ServiceID, diff --git a/vendor/github.com/hashicorp/consul-template/dependency/client_set.go b/vendor/github.com/hashicorp/consul-template/dependency/client_set.go index e353749b6..647c68b76 100644 --- a/vendor/github.com/hashicorp/consul-template/dependency/client_set.go +++ b/vendor/github.com/hashicorp/consul-template/dependency/client_set.go @@ -25,8 +25,8 @@ type ClientSet struct { // consulClient is a wrapper around a real Consul API client. type consulClient struct { - client *consulapi.Client - httpClient *http.Client + client *consulapi.Client + transport *http.Transport } // vaultClient is a wrapper around a real Vault API client. @@ -169,7 +169,7 @@ func (c *ClientSet) CreateConsulClient(i *CreateConsulClientInput) error { } // Setup the new transport - consulConfig.HttpClient.Transport = transport + consulConfig.Transport = transport // Create the API client client, err := consulapi.NewClient(consulConfig) @@ -180,8 +180,8 @@ func (c *ClientSet) CreateConsulClient(i *CreateConsulClientInput) error { // Save the data on ourselves c.Lock() c.consul = &consulClient{ - client: client, - httpClient: consulConfig.HttpClient, + client: client, + transport: transport, } c.Unlock() @@ -323,7 +323,7 @@ func (c *ClientSet) Stop() { defer c.Unlock() if c.consul != nil { - c.consul.httpClient.Transport.(*http.Transport).CloseIdleConnections() + c.consul.transport.CloseIdleConnections() } if c.vault != nil { diff --git a/vendor/github.com/hashicorp/consul-template/dependency/health_service.go b/vendor/github.com/hashicorp/consul-template/dependency/health_service.go index 91c97f8a4..8edcb8154 100644 --- a/vendor/github.com/hashicorp/consul-template/dependency/health_service.go +++ b/vendor/github.com/hashicorp/consul-template/dependency/health_service.go @@ -47,7 +47,7 @@ type HealthService struct { ID string Name string Tags ServiceTags - Checks []*api.HealthCheck + Checks api.HealthChecks Status string Port int } diff --git a/vendor/github.com/hashicorp/consul-template/manager/dedup.go b/vendor/github.com/hashicorp/consul-template/manager/dedup.go index 62981eb57..816fe14be 100644 --- a/vendor/github.com/hashicorp/consul-template/manager/dedup.go +++ b/vendor/github.com/hashicorp/consul-template/manager/dedup.go @@ -295,6 +295,9 @@ func (d *DedupManager) watchTemplate(client *consulapi.Client, t *template.Templ WaitTime: 60 * time.Second, } + var lastData []byte + var lastIndex uint64 + START: // Stop listening if we're stopped select { @@ -330,6 +333,13 @@ START: } opts.WaitIndex = meta.LastIndex + // Stop listening if we're stopped + select { + case <-d.stopCh: + return + default: + } + // If we've exceeded the maximum staleness, retry without stale if allowStale && meta.LastContact > *d.config.MaxStale { allowStale = false @@ -342,13 +352,28 @@ START: allowStale = true } - // Stop listening if we're stopped - select { - case <-d.stopCh: - return - default: + if meta.LastIndex == lastIndex { + log.Printf("[TRACE] (dedup) %s no new data (index was the same)", path) + goto START } + if meta.LastIndex < lastIndex { + log.Printf("[TRACE] (dedup) %s had a lower index, resetting", path) + lastIndex = 0 + goto START + } + lastIndex = meta.LastIndex + + var data []byte + if pair != nil { + data = pair.Value + } + if bytes.Equal(lastData, data) { + log.Printf("[TRACE] (dedup) %s no new data (contents were the same)", path) + goto START 
+ } + lastData = data + // If we are current the leader, wait for leadership lost d.leaderLock.RLock() lockCh, ok = d.leader[t] diff --git a/vendor/github.com/hashicorp/consul-template/template/scratch.go b/vendor/github.com/hashicorp/consul-template/template/scratch.go index d26787c1a..c3d959dc8 100644 --- a/vendor/github.com/hashicorp/consul-template/template/scratch.go +++ b/vendor/github.com/hashicorp/consul-template/template/scratch.go @@ -101,7 +101,7 @@ func (s *Scratch) MapValues(k string) ([]interface{}, error) { typed, ok := s.values[k].(map[string]interface{}) if !ok { - return nil, fmt.Errorf("%q is not a map", k) + return nil, nil } keys := make([]string, 0, len(typed)) diff --git a/vendor/github.com/hashicorp/consul-template/watch/view.go b/vendor/github.com/hashicorp/consul-template/watch/view.go index ca47f50f8..b73b1ed72 100644 --- a/vendor/github.com/hashicorp/consul-template/watch/view.go +++ b/vendor/github.com/hashicorp/consul-template/watch/view.go @@ -108,9 +108,12 @@ func (v *View) poll(viewCh chan<- *View, errCh chan<- error) { var retries int for { - doneCh, fetchErrCh := make(chan struct{}, 1), make(chan error, 1) - go v.fetch(doneCh, fetchErrCh) + doneCh := make(chan struct{}, 1) + successCh := make(chan struct{}, 1) + fetchErrCh := make(chan error, 1) + go v.fetch(doneCh, successCh, fetchErrCh) + WAIT: select { case <-doneCh: // Reset the retry to avoid exponentially incrementing retries when we @@ -129,6 +132,16 @@ func (v *View) poll(viewCh chan<- *View, errCh chan<- error) { if v.once { return } + case <-successCh: + // We successfully received a non-error response from the server. This + // does not mean we have data (that's dataCh's job), but rather this + // just resets the counter indicating we communciated successfully. For + // example, Consul make have an outage, but when it returns, the view + // is unchanged. We have to reset the counter retries, but not update the + // actual template. + log.Printf("[TRACE] view %s successful contact, resetting retries", v.dependency) + retries = 0 + goto WAIT case err := <-fetchErrCh: if v.retryFunc != nil { retry, sleep := v.retryFunc(retries) @@ -166,7 +179,7 @@ func (v *View) poll(viewCh chan<- *View, errCh chan<- error) { // written to errCh. It is designed to be run in a goroutine that selects the // result of doneCh and errCh. It is assumed that only one instance of fetch // is running per View and therefore no locking or mutexes are used. -func (v *View) fetch(doneCh chan<- struct{}, errCh chan<- error) { +func (v *View) fetch(doneCh, successCh chan<- struct{}, errCh chan<- error) { log.Printf("[TRACE] (view) %s starting fetch", v.dependency) var allowStale bool @@ -203,6 +216,15 @@ func (v *View) fetch(doneCh chan<- struct{}, errCh chan<- error) { return } + // If we got this far, we received data successfully. That data might not + // trigger a data update (because we could continue below), but we need to + // inform the poller to reset the retry count. 
+ log.Printf("[TRACE] (view) %s marking successful data response", v.dependency) + select { + case successCh <- struct{}{}: + default: + } + if allowStale && rm.LastContact > v.maxStale { allowStale = false log.Printf("[TRACE] (view) %s stale data (last contact exceeded max_stale)", v.dependency) diff --git a/vendor/vendor.json b/vendor/vendor.json index 4c9c91f62..24b0ba10d 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -591,46 +591,46 @@ "revision": "a557574d6c024ed6e36acc8b610f5f211c91568a" }, { - "checksumSHA1": "gx2CAg/v3k7kfBA/rT5NCkI0jDI=", + "checksumSHA1": "Nu2j1GusM7ZH0uYrGzqr1K7yH7I=", "path": "github.com/hashicorp/consul-template/child", - "revision": "e79894aad0b3789b93d0372e23f6eb0d2b75b35a", - "revisionTime": "2017-03-28T18:42:41Z" + "revision": "92746fc5cf86dbb113558bacec43459a65c8df14", + "revisionTime": "2017-05-26T18:30:17Z" }, { - "checksumSHA1": "VMDorxQ1u/r2BYZ/azJd71UQi4A=", + "checksumSHA1": "7TBPXChZZS84qZbzP7qFYeQding=", "path": "github.com/hashicorp/consul-template/config", - "revision": "e79894aad0b3789b93d0372e23f6eb0d2b75b35a", - "revisionTime": "2017-03-28T18:42:41Z" + "revision": "92746fc5cf86dbb113558bacec43459a65c8df14", + "revisionTime": "2017-05-26T18:30:17Z" }, { - "checksumSHA1": "kvyZVRAc/JG3Ua1deyKaFtOrlqc=", + "checksumSHA1": "7rKifM082rlbHN9EcsVyu7VXLoo=", "path": "github.com/hashicorp/consul-template/dependency", - "revision": "e79894aad0b3789b93d0372e23f6eb0d2b75b35a", - "revisionTime": "2017-03-28T18:42:41Z" + "revision": "92746fc5cf86dbb113558bacec43459a65c8df14", + "revisionTime": "2017-05-26T18:30:17Z" }, { - "checksumSHA1": "QHR0vkzQMi3UH7q1HdV2QhxrOt8=", + "checksumSHA1": "Ci5EmLs/h7ke9bUg7a34UfTbB5U=", "path": "github.com/hashicorp/consul-template/manager", - "revision": "e79894aad0b3789b93d0372e23f6eb0d2b75b35a", - "revisionTime": "2017-03-28T18:42:41Z" + "revision": "92746fc5cf86dbb113558bacec43459a65c8df14", + "revisionTime": "2017-05-26T18:30:17Z" }, { "checksumSHA1": "oskgb0WteBKOItG8NNDduM7E/D0=", "path": "github.com/hashicorp/consul-template/signals", - "revision": "e79894aad0b3789b93d0372e23f6eb0d2b75b35a", - "revisionTime": "2017-03-28T18:42:41Z" + "revision": "92746fc5cf86dbb113558bacec43459a65c8df14", + "revisionTime": "2017-05-26T18:30:17Z" }, { - "checksumSHA1": "KFFY15i/0MuTL2z6OzbQfB4xIBE=", + "checksumSHA1": "804hk7BQd6V2xjBwz+cE0hdzSlI=", "path": "github.com/hashicorp/consul-template/template", - "revision": "e79894aad0b3789b93d0372e23f6eb0d2b75b35a", - "revisionTime": "2017-03-28T18:42:41Z" + "revision": "92746fc5cf86dbb113558bacec43459a65c8df14", + "revisionTime": "2017-05-26T18:30:17Z" }, { - "checksumSHA1": "cl9R28+I+YT6a0Z+KQFP//wuC+0=", + "checksumSHA1": "KjcelGP7qPh0ObKouBJuHmXUjqk=", "path": "github.com/hashicorp/consul-template/watch", - "revision": "e79894aad0b3789b93d0372e23f6eb0d2b75b35a", - "revisionTime": "2017-03-28T18:42:41Z" + "revision": "92746fc5cf86dbb113558bacec43459a65c8df14", + "revisionTime": "2017-05-26T18:30:17Z" }, { "checksumSHA1": "jfELEMRhiTcppZmRH+ZwtkVS5Uw=", From a42b7383a2f55e21a59e990419fb208191150b15 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Tue, 30 May 2017 15:16:14 -0700 Subject: [PATCH 10/36] Add #2636 to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ac26f1ab..98573c0ec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ IMPROVEMENTS: * api/job: Ability to revert job to older versions [GH-2575] * client: Environment variables for client DC and Region [GH-2507] * client: Hash host ID so 
its stable and well distributed [GH-2541] + * client: GC dead allocs if total allocs > `gc_max_allocs` tunable [GH-2636] * client: Persist state using bolt-db and more efficient write patterns [GH-2610] * client: Fingerprint all routable addresses on an interface including IPv6 From d2e2f4aebfddd963f877734920ef067c7448643d Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Fri, 26 May 2017 19:08:17 -0400 Subject: [PATCH 11/36] Bump middleman-hashicorp --- website/Gemfile | 2 +- website/Gemfile.lock | 16 +++++++++------- website/Makefile | 2 +- website/packer.json | 2 +- 4 files changed, 12 insertions(+), 10 deletions(-) diff --git a/website/Gemfile b/website/Gemfile index 405a8c992..efb257727 100644 --- a/website/Gemfile +++ b/website/Gemfile @@ -1,3 +1,3 @@ source "https://rubygems.org" -gem "middleman-hashicorp", "0.3.22" +gem "middleman-hashicorp", "0.3.25" diff --git a/website/Gemfile.lock b/website/Gemfile.lock index 229218ac9..289a890c9 100644 --- a/website/Gemfile.lock +++ b/website/Gemfile.lock @@ -6,7 +6,7 @@ GEM minitest (~> 5.1) thread_safe (~> 0.3, >= 0.3.4) tzinfo (~> 1.1) - autoprefixer-rails (6.7.7.1) + autoprefixer-rails (7.0.1) execjs bootstrap-sass (3.3.7) autoprefixer-rails (>= 5.2.1) @@ -42,14 +42,15 @@ GEM eventmachine (1.2.3) execjs (2.7.0) ffi (1.9.18) - haml (4.0.7) + haml (5.0.1) + temple (>= 0.8.0) tilt hike (1.2.3) hooks (0.4.1) uber (~> 0.0.14) http_parser.rb (0.6.0) i18n (0.7.0) - json (2.0.3) + json (2.1.0) kramdown (1.13.2) listen (3.0.8) rb-fsevent (~> 0.9, >= 0.9.4) @@ -77,7 +78,7 @@ GEM rack (>= 1.4.5, < 2.0) thor (>= 0.15.2, < 2.0) tilt (~> 1.4.1, < 2.0) - middleman-hashicorp (0.3.22) + middleman-hashicorp (0.3.25) bootstrap-sass (~> 3.3) builder (~> 3.2) middleman (~> 3.4) @@ -111,7 +112,7 @@ GEM tilt (~> 1.4.1) padrino-support (0.12.8.1) activesupport (>= 3.1) - rack (1.6.5) + rack (1.6.6) rack-livereload (0.3.16) rack rack-test (0.6.3) @@ -132,12 +133,13 @@ GEM sprockets-sass (1.3.1) sprockets (~> 2.0) tilt (~> 1.1) + temple (0.8.0) thor (0.19.4) thread_safe (0.3.6) tilt (1.4.1) turbolinks (5.0.1) turbolinks-source (~> 5) - turbolinks-source (5.0.0) + turbolinks-source (5.0.3) tzinfo (1.2.3) thread_safe (~> 0.1) uber (0.0.15) @@ -151,7 +153,7 @@ PLATFORMS ruby DEPENDENCIES - middleman-hashicorp (= 0.3.22) + middleman-hashicorp (= 0.3.25) BUNDLED WITH 1.14.6 diff --git a/website/Makefile b/website/Makefile index d7620d1c2..3bb8b83bf 100644 --- a/website/Makefile +++ b/website/Makefile @@ -1,4 +1,4 @@ -VERSION?="0.3.22" +VERSION?="0.3.25" build: @echo "==> Starting build in Docker..." diff --git a/website/packer.json b/website/packer.json index 35de63232..4184097c0 100644 --- a/website/packer.json +++ b/website/packer.json @@ -8,7 +8,7 @@ "builders": [ { "type": "docker", - "image": "hashicorp/middleman-hashicorp:0.3.22", + "image": "hashicorp/middleman-hashicorp:0.3.25", "discard": "true", "run_command": ["-d", "-i", "-t", "{{ .Image }}", "/bin/sh"] } From f34a61ed1babf9da634d1f3de893fd7348fa0e27 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Fri, 26 May 2017 19:08:31 -0400 Subject: [PATCH 12/36] Fix community page formatting --- website/source/community.html.erb | 201 ++++++++++++++++-------------- 1 file changed, 106 insertions(+), 95 deletions(-) diff --git a/website/source/community.html.erb b/website/source/community.html.erb index 5273df22f..62e641f81 100644 --- a/website/source/community.html.erb +++ b/website/source/community.html.erb @@ -8,120 +8,131 @@ description: |-

Community

-Nomad is an open source project with a growing community. There are -active, dedicated users willing to help you through various mediums. + Nomad is an open source project with a growing community. There are + active, dedicated users willing to help you through various mediums.

-Gitter: Nomad Gitter Room + Gitter: Nomad Gitter Room

-IRC: Use the Gitter IRC bridge + IRC: Use the Gitter IRC bridge

-Mailing list: -Nomad Google Group + Mailing list: + Nomad Google Group

-Bug Tracker: -Issue tracker - on GitHub. Please only use this for reporting bugs. Do not ask -for general help here. Use IRC or the mailing list for that. + Bug Tracker: + Issue tracker + on GitHub. Please only use this for reporting bugs. Do not ask + for general help here. Use IRC or the mailing list for that. +

+ +

Community Tools

+

+ These Nomad projects are created and managed by the amazing members of the + Nomad community: +

-

Community Tools

-

These Nomad projects are created and managed by the amazing members of the Nomad community:

    -
  • nomad-ui - Nomad UI is a simple to deploy, web based UI for interacting with Nomad
  • -
  • nomad-jenkins - This project uses Nomad to provision new Jenkins build slaves based on workload
  • +
  • nomad-ui - Nomad UI is a simple-to-deploy, web-based UI for interacting with Nomad
  • +
  • nomad-jenkins - This project uses Nomad to provision new Jenkins build slaves based on workload
-

People

+ + Want to see your library here? + Submit a Pull Request. + We also have a full list of HTTP API Libraries. + + + +

People

+

-The following people are some of the faces behind Nomad. They each -contribute to Nomad in some core way. Over time, faces may appear and -disappear from this list as contributors come and go. In addition to -the faces below, Nomad is a project by -HashiCorp, so many HashiCorp -employees actively contribute to Nomad. + The following people are some of the faces behind Nomad. They each contribute + to Nomad in some core way. Over time, faces may appear and disappear from this + list as contributors come and go. In addition to the faces below, Nomad is a + project by HashiCorp, so many + HashiCorp employees actively contribute to Nomad.

+
-
- -
-

Armon Dadgar (@armon)

-

- Armon Dadgar is a creator of Nomad. He works on all aspects of Nomad, - focusing on core architecture. Armon is also an author or - core contributor to: - Vault, - Consul, - Serf, - Terraform, - and Statsite. -

-
+
+ +
+

Armon Dadgar (@armon)

+

+ Armon Dadgar is a creator of Nomad. He works on all aspects of Nomad, + focusing on core architecture. Armon is also an author or + core contributor to: + Vault, + Consul, + Serf, + Terraform, + and Statsite. +

+
-
- -
-

Ryan Uber (@ryanuber)

-

- Ryan Uber is a HashiCorp employee and core contributor to Nomad, with a - focus on the agent, API client, and command-line interface. Ryan is also - an active contributor to both Consul - and Serf. -

-
-
- -
- -
-

Alex Dadgar (@dadgar)

-

- Alex is a HashiCorp employee and a core contributor to Nomad. He works on - resource isolation and Drivers, among other things. -

-
-
- -
- -
-

Clint Shryock (@catsby)

-

- Clint Shryock is a HashiCorp employee and core developer on Nomad, - mostly focusing on Drivers and Fingerprinters. Mostly. Clint is also - a core developer on Terraform, and - contributes to Packer. -

-
-
- -
- -
-

Chris Bednarski (@cbednarski)

-

- Chris works at HashiCorp where he helps build Nomad and - Packer, making sure all - parts of the stack play nice together. Chris created - Hostess. -

-
-
+
+ +
+

Ryan Uber (@ryanuber)

+

+ Ryan Uber is a HashiCorp employee and core contributor to Nomad, with a + focus on the agent, API client, and command-line interface. Ryan is also + an active contributor to both Consul + and Serf. +

+
+
- -
-

Jonathan Thomas - JT (@captainill)

-

- JT is an employee at Hashicorp where he works on the identity of all the open source projects. - JT will take the designs and cut up responsive HTML/CSS for each project. -

-
-
+ +
+

Alex Dadgar (@dadgar)

+

+ Alex is a HashiCorp employee and a core contributor to Nomad. He works on + resource isolation and Drivers, among other things. +

+
+
-
+
+ +
+

Clint Shryock (@catsby)

+

+ Clint Shryock is a HashiCorp employee and core developer on Nomad, + mostly focusing on Drivers and Fingerprinters. Mostly. Clint is also + a core developer on Terraform, and + contributes to Packer. +

+
+
+ +
+ +
+

Chris Bednarski (@cbednarski)

+

+ Chris works at HashiCorp where he helps build Nomad and + Packer, making sure all + parts of the stack play nice together. Chris created + Hostess. +

+
+
+ +
+ +
+

Jonathan Thomas - JT (@captainill)

+

+ JT is an employee at HashiCorp where he works on the identity of all the open source projects.
+ JT will take the designs and cut up responsive HTML/CSS for each project.

+
+
From 8332722eaa74e9e3972d3699798b99d343111920 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Fri, 26 May 2017 19:10:38 -0400 Subject: [PATCH 13/36] Add API section to header and sidebar --- website/source/layouts/_sidebar.erb | 1 + website/source/layouts/docs.erb | 4 ---- website/source/layouts/layout.erb | 2 ++ 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/website/source/layouts/_sidebar.erb b/website/source/layouts/_sidebar.erb index 38f234513..bee68ba1e 100644 --- a/website/source/layouts/_sidebar.erb +++ b/website/source/layouts/_sidebar.erb @@ -9,6 +9,7 @@
  • Intro
  • Guides
  • Docs
  • +
  • API
  • Community
  • Security
  • Press Kit
  • diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 40b4aa873..854692613 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -291,10 +291,6 @@
    - > - HTTP API - - > Internals
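
A note on the consul-template changes vendored earlier in this series: the dedup and view updates both apply the usual Consul blocking-query discipline. Responses with an unchanged index carry no new data, an index that goes backwards means the server state was reset, and byte-identical payloads should not trigger a re-render. A minimal standalone sketch of that guard follows; the names are hypothetical and this is not the consul-template API, just an illustration of the pattern under those assumptions.

package main

import (
	"bytes"
	"fmt"
)

// watchState tracks the last blocking-query response that was acted
// on. This is an illustrative sketch of the guard the vendored
// dedup.go change adds, not the actual consul-template types.
type watchState struct {
	lastIndex uint64
	lastData  []byte
}

// shouldApply reports whether a response carries genuinely new data,
// using the same three checks as the patch: an unchanged index means
// no new data, a lower index means the server state was reset, and
// byte-identical contents mean nothing actually changed.
func (w *watchState) shouldApply(index uint64, data []byte) bool {
	if index == w.lastIndex {
		return false // no new data (index was the same)
	}
	if index < w.lastIndex {
		// The index went backwards (e.g. the server restarted), so
		// reset and treat the next response as fresh.
		w.lastIndex = 0
		return false
	}
	w.lastIndex = index
	if bytes.Equal(w.lastData, data) {
		return false // no new data (contents were the same)
	}
	w.lastData = data
	return true
}

func main() {
	w := &watchState{}
	fmt.Println(w.shouldApply(5, []byte("a"))) // true: first response
	fmt.Println(w.shouldApply(5, []byte("a"))) // false: same index
	fmt.Println(w.shouldApply(6, []byte("a"))) // false: same contents
	fmt.Println(w.shouldApply(3, []byte("b"))) // false: index regressed
	fmt.Println(w.shouldApply(4, []byte("b"))) // true: new index and data
}

Resetting lastIndex to zero on regression mirrors the vendored change: after a server restart the next response is accepted as fresh rather than compared against a stale index.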