mirror of
https://github.com/kemko/nomad.git
synced 2026-01-04 09:25:46 +03:00
Merge branch 'master' into feature/2334
This commit is contained in:
@@ -11,6 +11,7 @@ IMPROVEMENTS:
|
||||
* api/job: Ability to revert job to older versions [GH-2575]
|
||||
* client: Environment variables for client DC and Region [GH-2507]
|
||||
* client: Hash host ID so its stable and well distributed [GH-2541]
|
||||
* client: GC dead allocs if total allocs > `gc_max_allocs` tunable [GH-2636]
|
||||
* client: Persist state using bolt-db and more efficient write patterns
|
||||
[GH-2610]
|
||||
* client: Fingerprint all routable addresses on an interface including IPv6
|
||||
|
||||
@@ -323,6 +323,7 @@ func TestAllocRunner_SaveRestoreState(t *testing.T) {
|
||||
|
||||
upd, ar := testAllocRunnerFromAlloc(alloc, false)
|
||||
go ar.Run()
|
||||
defer ar.Destroy()
|
||||
|
||||
// Snapshot state
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
@@ -390,6 +391,7 @@ func TestAllocRunner_SaveRestoreState_TerminalAlloc(t *testing.T) {
|
||||
task := ar.alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Config["run_for"] = "10s"
|
||||
go ar.Run()
|
||||
defer ar.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
@@ -436,8 +438,9 @@ func TestAllocRunner_SaveRestoreState_TerminalAlloc(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
ar2.logger.Println("[TESTING] running second alloc runner")
|
||||
go ar2.Run()
|
||||
ar2.logger.Println("[TESTING] starting second alloc runner")
|
||||
defer ar2.Destroy() // Just-in-case of failure before Destroy below
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
// Check the state still exists
|
||||
@@ -516,6 +519,7 @@ func TestAllocRunner_SaveRestoreState_Upgrade(t *testing.T) {
|
||||
origConfig := ar.config.Copy()
|
||||
ar.config.Version = "0.5.6"
|
||||
go ar.Run()
|
||||
defer ar.Destroy()
|
||||
|
||||
// Snapshot state
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
@@ -544,6 +548,7 @@ func TestAllocRunner_SaveRestoreState_Upgrade(t *testing.T) {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
go ar2.Run()
|
||||
defer ar2.Destroy() // Just-in-case of failure before Destroy below
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if len(ar2.tasks) != 1 {
|
||||
@@ -736,6 +741,7 @@ func TestAllocRunner_TaskFailed_KillTG(t *testing.T) {
|
||||
ar.alloc.Job.TaskGroups[0].Tasks = append(ar.alloc.Job.TaskGroups[0].Tasks, task2)
|
||||
ar.alloc.TaskResources[task2.Name] = task2.Resources
|
||||
go ar.Run()
|
||||
defer ar.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
@@ -862,6 +868,7 @@ func TestAllocRunner_MoveAllocDir(t *testing.T) {
|
||||
}
|
||||
upd, ar := testAllocRunnerFromAlloc(alloc, false)
|
||||
go ar.Run()
|
||||
defer ar.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd.Count == 0 {
|
||||
@@ -893,6 +900,7 @@ func TestAllocRunner_MoveAllocDir(t *testing.T) {
|
||||
upd1, ar1 := testAllocRunnerFromAlloc(alloc1, false)
|
||||
ar1.SetPreviousAllocDir(ar.allocDir)
|
||||
go ar1.Run()
|
||||
defer ar1.Destroy()
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
if upd1.Count == 0 {
|
||||
|
||||
@@ -137,7 +137,7 @@ type Client struct {
|
||||
|
||||
// migratingAllocs is the set of allocs whose data migration is in flight
|
||||
migratingAllocs map[string]*migrateAllocCtrl
|
||||
migratingAllocsLock sync.Mutex
|
||||
migratingAllocsLock sync.RWMutex
|
||||
|
||||
// allocUpdates stores allocations that need to be synced to the server.
|
||||
allocUpdates chan *structs.Allocation
|
||||
@@ -240,13 +240,15 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic
|
||||
|
||||
// Add the garbage collector
|
||||
gcConfig := &GCConfig{
|
||||
MaxAllocs: cfg.GCMaxAllocs,
|
||||
DiskUsageThreshold: cfg.GCDiskUsageThreshold,
|
||||
InodeUsageThreshold: cfg.GCInodeUsageThreshold,
|
||||
Interval: cfg.GCInterval,
|
||||
ParallelDestroys: cfg.GCParallelDestroys,
|
||||
ReservedDiskMB: cfg.Node.Reserved.DiskMB,
|
||||
}
|
||||
c.garbageCollector = NewAllocGarbageCollector(logger, statsCollector, gcConfig)
|
||||
c.garbageCollector = NewAllocGarbageCollector(logger, statsCollector, c, gcConfig)
|
||||
go c.garbageCollector.Run()
|
||||
|
||||
// Setup the node
|
||||
if err := c.setupNode(); err != nil {
|
||||
@@ -482,17 +484,13 @@ func (c *Client) RPC(method string, args interface{}, reply interface{}) error {
|
||||
// Stats is used to return statistics for debugging and insight
|
||||
// for various sub-systems
|
||||
func (c *Client) Stats() map[string]map[string]string {
|
||||
c.allocLock.RLock()
|
||||
numAllocs := len(c.allocs)
|
||||
c.allocLock.RUnlock()
|
||||
|
||||
c.heartbeatLock.Lock()
|
||||
defer c.heartbeatLock.Unlock()
|
||||
stats := map[string]map[string]string{
|
||||
"client": map[string]string{
|
||||
"node_id": c.Node().ID,
|
||||
"known_servers": c.servers.all().String(),
|
||||
"num_allocations": strconv.Itoa(numAllocs),
|
||||
"num_allocations": strconv.Itoa(c.NumAllocs()),
|
||||
"last_heartbeat": fmt.Sprintf("%v", time.Since(c.lastHeartbeat)),
|
||||
"heartbeat_ttl": fmt.Sprintf("%v", c.heartbeatTTL),
|
||||
},
|
||||
@@ -722,6 +720,24 @@ func (c *Client) getAllocRunners() map[string]*AllocRunner {
|
||||
return runners
|
||||
}
|
||||
|
||||
// NumAllocs returns the number of allocs this client has. Used to
|
||||
// fulfill the AllocCounter interface for the GC.
|
||||
func (c *Client) NumAllocs() int {
|
||||
c.allocLock.RLock()
|
||||
n := len(c.allocs)
|
||||
c.allocLock.RUnlock()
|
||||
|
||||
c.blockedAllocsLock.RLock()
|
||||
n += len(c.blockedAllocations)
|
||||
c.blockedAllocsLock.RUnlock()
|
||||
|
||||
c.migratingAllocsLock.RLock()
|
||||
n += len(c.migratingAllocs)
|
||||
c.migratingAllocsLock.RUnlock()
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
// nodeID restores, or generates if necessary, a unique node ID and SecretID.
|
||||
// The node ID is, if available, a persistent unique ID. The secret ID is a
|
||||
// high-entropy random UUID.
|
||||
@@ -1228,25 +1244,31 @@ func (c *Client) updateNodeStatus() error {
|
||||
func (c *Client) updateAllocStatus(alloc *structs.Allocation) {
|
||||
// If this alloc was blocking another alloc and transitioned to a
|
||||
// terminal state then start the blocked allocation
|
||||
c.blockedAllocsLock.Lock()
|
||||
if blockedAlloc, ok := c.blockedAllocations[alloc.ID]; ok && alloc.Terminated() {
|
||||
var prevAllocDir *allocdir.AllocDir
|
||||
if ar, ok := c.getAllocRunners()[alloc.ID]; ok {
|
||||
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
|
||||
if tg != nil && tg.EphemeralDisk != nil && tg.EphemeralDisk.Sticky {
|
||||
prevAllocDir = ar.GetAllocDir()
|
||||
}
|
||||
}
|
||||
if err := c.addAlloc(blockedAlloc, prevAllocDir); err != nil {
|
||||
c.logger.Printf("[ERR] client: failed to add alloc which was previously blocked %q: %v",
|
||||
blockedAlloc.ID, err)
|
||||
}
|
||||
delete(c.blockedAllocations, blockedAlloc.PreviousAllocation)
|
||||
}
|
||||
c.blockedAllocsLock.Unlock()
|
||||
|
||||
// Mark the allocation for GC if it is in terminal state
|
||||
if alloc.Terminated() {
|
||||
c.blockedAllocsLock.Lock()
|
||||
blockedAlloc, ok := c.blockedAllocations[alloc.ID]
|
||||
if ok {
|
||||
var prevAllocDir *allocdir.AllocDir
|
||||
if ar, ok := c.getAllocRunners()[alloc.ID]; ok {
|
||||
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
|
||||
if tg != nil && tg.EphemeralDisk != nil && tg.EphemeralDisk.Sticky {
|
||||
prevAllocDir = ar.GetAllocDir()
|
||||
}
|
||||
}
|
||||
|
||||
delete(c.blockedAllocations, blockedAlloc.PreviousAllocation)
|
||||
c.blockedAllocsLock.Unlock()
|
||||
|
||||
// Need to call addAlloc without holding the lock
|
||||
if err := c.addAlloc(blockedAlloc, prevAllocDir); err != nil {
|
||||
c.logger.Printf("[ERR] client: failed to add alloc which was previously blocked %q: %v",
|
||||
blockedAlloc.ID, err)
|
||||
}
|
||||
} else {
|
||||
c.blockedAllocsLock.Unlock()
|
||||
}
|
||||
|
||||
// Mark the allocation for GC if it is in terminal state
|
||||
if ar, ok := c.getAllocRunners()[alloc.ID]; ok {
|
||||
if err := c.garbageCollector.MarkForCollection(ar); err != nil {
|
||||
c.logger.Printf("[DEBUG] client: couldn't add alloc %v for GC: %v", alloc.ID, err)
|
||||
@@ -1553,9 +1575,9 @@ func (c *Client) runAllocs(update *allocUpdates) {
|
||||
}
|
||||
|
||||
// See if the updated alloc is getting migrated
|
||||
c.migratingAllocsLock.Lock()
|
||||
c.migratingAllocsLock.RLock()
|
||||
ch, ok := c.migratingAllocs[update.updated.ID]
|
||||
c.migratingAllocsLock.Unlock()
|
||||
c.migratingAllocsLock.RUnlock()
|
||||
if ok {
|
||||
// Stopping the migration if the allocation doesn't need any
|
||||
// migration
|
||||
@@ -2314,13 +2336,13 @@ func (c *Client) emitClientMetrics() {
|
||||
nodeID := c.Node().ID
|
||||
|
||||
// Emit allocation metrics
|
||||
c.migratingAllocsLock.Lock()
|
||||
migrating := len(c.migratingAllocs)
|
||||
c.migratingAllocsLock.Unlock()
|
||||
|
||||
c.blockedAllocsLock.Lock()
|
||||
c.blockedAllocsLock.RLock()
|
||||
blocked := len(c.blockedAllocations)
|
||||
c.blockedAllocsLock.Unlock()
|
||||
c.blockedAllocsLock.RUnlock()
|
||||
|
||||
c.migratingAllocsLock.RLock()
|
||||
migrating := len(c.migratingAllocs)
|
||||
c.migratingAllocsLock.RUnlock()
|
||||
|
||||
pending, running, terminal := 0, 0, 0
|
||||
for _, ar := range c.getAllocRunners() {
|
||||
@@ -2392,17 +2414,17 @@ func (c *Client) allAllocs() map[string]*structs.Allocation {
|
||||
a := ar.Alloc()
|
||||
allocs[a.ID] = a
|
||||
}
|
||||
c.blockedAllocsLock.Lock()
|
||||
c.blockedAllocsLock.RLock()
|
||||
for _, alloc := range c.blockedAllocations {
|
||||
allocs[alloc.ID] = alloc
|
||||
}
|
||||
c.blockedAllocsLock.Unlock()
|
||||
c.blockedAllocsLock.RUnlock()
|
||||
|
||||
c.migratingAllocsLock.Lock()
|
||||
c.migratingAllocsLock.RLock()
|
||||
for _, ctrl := range c.migratingAllocs {
|
||||
allocs[ctrl.alloc.ID] = ctrl.alloc
|
||||
}
|
||||
c.migratingAllocsLock.Unlock()
|
||||
c.migratingAllocsLock.RUnlock()
|
||||
return allocs
|
||||
}
|
||||
|
||||
|
||||
@@ -171,6 +171,10 @@ type Config struct {
|
||||
// beyond which the Nomad client triggers GC of the terminal allocations
|
||||
GCInodeUsageThreshold float64
|
||||
|
||||
// GCMaxAllocs is the maximum number of allocations a node can have
|
||||
// before garbage collection is triggered.
|
||||
GCMaxAllocs int
|
||||
|
||||
// LogLevel is the level of the logs to putout
|
||||
LogLevel string
|
||||
|
||||
@@ -205,6 +209,7 @@ func DefaultConfig() *Config {
|
||||
GCParallelDestroys: 2,
|
||||
GCDiskUsageThreshold: 80,
|
||||
GCInodeUsageThreshold: 70,
|
||||
GCMaxAllocs: 50,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -220,6 +220,7 @@ func (f *FileRotator) purgeOldFiles() {
|
||||
var fIndexes []int
|
||||
files, err := ioutil.ReadDir(f.path)
|
||||
if err != nil {
|
||||
f.logger.Printf("[ERROR] driver.rotator: error getting directory listing: %v", err)
|
||||
return
|
||||
}
|
||||
// Inserting all the rotated files in a slice
|
||||
@@ -228,6 +229,7 @@ func (f *FileRotator) purgeOldFiles() {
|
||||
fileIdx := strings.TrimPrefix(fi.Name(), fmt.Sprintf("%s.", f.baseFileName))
|
||||
n, err := strconv.Atoi(fileIdx)
|
||||
if err != nil {
|
||||
f.logger.Printf("[ERROR] driver.rotator: error extracting file index: %v", err)
|
||||
continue
|
||||
}
|
||||
fIndexes = append(fIndexes, n)
|
||||
@@ -246,7 +248,10 @@ func (f *FileRotator) purgeOldFiles() {
|
||||
toDelete := fIndexes[0 : len(fIndexes)-f.MaxFiles]
|
||||
for _, fIndex := range toDelete {
|
||||
fname := filepath.Join(f.path, fmt.Sprintf("%s.%d", f.baseFileName, fIndex))
|
||||
os.RemoveAll(fname)
|
||||
err := os.RemoveAll(fname)
|
||||
if err != nil {
|
||||
f.logger.Printf("[ERROR] driver.rotator: error removing file: %v", err)
|
||||
}
|
||||
}
|
||||
f.oldestLogFileIdx = fIndexes[0]
|
||||
case <-f.doneCh:
|
||||
|
||||
94
client/gc.go
94
client/gc.go
@@ -18,6 +18,9 @@ const (
|
||||
|
||||
// GCConfig allows changing the behaviour of the garbage collector
|
||||
type GCConfig struct {
|
||||
// MaxAllocs is the maximum number of allocations to track before a GC
|
||||
// is triggered.
|
||||
MaxAllocs int
|
||||
DiskUsageThreshold float64
|
||||
InodeUsageThreshold float64
|
||||
Interval time.Duration
|
||||
@@ -25,10 +28,17 @@ type GCConfig struct {
|
||||
ParallelDestroys int
|
||||
}
|
||||
|
||||
// AllocCounter is used by AllocGarbageCollector to discover how many
|
||||
// allocations a node has and is generally fulfilled by the Client.
|
||||
type AllocCounter interface {
|
||||
NumAllocs() int
|
||||
}
|
||||
|
||||
// AllocGarbageCollector garbage collects terminated allocations on a node
|
||||
type AllocGarbageCollector struct {
|
||||
allocRunners *IndexedGCAllocPQ
|
||||
statsCollector stats.NodeStatsCollector
|
||||
allocCounter AllocCounter
|
||||
config *GCConfig
|
||||
logger *log.Logger
|
||||
destroyCh chan struct{}
|
||||
@@ -36,8 +46,9 @@ type AllocGarbageCollector struct {
|
||||
}
|
||||
|
||||
// NewAllocGarbageCollector returns a garbage collector for terminated
|
||||
// allocations on a node.
|
||||
func NewAllocGarbageCollector(logger *log.Logger, statsCollector stats.NodeStatsCollector, config *GCConfig) *AllocGarbageCollector {
|
||||
// allocations on a node. Must call Run() in a goroutine enable periodic
|
||||
// garbage collection.
|
||||
func NewAllocGarbageCollector(logger *log.Logger, statsCollector stats.NodeStatsCollector, ac AllocCounter, config *GCConfig) *AllocGarbageCollector {
|
||||
// Require at least 1 to make progress
|
||||
if config.ParallelDestroys <= 0 {
|
||||
logger.Printf("[WARN] client: garbage collector defaulting parallism to 1 due to invalid input value of %d", config.ParallelDestroys)
|
||||
@@ -47,17 +58,18 @@ func NewAllocGarbageCollector(logger *log.Logger, statsCollector stats.NodeStats
|
||||
gc := &AllocGarbageCollector{
|
||||
allocRunners: NewIndexedGCAllocPQ(),
|
||||
statsCollector: statsCollector,
|
||||
allocCounter: ac,
|
||||
config: config,
|
||||
logger: logger,
|
||||
destroyCh: make(chan struct{}, config.ParallelDestroys),
|
||||
shutdownCh: make(chan struct{}),
|
||||
}
|
||||
|
||||
go gc.run()
|
||||
return gc
|
||||
}
|
||||
|
||||
func (a *AllocGarbageCollector) run() {
|
||||
// Run the periodic garbage collector.
|
||||
func (a *AllocGarbageCollector) Run() {
|
||||
ticker := time.NewTicker(a.config.Interval)
|
||||
for {
|
||||
select {
|
||||
@@ -100,23 +112,33 @@ func (a *AllocGarbageCollector) keepUsageBelowThreshold() error {
|
||||
break
|
||||
}
|
||||
|
||||
if diskStats.UsedPercent <= a.config.DiskUsageThreshold &&
|
||||
diskStats.InodesUsedPercent <= a.config.InodeUsageThreshold {
|
||||
reason := ""
|
||||
|
||||
switch {
|
||||
case diskStats.UsedPercent > a.config.DiskUsageThreshold:
|
||||
reason = fmt.Sprintf("disk usage of %.0f is over gc threshold of %.0f",
|
||||
diskStats.UsedPercent, a.config.DiskUsageThreshold)
|
||||
case diskStats.InodesUsedPercent > a.config.InodeUsageThreshold:
|
||||
reason = fmt.Sprintf("inode usage of %.0f is over gc threshold of %.0f",
|
||||
diskStats.InodesUsedPercent, a.config.InodeUsageThreshold)
|
||||
case a.numAllocs() > a.config.MaxAllocs:
|
||||
reason = fmt.Sprintf("number of allocations is over the limit (%d)", a.config.MaxAllocs)
|
||||
}
|
||||
|
||||
// No reason to gc, exit
|
||||
if reason == "" {
|
||||
break
|
||||
}
|
||||
|
||||
// Collect an allocation
|
||||
gcAlloc := a.allocRunners.Pop()
|
||||
if gcAlloc == nil {
|
||||
a.logger.Printf("[WARN] client: garbage collection due to %s skipped because no terminal allocations", reason)
|
||||
break
|
||||
}
|
||||
|
||||
ar := gcAlloc.allocRunner
|
||||
alloc := ar.Alloc()
|
||||
a.logger.Printf("[INFO] client: garbage collecting allocation %v", alloc.ID)
|
||||
|
||||
// Destroy the alloc runner and wait until it exits
|
||||
a.destroyAllocRunner(ar)
|
||||
a.destroyAllocRunner(gcAlloc.allocRunner, reason)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -124,7 +146,13 @@ func (a *AllocGarbageCollector) keepUsageBelowThreshold() error {
|
||||
// destroyAllocRunner is used to destroy an allocation runner. It will acquire a
|
||||
// lock to restrict parallelism and then destroy the alloc runner, returning
|
||||
// once the allocation has been destroyed.
|
||||
func (a *AllocGarbageCollector) destroyAllocRunner(ar *AllocRunner) {
|
||||
func (a *AllocGarbageCollector) destroyAllocRunner(ar *AllocRunner, reason string) {
|
||||
id := "<nil>"
|
||||
if alloc := ar.Alloc(); alloc != nil {
|
||||
id = alloc.ID
|
||||
}
|
||||
a.logger.Printf("[INFO] client: garbage collecting allocation %s due to %s", id, reason)
|
||||
|
||||
// Acquire the destroy lock
|
||||
select {
|
||||
case <-a.shutdownCh:
|
||||
@@ -155,11 +183,7 @@ func (a *AllocGarbageCollector) Collect(allocID string) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to collect allocation %q: %v", allocID, err)
|
||||
}
|
||||
|
||||
ar := gcAlloc.allocRunner
|
||||
a.logger.Printf("[INFO] client: garbage collecting allocation %q", ar.Alloc().ID)
|
||||
|
||||
a.destroyAllocRunner(ar)
|
||||
a.destroyAllocRunner(gcAlloc.allocRunner, "forced collection")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -177,9 +201,7 @@ func (a *AllocGarbageCollector) CollectAll() error {
|
||||
break
|
||||
}
|
||||
|
||||
ar := gcAlloc.allocRunner
|
||||
a.logger.Printf("[INFO] client: garbage collecting alloc runner for alloc %q", ar.Alloc().ID)
|
||||
go a.destroyAllocRunner(ar)
|
||||
go a.destroyAllocRunner(gcAlloc.allocRunner, "forced full collection")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -187,6 +209,26 @@ func (a *AllocGarbageCollector) CollectAll() error {
|
||||
// MakeRoomFor garbage collects enough number of allocations in the terminal
|
||||
// state to make room for new allocations
|
||||
func (a *AllocGarbageCollector) MakeRoomFor(allocations []*structs.Allocation) error {
|
||||
// GC allocs until below the max limit + the new allocations
|
||||
max := a.config.MaxAllocs - len(allocations)
|
||||
for a.numAllocs() > max {
|
||||
select {
|
||||
case <-a.shutdownCh:
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
gcAlloc := a.allocRunners.Pop()
|
||||
if gcAlloc == nil {
|
||||
// It's fine if we can't lower below the limit here as
|
||||
// we'll keep trying to drop below the limit with each
|
||||
// periodic gc
|
||||
break
|
||||
}
|
||||
|
||||
// Destroy the alloc runner and wait until it exits
|
||||
a.destroyAllocRunner(gcAlloc.allocRunner, "new allocations")
|
||||
}
|
||||
totalResource := &structs.Resources{}
|
||||
for _, alloc := range allocations {
|
||||
if err := totalResource.Add(alloc.Resources); err != nil {
|
||||
@@ -244,10 +286,9 @@ func (a *AllocGarbageCollector) MakeRoomFor(allocations []*structs.Allocation) e
|
||||
|
||||
ar := gcAlloc.allocRunner
|
||||
alloc := ar.Alloc()
|
||||
a.logger.Printf("[INFO] client: garbage collecting allocation %v", alloc.ID)
|
||||
|
||||
// Destroy the alloc runner and wait until it exits
|
||||
a.destroyAllocRunner(ar)
|
||||
a.destroyAllocRunner(ar, fmt.Sprintf("freeing %d MB for new allocations", alloc.Resources.DiskMB))
|
||||
|
||||
// Call stats collect again
|
||||
diskCleared += alloc.Resources.DiskMB
|
||||
@@ -261,8 +302,7 @@ func (a *AllocGarbageCollector) MarkForCollection(ar *AllocRunner) error {
|
||||
return fmt.Errorf("nil allocation runner inserted for garbage collection")
|
||||
}
|
||||
if ar.Alloc() == nil {
|
||||
a.logger.Printf("[INFO] client: alloc is nil, so garbage collecting")
|
||||
a.destroyAllocRunner(ar)
|
||||
a.destroyAllocRunner(ar, "alloc is nil")
|
||||
}
|
||||
|
||||
a.logger.Printf("[INFO] client: marking allocation %v for GC", ar.Alloc().ID)
|
||||
@@ -281,6 +321,12 @@ func (a *AllocGarbageCollector) Remove(ar *AllocRunner) {
|
||||
}
|
||||
}
|
||||
|
||||
// numAllocs returns the total number of allocs tracked by the client as well
|
||||
// as those marked for GC.
|
||||
func (a *AllocGarbageCollector) numAllocs() int {
|
||||
return a.allocRunners.Length() + a.allocCounter.NumAllocs()
|
||||
}
|
||||
|
||||
// GCAlloc wraps an allocation runner and an index enabling it to be used within
|
||||
// a PQ
|
||||
type GCAlloc struct {
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -11,11 +9,14 @@ import (
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
)
|
||||
|
||||
var gcConfig = GCConfig{
|
||||
DiskUsageThreshold: 80,
|
||||
InodeUsageThreshold: 70,
|
||||
Interval: 1 * time.Minute,
|
||||
ReservedDiskMB: 0,
|
||||
func gcConfig() *GCConfig {
|
||||
return &GCConfig{
|
||||
DiskUsageThreshold: 80,
|
||||
InodeUsageThreshold: 70,
|
||||
Interval: 1 * time.Minute,
|
||||
ReservedDiskMB: 0,
|
||||
MaxAllocs: 100,
|
||||
}
|
||||
}
|
||||
|
||||
func TestIndexedGCAllocPQ(t *testing.T) {
|
||||
@@ -57,6 +58,15 @@ func TestIndexedGCAllocPQ(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// MockAllocCounter implements AllocCounter interface.
|
||||
type MockAllocCounter struct {
|
||||
allocs int
|
||||
}
|
||||
|
||||
func (m *MockAllocCounter) NumAllocs() int {
|
||||
return m.allocs
|
||||
}
|
||||
|
||||
type MockStatsCollector struct {
|
||||
availableValues []uint64
|
||||
usedPercents []float64
|
||||
@@ -90,8 +100,8 @@ func (m *MockStatsCollector) Stats() *stats.HostStats {
|
||||
}
|
||||
|
||||
func TestAllocGarbageCollector_MarkForCollection(t *testing.T) {
|
||||
logger := log.New(os.Stdout, "", 0)
|
||||
gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &gcConfig)
|
||||
logger := testLogger()
|
||||
gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())
|
||||
|
||||
_, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false)
|
||||
if err := gc.MarkForCollection(ar1); err != nil {
|
||||
@@ -105,8 +115,8 @@ func TestAllocGarbageCollector_MarkForCollection(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAllocGarbageCollector_Collect(t *testing.T) {
|
||||
logger := log.New(os.Stdout, "", 0)
|
||||
gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &gcConfig)
|
||||
logger := testLogger()
|
||||
gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())
|
||||
|
||||
_, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false)
|
||||
_, ar2 := testAllocRunnerFromAlloc(mock.Alloc(), false)
|
||||
@@ -131,8 +141,8 @@ func TestAllocGarbageCollector_Collect(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAllocGarbageCollector_CollectAll(t *testing.T) {
|
||||
logger := log.New(os.Stdout, "", 0)
|
||||
gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &gcConfig)
|
||||
logger := testLogger()
|
||||
gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())
|
||||
|
||||
_, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false)
|
||||
_, ar2 := testAllocRunnerFromAlloc(mock.Alloc(), false)
|
||||
@@ -153,10 +163,11 @@ func TestAllocGarbageCollector_CollectAll(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T) {
|
||||
logger := log.New(os.Stdout, "", 0)
|
||||
logger := testLogger()
|
||||
statsCollector := &MockStatsCollector{}
|
||||
gcConfig.ReservedDiskMB = 20
|
||||
gc := NewAllocGarbageCollector(logger, statsCollector, &gcConfig)
|
||||
conf := gcConfig()
|
||||
conf.ReservedDiskMB = 20
|
||||
gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)
|
||||
|
||||
_, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false)
|
||||
close(ar1.waitCh)
|
||||
@@ -190,10 +201,11 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T)
|
||||
}
|
||||
|
||||
func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) {
|
||||
logger := log.New(os.Stdout, "", 0)
|
||||
logger := testLogger()
|
||||
statsCollector := &MockStatsCollector{}
|
||||
gcConfig.ReservedDiskMB = 20
|
||||
gc := NewAllocGarbageCollector(logger, statsCollector, &gcConfig)
|
||||
conf := gcConfig()
|
||||
conf.ReservedDiskMB = 20
|
||||
gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)
|
||||
|
||||
_, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false)
|
||||
close(ar1.waitCh)
|
||||
@@ -228,10 +240,11 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) {
|
||||
logger := log.New(os.Stdout, "", 0)
|
||||
logger := testLogger()
|
||||
statsCollector := &MockStatsCollector{}
|
||||
gcConfig.ReservedDiskMB = 20
|
||||
gc := NewAllocGarbageCollector(logger, statsCollector, &gcConfig)
|
||||
conf := gcConfig()
|
||||
conf.ReservedDiskMB = 20
|
||||
gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)
|
||||
|
||||
_, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false)
|
||||
close(ar1.waitCh)
|
||||
@@ -262,10 +275,11 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T) {
|
||||
logger := log.New(os.Stdout, "", 0)
|
||||
logger := testLogger()
|
||||
statsCollector := &MockStatsCollector{}
|
||||
gcConfig.ReservedDiskMB = 20
|
||||
gc := NewAllocGarbageCollector(logger, statsCollector, &gcConfig)
|
||||
conf := gcConfig()
|
||||
conf.ReservedDiskMB = 20
|
||||
gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)
|
||||
|
||||
_, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false)
|
||||
close(ar1.waitCh)
|
||||
@@ -294,11 +308,49 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllocGarbageCollector_MakeRoomForAllocations_MaxAllocs(t *testing.T) {
|
||||
const (
|
||||
liveAllocs = 3
|
||||
maxAllocs = 6
|
||||
gcAllocs = 4
|
||||
gcAllocsLeft = 1
|
||||
)
|
||||
|
||||
logger := testLogger()
|
||||
statsCollector := &MockStatsCollector{
|
||||
availableValues: []uint64{10 * 1024 * MB},
|
||||
usedPercents: []float64{0},
|
||||
inodePercents: []float64{0},
|
||||
}
|
||||
allocCounter := &MockAllocCounter{allocs: liveAllocs}
|
||||
conf := gcConfig()
|
||||
conf.MaxAllocs = maxAllocs
|
||||
gc := NewAllocGarbageCollector(logger, statsCollector, allocCounter, conf)
|
||||
|
||||
for i := 0; i < gcAllocs; i++ {
|
||||
_, ar := testAllocRunnerFromAlloc(mock.Alloc(), false)
|
||||
close(ar.waitCh)
|
||||
if err := gc.MarkForCollection(ar); err != nil {
|
||||
t.Fatalf("error marking alloc for gc: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := gc.MakeRoomFor([]*structs.Allocation{mock.Alloc(), mock.Alloc()}); err != nil {
|
||||
t.Fatalf("error making room for 2 new allocs: %v", err)
|
||||
}
|
||||
|
||||
// There should be gcAllocsLeft alloc runners left to be collected
|
||||
if n := len(gc.allocRunners.index); n != gcAllocsLeft {
|
||||
t.Fatalf("expected %d remaining GC-able alloc runners but found %d", gcAllocsLeft, n)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) {
|
||||
logger := log.New(os.Stdout, "", 0)
|
||||
logger := testLogger()
|
||||
statsCollector := &MockStatsCollector{}
|
||||
gcConfig.ReservedDiskMB = 20
|
||||
gc := NewAllocGarbageCollector(logger, statsCollector, &gcConfig)
|
||||
conf := gcConfig()
|
||||
conf.ReservedDiskMB = 20
|
||||
gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)
|
||||
|
||||
_, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false)
|
||||
close(ar1.waitCh)
|
||||
@@ -329,10 +381,11 @@ func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAllocGarbageCollector_UsedPercentThreshold(t *testing.T) {
|
||||
logger := log.New(os.Stdout, "", 0)
|
||||
logger := testLogger()
|
||||
statsCollector := &MockStatsCollector{}
|
||||
gcConfig.ReservedDiskMB = 20
|
||||
gc := NewAllocGarbageCollector(logger, statsCollector, &gcConfig)
|
||||
conf := gcConfig()
|
||||
conf.ReservedDiskMB = 20
|
||||
gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)
|
||||
|
||||
_, ar1 := testAllocRunnerFromAlloc(mock.Alloc(), false)
|
||||
close(ar1.waitCh)
|
||||
@@ -363,3 +416,40 @@ func TestAllocGarbageCollector_UsedPercentThreshold(t *testing.T) {
|
||||
t.Fatalf("gcAlloc: %v", gcAlloc)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllocGarbageCollector_MaxAllocsThreshold(t *testing.T) {
|
||||
const (
|
||||
liveAllocs = 3
|
||||
maxAllocs = 6
|
||||
gcAllocs = 4
|
||||
gcAllocsLeft = 1
|
||||
)
|
||||
|
||||
logger := testLogger()
|
||||
statsCollector := &MockStatsCollector{
|
||||
availableValues: []uint64{1000},
|
||||
usedPercents: []float64{0},
|
||||
inodePercents: []float64{0},
|
||||
}
|
||||
allocCounter := &MockAllocCounter{allocs: liveAllocs}
|
||||
conf := gcConfig()
|
||||
conf.MaxAllocs = 4
|
||||
gc := NewAllocGarbageCollector(logger, statsCollector, allocCounter, conf)
|
||||
|
||||
for i := 0; i < gcAllocs; i++ {
|
||||
_, ar := testAllocRunnerFromAlloc(mock.Alloc(), false)
|
||||
close(ar.waitCh)
|
||||
if err := gc.MarkForCollection(ar); err != nil {
|
||||
t.Fatalf("error marking alloc for gc: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := gc.keepUsageBelowThreshold(); err != nil {
|
||||
t.Fatalf("error gc'ing: %v", err)
|
||||
}
|
||||
|
||||
// We should have gc'd down to MaxAllocs
|
||||
if n := len(gc.allocRunners.index); n != gcAllocsLeft {
|
||||
t.Fatalf("expected remaining gc allocs (%d) to equal %d", n, gcAllocsLeft)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,7 +31,10 @@ func testLogger() *log.Logger {
|
||||
}
|
||||
|
||||
func prefixedTestLogger(prefix string) *log.Logger {
|
||||
return log.New(os.Stderr, prefix, log.LstdFlags)
|
||||
if testing.Verbose() {
|
||||
return log.New(os.Stderr, prefix, log.LstdFlags)
|
||||
}
|
||||
return log.New(ioutil.Discard, "", 0)
|
||||
}
|
||||
|
||||
type MockTaskStateUpdater struct {
|
||||
|
||||
@@ -321,6 +321,7 @@ func (a *Agent) clientConfig() (*clientconfig.Config, error) {
|
||||
conf.GCParallelDestroys = a.config.Client.GCParallelDestroys
|
||||
conf.GCDiskUsageThreshold = a.config.Client.GCDiskUsageThreshold
|
||||
conf.GCInodeUsageThreshold = a.config.Client.GCInodeUsageThreshold
|
||||
conf.GCMaxAllocs = a.config.Client.GCMaxAllocs
|
||||
conf.NoHostUUID = a.config.Client.NoHostUUID
|
||||
|
||||
return conf, nil
|
||||
|
||||
@@ -58,6 +58,7 @@ client {
|
||||
gc_parallel_destroys = 6
|
||||
gc_disk_usage_threshold = 82
|
||||
gc_inode_usage_threshold = 91
|
||||
gc_max_allocs = 50
|
||||
no_host_uuid = true
|
||||
}
|
||||
server {
|
||||
|
||||
@@ -212,14 +212,18 @@ type ClientConfig struct {
|
||||
// collector will allow.
|
||||
GCParallelDestroys int `mapstructure:"gc_parallel_destroys"`
|
||||
|
||||
// GCInodeUsageThreshold is the inode usage threshold beyond which the Nomad
|
||||
// client triggers GC of the terminal allocations
|
||||
// GCDiskUsageThreshold is the disk usage threshold given as a percent
|
||||
// beyond which the Nomad client triggers GC of terminal allocations
|
||||
GCDiskUsageThreshold float64 `mapstructure:"gc_disk_usage_threshold"`
|
||||
|
||||
// GCInodeUsageThreshold is the inode usage threshold beyond which the Nomad
|
||||
// client triggers GC of the terminal allocations
|
||||
GCInodeUsageThreshold float64 `mapstructure:"gc_inode_usage_threshold"`
|
||||
|
||||
// GCMaxAllocs is the maximum number of allocations a node can have
|
||||
// before garbage collection is triggered.
|
||||
GCMaxAllocs int `mapstructure:"gc_max_allocs"`
|
||||
|
||||
// NoHostUUID disables using the host's UUID and will force generation of a
|
||||
// random UUID.
|
||||
NoHostUUID bool `mapstructure:"no_host_uuid"`
|
||||
@@ -506,6 +510,7 @@ func DevConfig() *Config {
|
||||
conf.Client.GCInterval = 10 * time.Minute
|
||||
conf.Client.GCDiskUsageThreshold = 99
|
||||
conf.Client.GCInodeUsageThreshold = 99
|
||||
conf.Client.GCMaxAllocs = 50
|
||||
|
||||
return conf
|
||||
}
|
||||
@@ -535,8 +540,9 @@ func DefaultConfig() *Config {
|
||||
Reserved: &Resources{},
|
||||
GCInterval: 1 * time.Minute,
|
||||
GCParallelDestroys: 2,
|
||||
GCInodeUsageThreshold: 70,
|
||||
GCDiskUsageThreshold: 80,
|
||||
GCInodeUsageThreshold: 70,
|
||||
GCMaxAllocs: 50,
|
||||
},
|
||||
Server: &ServerConfig{
|
||||
Enabled: false,
|
||||
@@ -986,6 +992,9 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
|
||||
if b.GCInodeUsageThreshold != 0 {
|
||||
result.GCInodeUsageThreshold = b.GCInodeUsageThreshold
|
||||
}
|
||||
if b.GCMaxAllocs != 0 {
|
||||
result.GCMaxAllocs = b.GCMaxAllocs
|
||||
}
|
||||
if b.NoHostUUID {
|
||||
result.NoHostUUID = b.NoHostUUID
|
||||
}
|
||||
|
||||
@@ -346,6 +346,7 @@ func parseClient(result **ClientConfig, list *ast.ObjectList) error {
|
||||
"gc_disk_usage_threshold",
|
||||
"gc_inode_usage_threshold",
|
||||
"gc_parallel_destroys",
|
||||
"gc_max_allocs",
|
||||
"no_host_uuid",
|
||||
}
|
||||
if err := checkHCLKeys(listVal, valid); err != nil {
|
||||
|
||||
@@ -3,10 +3,12 @@ package agent
|
||||
import (
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/nomad/nomad/structs/config"
|
||||
"github.com/kr/pretty"
|
||||
)
|
||||
|
||||
func TestConfig_Parse(t *testing.T) {
|
||||
@@ -75,6 +77,7 @@ func TestConfig_Parse(t *testing.T) {
|
||||
GCParallelDestroys: 6,
|
||||
GCDiskUsageThreshold: 82,
|
||||
GCInodeUsageThreshold: 91,
|
||||
GCMaxAllocs: 50,
|
||||
NoHostUUID: true,
|
||||
},
|
||||
Server: &ServerConfig{
|
||||
@@ -165,22 +168,20 @@ func TestConfig_Parse(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Logf("Testing parse: %s", tc.File)
|
||||
t.Run(tc.File, func(t *testing.T) {
|
||||
path, err := filepath.Abs(filepath.Join("./config-test-fixtures", tc.File))
|
||||
if err != nil {
|
||||
t.Fatalf("file: %s\n\n%s", tc.File, err)
|
||||
}
|
||||
|
||||
path, err := filepath.Abs(filepath.Join("./config-test-fixtures", tc.File))
|
||||
if err != nil {
|
||||
t.Fatalf("file: %s\n\n%s", tc.File, err)
|
||||
continue
|
||||
}
|
||||
actual, err := ParseConfigFile(path)
|
||||
if (err != nil) != tc.Err {
|
||||
t.Fatalf("file: %s\n\n%s", tc.File, err)
|
||||
}
|
||||
|
||||
actual, err := ParseConfigFile(path)
|
||||
if (err != nil) != tc.Err {
|
||||
t.Fatalf("file: %s\n\n%s", tc.File, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(actual, tc.Result) {
|
||||
t.Fatalf("file: %s\n\n%#v\n\n%#v", tc.File, actual, tc.Result)
|
||||
}
|
||||
if !reflect.DeepEqual(actual, tc.Result) {
|
||||
t.Errorf("file: %s diff: (actual vs expected)\n\n%s", tc.File, strings.Join(pretty.Diff(actual, tc.Result), "\n"))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -144,6 +145,8 @@ func (a *ConsulConfig) Merge(b *ConsulConfig) *ConsulConfig {
|
||||
// ApiConfig returns a usable Consul config that can be passed directly to
|
||||
// hashicorp/consul/api. NOTE: datacenter is not set
|
||||
func (c *ConsulConfig) ApiConfig() (*consul.Config, error) {
|
||||
// Get the default config from consul to reuse things like the default
|
||||
// http.Transport.
|
||||
config := consul.DefaultConfig()
|
||||
if c.Addr != "" {
|
||||
config.Address = c.Addr
|
||||
@@ -152,7 +155,12 @@ func (c *ConsulConfig) ApiConfig() (*consul.Config, error) {
|
||||
config.Token = c.Token
|
||||
}
|
||||
if c.Timeout != 0 {
|
||||
// Create a custom Client to set the timeout
|
||||
if config.HttpClient == nil {
|
||||
config.HttpClient = &http.Client{}
|
||||
}
|
||||
config.HttpClient.Timeout = c.Timeout
|
||||
config.HttpClient.Transport = config.Transport
|
||||
}
|
||||
if c.Auth != "" {
|
||||
var username, password string
|
||||
@@ -180,6 +188,11 @@ func (c *ConsulConfig) ApiConfig() (*consul.Config, error) {
|
||||
if c.VerifySSL != nil {
|
||||
config.TLSConfig.InsecureSkipVerify = !*c.VerifySSL
|
||||
}
|
||||
tlsConfig, err := consul.SetupTLSConfig(&config.TLSConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config.Transport.TLSClientConfig = tlsConfig
|
||||
}
|
||||
|
||||
return config, nil
|
||||
|
||||
6
vendor/github.com/hashicorp/consul-template/child/child.go
generated
vendored
6
vendor/github.com/hashicorp/consul-template/child/child.go
generated
vendored
@@ -14,6 +14,12 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Seed the default rand Source with current time to produce better random
|
||||
// numbers used with splay
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrMissingCommand is the error returned when no command is specified
|
||||
// to run.
|
||||
|
||||
27
vendor/github.com/hashicorp/consul-template/config/retry.go
generated
vendored
27
vendor/github.com/hashicorp/consul-template/config/retry.go
generated
vendored
@@ -8,11 +8,14 @@ import (
|
||||
|
||||
const (
|
||||
// DefaultRetryAttempts is the default number of maximum retry attempts.
|
||||
DefaultRetryAttempts = 5
|
||||
DefaultRetryAttempts = 12
|
||||
|
||||
// DefaultRetryBackoff is the default base for the exponential backoff
|
||||
// algorithm.
|
||||
DefaultRetryBackoff = 250 * time.Millisecond
|
||||
|
||||
// DefaultRetryMaxBackoff is the default maximum of backoff time
|
||||
DefaultRetryMaxBackoff = 1 * time.Minute
|
||||
)
|
||||
|
||||
// RetryFunc is the signature of a function that supports retries.
|
||||
@@ -23,12 +26,17 @@ type RetryFunc func(int) (bool, time.Duration)
|
||||
type RetryConfig struct {
|
||||
// Attempts is the total number of maximum attempts to retry before letting
|
||||
// the error fall through.
|
||||
// 0 means unlimited.
|
||||
Attempts *int
|
||||
|
||||
// Backoff is the base of the exponentialbackoff. This number will be
|
||||
// multipled by the next power of 2 on each iteration.
|
||||
Backoff *time.Duration
|
||||
|
||||
// MaxBackoff is an upper limit to the sleep time between retries
|
||||
// A MaxBackoff of zero means there is no limit to the exponential growth of the backoff.
|
||||
MaxBackoff *time.Duration `mapstructure:"max_backoff"`
|
||||
|
||||
// Enabled signals if this retry is enabled.
|
||||
Enabled *bool
|
||||
}
|
||||
@@ -51,6 +59,8 @@ func (c *RetryConfig) Copy() *RetryConfig {
|
||||
|
||||
o.Backoff = c.Backoff
|
||||
|
||||
o.MaxBackoff = c.MaxBackoff
|
||||
|
||||
o.Enabled = c.Enabled
|
||||
|
||||
return &o
|
||||
@@ -82,6 +92,10 @@ func (c *RetryConfig) Merge(o *RetryConfig) *RetryConfig {
|
||||
r.Backoff = o.Backoff
|
||||
}
|
||||
|
||||
if o.MaxBackoff != nil {
|
||||
r.MaxBackoff = o.MaxBackoff
|
||||
}
|
||||
|
||||
if o.Enabled != nil {
|
||||
r.Enabled = o.Enabled
|
||||
}
|
||||
@@ -103,6 +117,11 @@ func (c *RetryConfig) RetryFunc() RetryFunc {
|
||||
base := math.Pow(2, float64(retry))
|
||||
sleep := time.Duration(base) * TimeDurationVal(c.Backoff)
|
||||
|
||||
maxSleep := TimeDurationVal(c.MaxBackoff)
|
||||
if maxSleep > 0 && maxSleep < sleep {
|
||||
return true, maxSleep
|
||||
}
|
||||
|
||||
return true, sleep
|
||||
}
|
||||
}
|
||||
@@ -117,6 +136,10 @@ func (c *RetryConfig) Finalize() {
|
||||
c.Backoff = TimeDuration(DefaultRetryBackoff)
|
||||
}
|
||||
|
||||
if c.MaxBackoff == nil {
|
||||
c.MaxBackoff = TimeDuration(DefaultRetryMaxBackoff)
|
||||
}
|
||||
|
||||
if c.Enabled == nil {
|
||||
c.Enabled = Bool(true)
|
||||
}
|
||||
@@ -131,10 +154,12 @@ func (c *RetryConfig) GoString() string {
|
||||
return fmt.Sprintf("&RetryConfig{"+
|
||||
"Attempts:%s, "+
|
||||
"Backoff:%s, "+
|
||||
"MaxBackoff:%s, "+
|
||||
"Enabled:%s"+
|
||||
"}",
|
||||
IntGoString(c.Attempts),
|
||||
TimeDurationGoString(c.Backoff),
|
||||
TimeDurationGoString(c.MaxBackoff),
|
||||
BoolGoString(c.Enabled),
|
||||
)
|
||||
}
|
||||
|
||||
1
vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go
generated
vendored
1
vendor/github.com/hashicorp/consul-template/dependency/catalog_node.go
generated
vendored
@@ -128,6 +128,7 @@ func (d *CatalogNodeQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interf
|
||||
ID: node.Node.ID,
|
||||
Node: node.Node.Node,
|
||||
Address: node.Node.Address,
|
||||
Datacenter: node.Node.Datacenter,
|
||||
TaggedAddresses: node.Node.TaggedAddresses,
|
||||
Meta: node.Node.Meta,
|
||||
},
|
||||
|
||||
2
vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go
generated
vendored
2
vendor/github.com/hashicorp/consul-template/dependency/catalog_nodes.go
generated
vendored
@@ -28,6 +28,7 @@ type Node struct {
|
||||
ID string
|
||||
Node string
|
||||
Address string
|
||||
Datacenter string
|
||||
TaggedAddresses map[string]string
|
||||
Meta map[string]string
|
||||
}
|
||||
@@ -86,6 +87,7 @@ func (d *CatalogNodesQuery) Fetch(clients *ClientSet, opts *QueryOptions) (inter
|
||||
ID: node.ID,
|
||||
Node: node.Node,
|
||||
Address: node.Address,
|
||||
Datacenter: node.Datacenter,
|
||||
TaggedAddresses: node.TaggedAddresses,
|
||||
Meta: node.Meta,
|
||||
})
|
||||
|
||||
2
vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go
generated
vendored
2
vendor/github.com/hashicorp/consul-template/dependency/catalog_service.go
generated
vendored
@@ -27,6 +27,7 @@ type CatalogService struct {
|
||||
ID string
|
||||
Node string
|
||||
Address string
|
||||
Datacenter string
|
||||
TaggedAddresses map[string]string
|
||||
NodeMeta map[string]string
|
||||
ServiceID string
|
||||
@@ -101,6 +102,7 @@ func (d *CatalogServiceQuery) Fetch(clients *ClientSet, opts *QueryOptions) (int
|
||||
ID: s.ID,
|
||||
Node: s.Node,
|
||||
Address: s.Address,
|
||||
Datacenter: s.Datacenter,
|
||||
TaggedAddresses: s.TaggedAddresses,
|
||||
NodeMeta: s.NodeMeta,
|
||||
ServiceID: s.ServiceID,
|
||||
|
||||
12
vendor/github.com/hashicorp/consul-template/dependency/client_set.go
generated
vendored
12
vendor/github.com/hashicorp/consul-template/dependency/client_set.go
generated
vendored
@@ -25,8 +25,8 @@ type ClientSet struct {
|
||||
|
||||
// consulClient is a wrapper around a real Consul API client.
|
||||
type consulClient struct {
|
||||
client *consulapi.Client
|
||||
httpClient *http.Client
|
||||
client *consulapi.Client
|
||||
transport *http.Transport
|
||||
}
|
||||
|
||||
// vaultClient is a wrapper around a real Vault API client.
|
||||
@@ -169,7 +169,7 @@ func (c *ClientSet) CreateConsulClient(i *CreateConsulClientInput) error {
|
||||
}
|
||||
|
||||
// Setup the new transport
|
||||
consulConfig.HttpClient.Transport = transport
|
||||
consulConfig.Transport = transport
|
||||
|
||||
// Create the API client
|
||||
client, err := consulapi.NewClient(consulConfig)
|
||||
@@ -180,8 +180,8 @@ func (c *ClientSet) CreateConsulClient(i *CreateConsulClientInput) error {
|
||||
// Save the data on ourselves
|
||||
c.Lock()
|
||||
c.consul = &consulClient{
|
||||
client: client,
|
||||
httpClient: consulConfig.HttpClient,
|
||||
client: client,
|
||||
transport: transport,
|
||||
}
|
||||
c.Unlock()
|
||||
|
||||
@@ -323,7 +323,7 @@ func (c *ClientSet) Stop() {
|
||||
defer c.Unlock()
|
||||
|
||||
if c.consul != nil {
|
||||
c.consul.httpClient.Transport.(*http.Transport).CloseIdleConnections()
|
||||
c.consul.transport.CloseIdleConnections()
|
||||
}
|
||||
|
||||
if c.vault != nil {
|
||||
|
||||
2
vendor/github.com/hashicorp/consul-template/dependency/health_service.go
generated
vendored
2
vendor/github.com/hashicorp/consul-template/dependency/health_service.go
generated
vendored
@@ -47,7 +47,7 @@ type HealthService struct {
|
||||
ID string
|
||||
Name string
|
||||
Tags ServiceTags
|
||||
Checks []*api.HealthCheck
|
||||
Checks api.HealthChecks
|
||||
Status string
|
||||
Port int
|
||||
}
|
||||
|
||||
35
vendor/github.com/hashicorp/consul-template/manager/dedup.go
generated
vendored
35
vendor/github.com/hashicorp/consul-template/manager/dedup.go
generated
vendored
@@ -295,6 +295,9 @@ func (d *DedupManager) watchTemplate(client *consulapi.Client, t *template.Templ
|
||||
WaitTime: 60 * time.Second,
|
||||
}
|
||||
|
||||
var lastData []byte
|
||||
var lastIndex uint64
|
||||
|
||||
START:
|
||||
// Stop listening if we're stopped
|
||||
select {
|
||||
@@ -330,6 +333,13 @@ START:
|
||||
}
|
||||
opts.WaitIndex = meta.LastIndex
|
||||
|
||||
// Stop listening if we're stopped
|
||||
select {
|
||||
case <-d.stopCh:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// If we've exceeded the maximum staleness, retry without stale
|
||||
if allowStale && meta.LastContact > *d.config.MaxStale {
|
||||
allowStale = false
|
||||
@@ -342,13 +352,28 @@ START:
|
||||
allowStale = true
|
||||
}
|
||||
|
||||
// Stop listening if we're stopped
|
||||
select {
|
||||
case <-d.stopCh:
|
||||
return
|
||||
default:
|
||||
if meta.LastIndex == lastIndex {
|
||||
log.Printf("[TRACE] (dedup) %s no new data (index was the same)", path)
|
||||
goto START
|
||||
}
|
||||
|
||||
if meta.LastIndex < lastIndex {
|
||||
log.Printf("[TRACE] (dedup) %s had a lower index, resetting", path)
|
||||
lastIndex = 0
|
||||
goto START
|
||||
}
|
||||
lastIndex = meta.LastIndex
|
||||
|
||||
var data []byte
|
||||
if pair != nil {
|
||||
data = pair.Value
|
||||
}
|
||||
if bytes.Equal(lastData, data) {
|
||||
log.Printf("[TRACE] (dedup) %s no new data (contents were the same)", path)
|
||||
goto START
|
||||
}
|
||||
lastData = data
|
||||
|
||||
// If we are current the leader, wait for leadership lost
|
||||
d.leaderLock.RLock()
|
||||
lockCh, ok = d.leader[t]
|
||||
|
||||
2
vendor/github.com/hashicorp/consul-template/template/scratch.go
generated
vendored
2
vendor/github.com/hashicorp/consul-template/template/scratch.go
generated
vendored
@@ -101,7 +101,7 @@ func (s *Scratch) MapValues(k string) ([]interface{}, error) {
|
||||
|
||||
typed, ok := s.values[k].(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%q is not a map", k)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
keys := make([]string, 0, len(typed))
|
||||
|
||||
28
vendor/github.com/hashicorp/consul-template/watch/view.go
generated
vendored
28
vendor/github.com/hashicorp/consul-template/watch/view.go
generated
vendored
@@ -108,9 +108,12 @@ func (v *View) poll(viewCh chan<- *View, errCh chan<- error) {
|
||||
var retries int
|
||||
|
||||
for {
|
||||
doneCh, fetchErrCh := make(chan struct{}, 1), make(chan error, 1)
|
||||
go v.fetch(doneCh, fetchErrCh)
|
||||
doneCh := make(chan struct{}, 1)
|
||||
successCh := make(chan struct{}, 1)
|
||||
fetchErrCh := make(chan error, 1)
|
||||
go v.fetch(doneCh, successCh, fetchErrCh)
|
||||
|
||||
WAIT:
|
||||
select {
|
||||
case <-doneCh:
|
||||
// Reset the retry to avoid exponentially incrementing retries when we
|
||||
@@ -129,6 +132,16 @@ func (v *View) poll(viewCh chan<- *View, errCh chan<- error) {
|
||||
if v.once {
|
||||
return
|
||||
}
|
||||
case <-successCh:
|
||||
// We successfully received a non-error response from the server. This
|
||||
// does not mean we have data (that's dataCh's job), but rather this
|
||||
// just resets the counter indicating we communciated successfully. For
|
||||
// example, Consul make have an outage, but when it returns, the view
|
||||
// is unchanged. We have to reset the counter retries, but not update the
|
||||
// actual template.
|
||||
log.Printf("[TRACE] view %s successful contact, resetting retries", v.dependency)
|
||||
retries = 0
|
||||
goto WAIT
|
||||
case err := <-fetchErrCh:
|
||||
if v.retryFunc != nil {
|
||||
retry, sleep := v.retryFunc(retries)
|
||||
@@ -166,7 +179,7 @@ func (v *View) poll(viewCh chan<- *View, errCh chan<- error) {
|
||||
// written to errCh. It is designed to be run in a goroutine that selects the
|
||||
// result of doneCh and errCh. It is assumed that only one instance of fetch
|
||||
// is running per View and therefore no locking or mutexes are used.
|
||||
func (v *View) fetch(doneCh chan<- struct{}, errCh chan<- error) {
|
||||
func (v *View) fetch(doneCh, successCh chan<- struct{}, errCh chan<- error) {
|
||||
log.Printf("[TRACE] (view) %s starting fetch", v.dependency)
|
||||
|
||||
var allowStale bool
|
||||
@@ -203,6 +216,15 @@ func (v *View) fetch(doneCh chan<- struct{}, errCh chan<- error) {
|
||||
return
|
||||
}
|
||||
|
||||
// If we got this far, we received data successfully. That data might not
|
||||
// trigger a data update (because we could continue below), but we need to
|
||||
// inform the poller to reset the retry count.
|
||||
log.Printf("[TRACE] (view) %s marking successful data response", v.dependency)
|
||||
select {
|
||||
case successCh <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
|
||||
if allowStale && rm.LastContact > v.maxStale {
|
||||
allowStale = false
|
||||
log.Printf("[TRACE] (view) %s stale data (last contact exceeded max_stale)", v.dependency)
|
||||
|
||||
35
vendor/github.com/hashicorp/consul/api/acl.go
generated
vendored
35
vendor/github.com/hashicorp/consul/api/acl.go
generated
vendored
@@ -1,5 +1,9 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// ACLCLientType is the client type token
|
||||
ACLClientType = "client"
|
||||
@@ -18,6 +22,16 @@ type ACLEntry struct {
|
||||
Rules string
|
||||
}
|
||||
|
||||
// ACLReplicationStatus is used to represent the status of ACL replication.
|
||||
type ACLReplicationStatus struct {
|
||||
Enabled bool
|
||||
Running bool
|
||||
SourceDatacenter string
|
||||
ReplicatedIndex uint64
|
||||
LastSuccess time.Time
|
||||
LastError time.Time
|
||||
}
|
||||
|
||||
// ACL can be used to query the ACL endpoints
|
||||
type ACL struct {
|
||||
c *Client
|
||||
@@ -138,3 +152,24 @@ func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) {
|
||||
}
|
||||
return entries, qm, nil
|
||||
}
|
||||
|
||||
// Replication returns the status of the ACL replication process in the datacenter
|
||||
func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, error) {
|
||||
r := a.c.newRequest("GET", "/v1/acl/replication")
|
||||
r.setQueryOptions(q)
|
||||
rtt, resp, err := requireOK(a.c.doRequest(r))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
qm := &QueryMeta{}
|
||||
parseQueryMeta(resp, qm)
|
||||
qm.RequestTime = rtt
|
||||
|
||||
var entries *ACLReplicationStatus
|
||||
if err := decodeBody(resp, &entries); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return entries, qm, nil
|
||||
}
|
||||
|
||||
56
vendor/github.com/hashicorp/consul/api/api.go
generated
vendored
56
vendor/github.com/hashicorp/consul/api/api.go
generated
vendored
@@ -168,6 +168,9 @@ type Config struct {
|
||||
// Datacenter to use. If not provided, the default agent datacenter is used.
|
||||
Datacenter string
|
||||
|
||||
// Transport is the Transport to use for the http client.
|
||||
Transport *http.Transport
|
||||
|
||||
// HttpClient is the client to use. Default will be
|
||||
// used if not provided.
|
||||
HttpClient *http.Client
|
||||
@@ -237,11 +240,9 @@ func DefaultNonPooledConfig() *Config {
|
||||
// given function to make the transport.
|
||||
func defaultConfig(transportFn func() *http.Transport) *Config {
|
||||
config := &Config{
|
||||
Address: "127.0.0.1:8500",
|
||||
Scheme: "http",
|
||||
HttpClient: &http.Client{
|
||||
Transport: transportFn(),
|
||||
},
|
||||
Address: "127.0.0.1:8500",
|
||||
Scheme: "http",
|
||||
Transport: transportFn(),
|
||||
}
|
||||
|
||||
if addr := os.Getenv(HTTPAddrEnvName); addr != "" {
|
||||
@@ -364,8 +365,8 @@ func NewClient(config *Config) (*Client, error) {
|
||||
config.Scheme = defConfig.Scheme
|
||||
}
|
||||
|
||||
if config.HttpClient == nil {
|
||||
config.HttpClient = defConfig.HttpClient
|
||||
if config.Transport == nil {
|
||||
config.Transport = defConfig.Transport
|
||||
}
|
||||
|
||||
if config.TLSConfig.Address == "" {
|
||||
@@ -392,17 +393,14 @@ func NewClient(config *Config) (*Client, error) {
|
||||
config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify
|
||||
}
|
||||
|
||||
tlsClientConfig, err := SetupTLSConfig(&config.TLSConfig)
|
||||
|
||||
// We don't expect this to fail given that we aren't
|
||||
// parsing any of the input, but we panic just in case
|
||||
// since this doesn't have an error return.
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if config.HttpClient == nil {
|
||||
var err error
|
||||
config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
config.HttpClient.Transport.(*http.Transport).TLSClientConfig = tlsClientConfig
|
||||
|
||||
parts := strings.SplitN(config.Address, "://", 2)
|
||||
if len(parts) == 2 {
|
||||
switch parts[0] {
|
||||
@@ -429,6 +427,26 @@ func NewClient(config *Config) (*Client, error) {
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// NewHttpClient returns an http client configured with the given Transport and TLS
|
||||
// config.
|
||||
func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) {
|
||||
client := &http.Client{
|
||||
Transport: transport,
|
||||
}
|
||||
|
||||
if transport.TLSClientConfig == nil {
|
||||
tlsClientConfig, err := SetupTLSConfig(&tlsConf)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
transport.TLSClientConfig = tlsClientConfig
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// request is used to help build up a request
|
||||
type request struct {
|
||||
config *Config
|
||||
@@ -528,11 +546,11 @@ func (r *request) toHTTP() (*http.Request, error) {
|
||||
|
||||
// Check if we should encode the body
|
||||
if r.body == nil && r.obj != nil {
|
||||
if b, err := encodeBody(r.obj); err != nil {
|
||||
b, err := encodeBody(r.obj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
r.body = b
|
||||
}
|
||||
r.body = b
|
||||
}
|
||||
|
||||
// Create the HTTP request
|
||||
|
||||
2
vendor/github.com/hashicorp/consul/api/catalog.go
generated
vendored
2
vendor/github.com/hashicorp/consul/api/catalog.go
generated
vendored
@@ -4,6 +4,7 @@ type Node struct {
|
||||
ID string
|
||||
Node string
|
||||
Address string
|
||||
Datacenter string
|
||||
TaggedAddresses map[string]string
|
||||
Meta map[string]string
|
||||
CreateIndex uint64
|
||||
@@ -14,6 +15,7 @@ type CatalogService struct {
|
||||
ID string
|
||||
Node string
|
||||
Address string
|
||||
Datacenter string
|
||||
TaggedAddresses map[string]string
|
||||
NodeMeta map[string]string
|
||||
ServiceID string
|
||||
|
||||
1
vendor/github.com/hashicorp/consul/api/health.go
generated
vendored
1
vendor/github.com/hashicorp/consul/api/health.go
generated
vendored
@@ -33,6 +33,7 @@ type HealthCheck struct {
|
||||
Output string
|
||||
ServiceID string
|
||||
ServiceName string
|
||||
ServiceTags []string
|
||||
}
|
||||
|
||||
// HealthChecks is a collection of HealthCheck structs.
|
||||
|
||||
23
vendor/github.com/hashicorp/consul/api/kv.go
generated
vendored
23
vendor/github.com/hashicorp/consul/api/kv.go
generated
vendored
@@ -49,17 +49,18 @@ type KVPairs []*KVPair
|
||||
type KVOp string
|
||||
|
||||
const (
|
||||
KVSet KVOp = "set"
|
||||
KVDelete KVOp = "delete"
|
||||
KVDeleteCAS KVOp = "delete-cas"
|
||||
KVDeleteTree KVOp = "delete-tree"
|
||||
KVCAS KVOp = "cas"
|
||||
KVLock KVOp = "lock"
|
||||
KVUnlock KVOp = "unlock"
|
||||
KVGet KVOp = "get"
|
||||
KVGetTree KVOp = "get-tree"
|
||||
KVCheckSession KVOp = "check-session"
|
||||
KVCheckIndex KVOp = "check-index"
|
||||
KVSet KVOp = "set"
|
||||
KVDelete KVOp = "delete"
|
||||
KVDeleteCAS KVOp = "delete-cas"
|
||||
KVDeleteTree KVOp = "delete-tree"
|
||||
KVCAS KVOp = "cas"
|
||||
KVLock KVOp = "lock"
|
||||
KVUnlock KVOp = "unlock"
|
||||
KVGet KVOp = "get"
|
||||
KVGetTree KVOp = "get-tree"
|
||||
KVCheckSession KVOp = "check-session"
|
||||
KVCheckIndex KVOp = "check-index"
|
||||
KVCheckNotExists KVOp = "check-not-exists"
|
||||
)
|
||||
|
||||
// KVTxnOp defines a single operation inside a transaction.
|
||||
|
||||
29
vendor/github.com/hashicorp/consul/api/lock.go
generated
vendored
29
vendor/github.com/hashicorp/consul/api/lock.go
generated
vendored
@@ -143,22 +143,23 @@ func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
|
||||
// Check if we need to create a session first
|
||||
l.lockSession = l.opts.Session
|
||||
if l.lockSession == "" {
|
||||
if s, err := l.createSession(); err != nil {
|
||||
s, err := l.createSession()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create session: %v", err)
|
||||
} else {
|
||||
l.sessionRenew = make(chan struct{})
|
||||
l.lockSession = s
|
||||
session := l.c.Session()
|
||||
go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew)
|
||||
|
||||
// If we fail to acquire the lock, cleanup the session
|
||||
defer func() {
|
||||
if !l.isHeld {
|
||||
close(l.sessionRenew)
|
||||
l.sessionRenew = nil
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
l.sessionRenew = make(chan struct{})
|
||||
l.lockSession = s
|
||||
session := l.c.Session()
|
||||
go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew)
|
||||
|
||||
// If we fail to acquire the lock, cleanup the session
|
||||
defer func() {
|
||||
if !l.isHeld {
|
||||
close(l.sessionRenew)
|
||||
l.sessionRenew = nil
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Setup the query options
|
||||
|
||||
29
vendor/github.com/hashicorp/consul/api/semaphore.go
generated
vendored
29
vendor/github.com/hashicorp/consul/api/semaphore.go
generated
vendored
@@ -155,22 +155,23 @@ func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) {
|
||||
// Check if we need to create a session first
|
||||
s.lockSession = s.opts.Session
|
||||
if s.lockSession == "" {
|
||||
if sess, err := s.createSession(); err != nil {
|
||||
sess, err := s.createSession()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create session: %v", err)
|
||||
} else {
|
||||
s.sessionRenew = make(chan struct{})
|
||||
s.lockSession = sess
|
||||
session := s.c.Session()
|
||||
go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew)
|
||||
|
||||
// If we fail to acquire the lock, cleanup the session
|
||||
defer func() {
|
||||
if !s.isHeld {
|
||||
close(s.sessionRenew)
|
||||
s.sessionRenew = nil
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
s.sessionRenew = make(chan struct{})
|
||||
s.lockSession = sess
|
||||
session := s.c.Session()
|
||||
go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew)
|
||||
|
||||
// If we fail to acquire the lock, cleanup the session
|
||||
defer func() {
|
||||
if !s.isHeld {
|
||||
close(s.sessionRenew)
|
||||
s.sessionRenew = nil
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Create the contender entry
|
||||
|
||||
46
vendor/vendor.json
vendored
46
vendor/vendor.json
vendored
@@ -591,46 +591,46 @@
|
||||
"revision": "a557574d6c024ed6e36acc8b610f5f211c91568a"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "gx2CAg/v3k7kfBA/rT5NCkI0jDI=",
|
||||
"checksumSHA1": "Nu2j1GusM7ZH0uYrGzqr1K7yH7I=",
|
||||
"path": "github.com/hashicorp/consul-template/child",
|
||||
"revision": "e79894aad0b3789b93d0372e23f6eb0d2b75b35a",
|
||||
"revisionTime": "2017-03-28T18:42:41Z"
|
||||
"revision": "92746fc5cf86dbb113558bacec43459a65c8df14",
|
||||
"revisionTime": "2017-05-26T18:30:17Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "VMDorxQ1u/r2BYZ/azJd71UQi4A=",
|
||||
"checksumSHA1": "7TBPXChZZS84qZbzP7qFYeQding=",
|
||||
"path": "github.com/hashicorp/consul-template/config",
|
||||
"revision": "e79894aad0b3789b93d0372e23f6eb0d2b75b35a",
|
||||
"revisionTime": "2017-03-28T18:42:41Z"
|
||||
"revision": "92746fc5cf86dbb113558bacec43459a65c8df14",
|
||||
"revisionTime": "2017-05-26T18:30:17Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "kvyZVRAc/JG3Ua1deyKaFtOrlqc=",
|
||||
"checksumSHA1": "7rKifM082rlbHN9EcsVyu7VXLoo=",
|
||||
"path": "github.com/hashicorp/consul-template/dependency",
|
||||
"revision": "e79894aad0b3789b93d0372e23f6eb0d2b75b35a",
|
||||
"revisionTime": "2017-03-28T18:42:41Z"
|
||||
"revision": "92746fc5cf86dbb113558bacec43459a65c8df14",
|
||||
"revisionTime": "2017-05-26T18:30:17Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "QHR0vkzQMi3UH7q1HdV2QhxrOt8=",
|
||||
"checksumSHA1": "Ci5EmLs/h7ke9bUg7a34UfTbB5U=",
|
||||
"path": "github.com/hashicorp/consul-template/manager",
|
||||
"revision": "e79894aad0b3789b93d0372e23f6eb0d2b75b35a",
|
||||
"revisionTime": "2017-03-28T18:42:41Z"
|
||||
"revision": "92746fc5cf86dbb113558bacec43459a65c8df14",
|
||||
"revisionTime": "2017-05-26T18:30:17Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "oskgb0WteBKOItG8NNDduM7E/D0=",
|
||||
"path": "github.com/hashicorp/consul-template/signals",
|
||||
"revision": "e79894aad0b3789b93d0372e23f6eb0d2b75b35a",
|
||||
"revisionTime": "2017-03-28T18:42:41Z"
|
||||
"revision": "92746fc5cf86dbb113558bacec43459a65c8df14",
|
||||
"revisionTime": "2017-05-26T18:30:17Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "KFFY15i/0MuTL2z6OzbQfB4xIBE=",
|
||||
"checksumSHA1": "804hk7BQd6V2xjBwz+cE0hdzSlI=",
|
||||
"path": "github.com/hashicorp/consul-template/template",
|
||||
"revision": "e79894aad0b3789b93d0372e23f6eb0d2b75b35a",
|
||||
"revisionTime": "2017-03-28T18:42:41Z"
|
||||
"revision": "92746fc5cf86dbb113558bacec43459a65c8df14",
|
||||
"revisionTime": "2017-05-26T18:30:17Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "cl9R28+I+YT6a0Z+KQFP//wuC+0=",
|
||||
"checksumSHA1": "KjcelGP7qPh0ObKouBJuHmXUjqk=",
|
||||
"path": "github.com/hashicorp/consul-template/watch",
|
||||
"revision": "e79894aad0b3789b93d0372e23f6eb0d2b75b35a",
|
||||
"revisionTime": "2017-03-28T18:42:41Z"
|
||||
"revision": "92746fc5cf86dbb113558bacec43459a65c8df14",
|
||||
"revisionTime": "2017-05-26T18:30:17Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "jfELEMRhiTcppZmRH+ZwtkVS5Uw=",
|
||||
@@ -639,10 +639,10 @@
|
||||
"revisionTime": "2017-04-17T18:01:43Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "k8spDLTgdEFy15C1AdBJLAW+Zng=",
|
||||
"checksumSHA1": "RmhTKLvlDtxNPKZFnPYnfG/HzrI=",
|
||||
"path": "github.com/hashicorp/consul/api",
|
||||
"revision": "e9ca44d0a1757ac9aecc6785904a701936c10e4a",
|
||||
"revisionTime": "2017-04-17T18:01:43Z"
|
||||
"revision": "eea8f4ce75e8e6ff97c9913d89f687e8f8489ce6",
|
||||
"revisionTime": "2017-05-30T15:52:51Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "Z1N3jX/5B7GbLNfNp5GTxrsJItc=",
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
source "https://rubygems.org"
|
||||
|
||||
gem "middleman-hashicorp", "0.3.22"
|
||||
gem "middleman-hashicorp", "0.3.25"
|
||||
|
||||
@@ -6,7 +6,7 @@ GEM
|
||||
minitest (~> 5.1)
|
||||
thread_safe (~> 0.3, >= 0.3.4)
|
||||
tzinfo (~> 1.1)
|
||||
autoprefixer-rails (6.7.7.1)
|
||||
autoprefixer-rails (7.1.1)
|
||||
execjs
|
||||
bootstrap-sass (3.3.7)
|
||||
autoprefixer-rails (>= 5.2.1)
|
||||
@@ -42,14 +42,15 @@ GEM
|
||||
eventmachine (1.2.3)
|
||||
execjs (2.7.0)
|
||||
ffi (1.9.18)
|
||||
haml (4.0.7)
|
||||
haml (5.0.1)
|
||||
temple (>= 0.8.0)
|
||||
tilt
|
||||
hike (1.2.3)
|
||||
hooks (0.4.1)
|
||||
uber (~> 0.0.14)
|
||||
http_parser.rb (0.6.0)
|
||||
i18n (0.7.0)
|
||||
json (2.0.3)
|
||||
json (2.1.0)
|
||||
kramdown (1.13.2)
|
||||
listen (3.0.8)
|
||||
rb-fsevent (~> 0.9, >= 0.9.4)
|
||||
@@ -77,7 +78,7 @@ GEM
|
||||
rack (>= 1.4.5, < 2.0)
|
||||
thor (>= 0.15.2, < 2.0)
|
||||
tilt (~> 1.4.1, < 2.0)
|
||||
middleman-hashicorp (0.3.22)
|
||||
middleman-hashicorp (0.3.25)
|
||||
bootstrap-sass (~> 3.3)
|
||||
builder (~> 3.2)
|
||||
middleman (~> 3.4)
|
||||
@@ -101,9 +102,9 @@ GEM
|
||||
mime-types-data (~> 3.2015)
|
||||
mime-types-data (3.2016.0521)
|
||||
mini_portile2 (2.1.0)
|
||||
minitest (5.10.1)
|
||||
minitest (5.10.2)
|
||||
multi_json (1.12.1)
|
||||
nokogiri (1.7.1)
|
||||
nokogiri (1.7.2)
|
||||
mini_portile2 (~> 2.1.0)
|
||||
padrino-helpers (0.12.8.1)
|
||||
i18n (~> 0.6, >= 0.6.7)
|
||||
@@ -111,7 +112,7 @@ GEM
|
||||
tilt (~> 1.4.1)
|
||||
padrino-support (0.12.8.1)
|
||||
activesupport (>= 3.1)
|
||||
rack (1.6.5)
|
||||
rack (1.6.8)
|
||||
rack-livereload (0.3.16)
|
||||
rack
|
||||
rack-test (0.6.3)
|
||||
@@ -121,7 +122,7 @@ GEM
|
||||
ffi (>= 0.5.0)
|
||||
redcarpet (3.4.0)
|
||||
rouge (2.0.7)
|
||||
sass (3.4.23)
|
||||
sass (3.4.24)
|
||||
sprockets (2.12.4)
|
||||
hike (~> 1.2)
|
||||
multi_json (~> 1.0)
|
||||
@@ -132,26 +133,27 @@ GEM
|
||||
sprockets-sass (1.3.1)
|
||||
sprockets (~> 2.0)
|
||||
tilt (~> 1.1)
|
||||
temple (0.8.0)
|
||||
thor (0.19.4)
|
||||
thread_safe (0.3.6)
|
||||
tilt (1.4.1)
|
||||
turbolinks (5.0.1)
|
||||
turbolinks-source (~> 5)
|
||||
turbolinks-source (5.0.0)
|
||||
turbolinks-source (5.0.3)
|
||||
tzinfo (1.2.3)
|
||||
thread_safe (~> 0.1)
|
||||
uber (0.0.15)
|
||||
uglifier (2.7.2)
|
||||
execjs (>= 0.3.0)
|
||||
json (>= 1.8.0)
|
||||
xpath (2.0.0)
|
||||
xpath (2.1.0)
|
||||
nokogiri (~> 1.3)
|
||||
|
||||
PLATFORMS
|
||||
ruby
|
||||
|
||||
DEPENDENCIES
|
||||
middleman-hashicorp (= 0.3.22)
|
||||
middleman-hashicorp (= 0.3.25)
|
||||
|
||||
BUNDLED WITH
|
||||
1.14.6
|
||||
1.15.0
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
VERSION?="0.3.22"
|
||||
VERSION?="0.3.25"
|
||||
|
||||
build:
|
||||
@echo "==> Starting build in Docker..."
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
"builders": [
|
||||
{
|
||||
"type": "docker",
|
||||
"image": "hashicorp/middleman-hashicorp:0.3.22",
|
||||
"image": "hashicorp/middleman-hashicorp:0.3.25",
|
||||
"discard": "true",
|
||||
"run_command": ["-d", "-i", "-t", "{{ .Image }}", "/bin/sh"]
|
||||
}
|
||||
|
||||
86
website/redirects.txt
Normal file
86
website/redirects.txt
Normal file
@@ -0,0 +1,86 @@
|
||||
#
|
||||
# REDIRECTS FILE
|
||||
#
|
||||
# This is a sample redirect file. Redirects allow individual projects to add
|
||||
# their own redirect rules in a declarative manner using Fastly edge
|
||||
# dictionaries.
|
||||
#
|
||||
# FORMAT
|
||||
#
|
||||
# Redirects are in the format. There must be at least one space between the
|
||||
# original path and the new path, and there must be exactly two entries per
|
||||
# line.
|
||||
#
|
||||
# /original-path /new-path
|
||||
#
|
||||
# GLOB MATCHING
|
||||
#
|
||||
# Because of the way lookup tables work, there is no support for glob matching.
|
||||
# Fastly does not provide a way to iterate through the lookup table, so it is
|
||||
# not possible to run through the table and find anything that matches. As such
|
||||
# URLs must match directly.
|
||||
#
|
||||
# More complex redirects are possible, but must be added directly to the
|
||||
# configuration. Please contact the release engineering team for assistance.
|
||||
#
|
||||
# DELETING
|
||||
#
|
||||
# Deleting items is not supported at this time. To delete an item, contact the
|
||||
# release engineering team and they will delete the dictionary item.
|
||||
#
|
||||
# MISC
|
||||
#
|
||||
# - Blank lines are ignored
|
||||
# - Comments are hash-style
|
||||
# - URLs are limited to 256 characters
|
||||
# - Items are case-sensitive (please use all lowercase)
|
||||
#
|
||||
|
||||
# Docs
|
||||
/docs/agent/config.html /docs/agent/configuration/index.html
|
||||
/docs/jobops /docs/operating-a-job/index.html
|
||||
/docs/jobops/ /docs/operating-a-job/index.html
|
||||
/docs/jobops/index.html /docs/operating-a-job/index.html
|
||||
/docs/jobops/taskconfig.html /docs/operating-a-job/configuring-tasks.html
|
||||
/docs/jobops/inspecting.html /docs/operating-a-job/inspecting-state.html
|
||||
/docs/jobops/resources.html /docs/operating-a-job/resource-utilization.html
|
||||
/docs/jobops/servicediscovery.html /docs/service-discovery/index.html
|
||||
/docs/jobops/logs.html /docs/operating-a-job/accessing-logs.html
|
||||
/docs/jobops/updating.html /docs/operating-a-job/update-strategies/index.html
|
||||
/docs/jobspec /docs/job-specification/index.html
|
||||
/docs/jobspec/ /docs/job-specification/index.html
|
||||
/docs/jobspec/index.html /docs/job-specification/index.html
|
||||
/docs/jobspec/interpreted.html /docs/runtime/interpolation.html
|
||||
/docs/jobspec/json.html /api/json-jobs.html
|
||||
/docs/jobspec/environment.html /docs/runtime/environment.html
|
||||
/docs/jobspec/schedulers.html /docs/runtime/schedulers.html
|
||||
/docs/jobspec/servicediscovery.html /docs/job-specification/service.html
|
||||
/docs/jobspec/networking.html /docs/job-specification/network.html
|
||||
/docs/cluster/automatic.html /guides/cluster/automatic.html
|
||||
/docs/cluster/manual.html /guides/cluster/manual.html
|
||||
/docs/cluster/federation.html /guides/cluster/federation.html
|
||||
/docs/cluster/requirements.html /guides/cluster/requirements.html
|
||||
|
||||
# API
|
||||
/docs/http/index.html /api/index.html
|
||||
/docs/http/json-jobs.html /api/json-jobs.html
|
||||
/docs/http/job.html /api/jobs.html
|
||||
/docs/http/jobs.html /api/jobs.html
|
||||
/docs/http/node.html /api/nodes.html
|
||||
/docs/http/nodes.html /api/nodes.html
|
||||
/docs/http/alloc.html /api/allocations.html
|
||||
/docs/http/allocs.html /api/allocations.html
|
||||
/docs/http/eval.html /api/evaluations.html
|
||||
/docs/http/evals.html /api/evaluations.html
|
||||
/docs/http/agent-self.html /api/agent.html
|
||||
/docs/http/agent-join.html /api/agent.html
|
||||
/docs/http/agent-members.html /api/agent.html
|
||||
/docs/http/agent-force-leave.html /api/agent.html
|
||||
/docs/http/agent-servers.html /api/agent.html
|
||||
/docs/http/client-fs.html /api/client.html
|
||||
/docs/http/client-stats.html /api/client.html
|
||||
/docs/http/client-allocation-stats.html /api/client.html
|
||||
/docs/http/regions.html /api/regions.html
|
||||
/docs/http/status.html /api/status.html
|
||||
/docs/http/operator.html /api/operator.html
|
||||
/docs/http/system.html /api/system.html
|
||||
@@ -4,6 +4,7 @@ set -e
|
||||
PROJECT="nomad"
|
||||
PROJECT_URL="www.nomadproject.io"
|
||||
FASTLY_SERVICE_ID="7GrxRJP3PVBuqQbyxYQ0MV"
|
||||
FASTLY_DICTIONARY_ID="4OEpQ4S6HbEu7wkfTvrWUG"
|
||||
|
||||
# Ensure the proper AWS environment variables are set
|
||||
if [ -z "$AWS_ACCESS_KEY_ID" ]; then
|
||||
@@ -93,6 +94,71 @@ if [ -z "$NO_UPLOAD" ]; then
|
||||
modify "s3://hc-sites/$PROJECT/latest/"
|
||||
fi
|
||||
|
||||
# Add redirects if they exist
|
||||
if [ -z "$NO_REDIRECTS" ] || [ ! test -f "$DIR/redirects.txt" ]; then
|
||||
echo "Adding redirects..."
|
||||
fields=()
|
||||
while read -r line; do
|
||||
[[ "$line" =~ ^#.* ]] && continue
|
||||
[[ -z "$line" ]] && continue
|
||||
|
||||
# Read fields
|
||||
IFS=" " read -ra parts <<<"$line"
|
||||
fields+=("${parts[@]}")
|
||||
done < "$DIR/redirects.txt"
|
||||
|
||||
# Check we have pairs
|
||||
if [ $((${#fields[@]} % 2)) -ne 0 ]; then
|
||||
echo "Bad redirects (not an even number)!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check we don't have more than 1000 entries (yes, it says 2000 below, but that
|
||||
# is because we've split into multiple lines).
|
||||
if [ "${#fields}" -gt 2000 ]; then
|
||||
echo "More than 1000 entries!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Validations
|
||||
for field in "${fields[@]}"; do
|
||||
if [ "${#field}" -gt 256 ]; then
|
||||
echo "'$field' is > 256 characters!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "${field:0:1}" != "/" ]; then
|
||||
echo "'$field' does not start with /!"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
# Build the payload for single-request updates.
|
||||
jq_args=()
|
||||
jq_query="."
|
||||
for (( i=0; i<${#fields[@]}; i+=2 )); do
|
||||
original="${fields[i]}"
|
||||
redirect="${fields[i+1]}"
|
||||
echo "Redirecting ${original} -> ${redirect}"
|
||||
jq_args+=(--arg "key$((i/2))" "${original}")
|
||||
jq_args+=(--arg "value$((i/2))" "${redirect}")
|
||||
jq_query+="| .items |= (. + [{op: \"upsert\", item_key: \$key$((i/2)), item_value: \$value$((i/2))}])"
|
||||
done
|
||||
json="$(jq "${jq_args[@]}" "${jq_query}" <<<'{"items": []}')"
|
||||
|
||||
# Post the JSON body
|
||||
curl \
|
||||
--fail \
|
||||
--silent \
|
||||
--output /dev/null \
|
||||
--request "PATCH" \
|
||||
--header "Fastly-Key: $FASTLY_API_KEY" \
|
||||
--header "Content-type: application/json" \
|
||||
--header "Accept: application/json" \
|
||||
--data "$json"\
|
||||
"https://api.fastly.com/service/$FASTLY_SERVICE_ID/dictionary/$FASTLY_DICTIONARY_ID/items"
|
||||
fi
|
||||
|
||||
# Perform a purge of the surrogate key.
|
||||
if [ -z "$NO_PURGE" ]; then
|
||||
echo "Purging Fastly cache..."
|
||||
|
||||
462
website/source/api/agent.html.md
Normal file
462
website/source/api/agent.html.md
Normal file
@@ -0,0 +1,462 @@
|
||||
---
|
||||
layout: api
|
||||
page_title: Agent - HTTP API
|
||||
sidebar_current: api-agent
|
||||
description: |-
|
||||
The /agent endpoints interact with the local Nomad agent to interact with
|
||||
members and servers.
|
||||
---
|
||||
|
||||
# Agent HTTP API
|
||||
|
||||
The `/agent` endpoints are used to interact with the local Nomad agent.
|
||||
|
||||
## List Members
|
||||
|
||||
This endpoint queries the agent for the known peers in the gossip pool. This
|
||||
endpoint is only applicable to servers. Due to the nature of gossip, this is
|
||||
eventually consistent.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `GET` | `/agent/members` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/agent/members
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"ServerName": "bacon-mac",
|
||||
"ServerRegion": "global",
|
||||
"ServerDC": "dc1",
|
||||
"Members": [
|
||||
{
|
||||
"Name": "bacon-mac.global",
|
||||
"Addr": "127.0.0.1",
|
||||
"Port": 4648,
|
||||
"Tags": {
|
||||
"mvn": "1",
|
||||
"build": "0.5.5dev",
|
||||
"port": "4647",
|
||||
"bootstrap": "1",
|
||||
"role": "nomad",
|
||||
"region": "global",
|
||||
"dc": "dc1",
|
||||
"vsn": "1"
|
||||
},
|
||||
"Status": "alive",
|
||||
"ProtocolMin": 1,
|
||||
"ProtocolMax": 5,
|
||||
"ProtocolCur": 2,
|
||||
"DelegateMin": 2,
|
||||
"DelegateMax": 4,
|
||||
"DelegateCur": 4
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## List Servers
|
||||
|
||||
This endpoint lists the known server nodes. The `servers` endpoint is used to
|
||||
query an agent in client mode for its list of known servers. Client nodes
|
||||
register themselves with these server addresses so that they may dequeue work.
|
||||
The servers endpoint can be used to keep this configuration up to date if there
|
||||
are changes in the cluster.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `GET` | `/agent/servers` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/agent/servers
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
[
|
||||
"127.0.0.1:4647"
|
||||
]
|
||||
```
|
||||
|
||||
## Update Servers
|
||||
|
||||
This endpoint updates the list of known servers to the provided list. This
|
||||
**replaces** all previous server addresses with the new list.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `POST` | `/agent/servers` | `(empty body)` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `address` `(string: <required>)` - Specifies the list of addresses in the
|
||||
format `ip:port`. This is specified as a query string!
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
--request POST \
|
||||
https://nomad.rocks/v1/agent/servers?address=1.2.3.4:4647&addres=5.6.7.8:4647
|
||||
```
|
||||
|
||||
## Query Self
|
||||
|
||||
This endpoint queries the state of the target agent (self).
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `POST` | `/agent/self` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | Consistency Modes | ACL Required |
|
||||
| ---------------- | ----------------- | ------------ |
|
||||
| `NO` | `none` | `none` |
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/agent/self
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"config": {
|
||||
"Addresses": {
|
||||
"HTTP": "127.0.0.1",
|
||||
"RPC": "127.0.0.1",
|
||||
"Serf": "127.0.0.1"
|
||||
},
|
||||
"AdvertiseAddrs": {
|
||||
"HTTP": "127.0.0.1:4646",
|
||||
"RPC": "127.0.0.1:4647",
|
||||
"Serf": "127.0.0.1:4648"
|
||||
},
|
||||
"Atlas": {
|
||||
"Endpoint": "",
|
||||
"Infrastructure": "",
|
||||
"Join": false
|
||||
},
|
||||
"BindAddr": "127.0.0.1",
|
||||
"Client": {
|
||||
"AllocDir": "",
|
||||
"ChrootEnv": {},
|
||||
"ClientMaxPort": 14512,
|
||||
"ClientMinPort": 14000,
|
||||
"Enabled": true,
|
||||
"GCDiskUsageThreshold": 99,
|
||||
"GCInodeUsageThreshold": 99,
|
||||
"GCInterval": 600000000000,
|
||||
"MaxKillTimeout": "30s",
|
||||
"Meta": {},
|
||||
"NetworkInterface": "lo0",
|
||||
"NetworkSpeed": 0,
|
||||
"NodeClass": "",
|
||||
"Options": {
|
||||
"driver.docker.volumes": "true"
|
||||
},
|
||||
"Reserved": {
|
||||
"CPU": 0,
|
||||
"DiskMB": 0,
|
||||
"IOPS": 0,
|
||||
"MemoryMB": 0,
|
||||
"ParsedReservedPorts": null,
|
||||
"ReservedPorts": ""
|
||||
},
|
||||
"Servers": null,
|
||||
"StateDir": ""
|
||||
},
|
||||
"Consul": {
|
||||
"Addr": "",
|
||||
"Auth": "",
|
||||
"AutoAdvertise": true,
|
||||
"CAFile": "",
|
||||
"CertFile": "",
|
||||
"ChecksUseAdvertise": false,
|
||||
"ClientAutoJoin": true,
|
||||
"ClientServiceName": "nomad-client",
|
||||
"EnableSSL": false,
|
||||
"KeyFile": "",
|
||||
"ServerAutoJoin": true,
|
||||
"ServerServiceName": "nomad",
|
||||
"Timeout": 5000000000,
|
||||
"Token": "",
|
||||
"VerifySSL": false
|
||||
},
|
||||
"DataDir": "",
|
||||
"Datacenter": "dc1",
|
||||
"DevMode": true,
|
||||
"DisableAnonymousSignature": true,
|
||||
"DisableUpdateCheck": false,
|
||||
"EnableDebug": true,
|
||||
"EnableSyslog": false,
|
||||
"Files": null,
|
||||
"HTTPAPIResponseHeaders": {},
|
||||
"LeaveOnInt": false,
|
||||
"LeaveOnTerm": false,
|
||||
"LogLevel": "DEBUG",
|
||||
"NodeName": "",
|
||||
"Ports": {
|
||||
"HTTP": 4646,
|
||||
"RPC": 4647,
|
||||
"Serf": 4648
|
||||
},
|
||||
"Region": "global",
|
||||
"Revision": "f551dcb83e3ac144c9dbb90583b6e82d234662e9",
|
||||
"Server": {
|
||||
"BootstrapExpect": 0,
|
||||
"DataDir": "",
|
||||
"Enabled": true,
|
||||
"EnabledSchedulers": null,
|
||||
"HeartbeatGrace": "",
|
||||
"NodeGCThreshold": "",
|
||||
"NumSchedulers": 0,
|
||||
"ProtocolVersion": 0,
|
||||
"RejoinAfterLeave": false,
|
||||
"RetryInterval": "30s",
|
||||
"RetryJoin": [],
|
||||
"RetryMaxAttempts": 0,
|
||||
"StartJoin": []
|
||||
},
|
||||
"SyslogFacility": "LOCAL0",
|
||||
"TLSConfig": {
|
||||
"CAFile": "",
|
||||
"CertFile": "",
|
||||
"EnableHTTP": false,
|
||||
"EnableRPC": false,
|
||||
"KeyFile": "",
|
||||
"VerifyServerHostname": false
|
||||
},
|
||||
"Telemetry": {
|
||||
"CirconusAPIApp": "",
|
||||
"CirconusAPIToken": "",
|
||||
"CirconusAPIURL": "",
|
||||
"CirconusBrokerID": "",
|
||||
"CirconusBrokerSelectTag": "",
|
||||
"CirconusCheckDisplayName": "",
|
||||
"CirconusCheckForceMetricActivation": "",
|
||||
"CirconusCheckID": "",
|
||||
"CirconusCheckInstanceID": "",
|
||||
"CirconusCheckSearchTag": "",
|
||||
"CirconusCheckSubmissionURL": "",
|
||||
"CirconusCheckTags": "",
|
||||
"CirconusSubmissionInterval": "",
|
||||
"CollectionInterval": "1s",
|
||||
"DataDogAddr": "",
|
||||
"DisableHostname": false,
|
||||
"PublishAllocationMetrics": false,
|
||||
"PublishNodeMetrics": false,
|
||||
"StatsdAddr": "",
|
||||
"StatsiteAddr": "",
|
||||
"UseNodeName": false
|
||||
},
|
||||
"Vault": {
|
||||
"Addr": "https://vault.service.consul:8200",
|
||||
"AllowUnauthenticated": true,
|
||||
"ConnectionRetryIntv": 30000000000,
|
||||
"Enabled": null,
|
||||
"Role": "",
|
||||
"TLSCaFile": "",
|
||||
"TLSCaPath": "",
|
||||
"TLSCertFile": "",
|
||||
"TLSKeyFile": "",
|
||||
"TLSServerName": "",
|
||||
"TLSSkipVerify": null,
|
||||
"TaskTokenTTL": "",
|
||||
"Token": "root"
|
||||
},
|
||||
"Version": "0.5.5",
|
||||
"VersionPrerelease": "dev"
|
||||
},
|
||||
"member": {
|
||||
"Addr": "127.0.0.1",
|
||||
"DelegateCur": 4,
|
||||
"DelegateMax": 4,
|
||||
"DelegateMin": 2,
|
||||
"Name": "bacon-mac.global",
|
||||
"Port": 4648,
|
||||
"ProtocolCur": 2,
|
||||
"ProtocolMax": 5,
|
||||
"ProtocolMin": 1,
|
||||
"Status": "alive",
|
||||
"Tags": {
|
||||
"role": "nomad",
|
||||
"region": "global",
|
||||
"dc": "dc1",
|
||||
"vsn": "1",
|
||||
"mvn": "1",
|
||||
"build": "0.5.5dev",
|
||||
"port": "4647",
|
||||
"bootstrap": "1"
|
||||
}
|
||||
},
|
||||
"stats": {
|
||||
"runtime": {
|
||||
"cpu_count": "8",
|
||||
"kernel.name": "darwin",
|
||||
"arch": "amd64",
|
||||
"version": "go1.8",
|
||||
"max_procs": "7",
|
||||
"goroutines": "79"
|
||||
},
|
||||
"nomad": {
|
||||
"server": "true",
|
||||
"leader": "true",
|
||||
"leader_addr": "127.0.0.1:4647",
|
||||
"bootstrap": "false",
|
||||
"known_regions": "1"
|
||||
},
|
||||
"raft": {
|
||||
"num_peers": "0",
|
||||
"fsm_pending": "0",
|
||||
"last_snapshot_index": "0",
|
||||
"last_log_term": "2",
|
||||
"commit_index": "144",
|
||||
"term": "2",
|
||||
"last_log_index": "144",
|
||||
"protocol_version_max": "3",
|
||||
"snapshot_version_max": "1",
|
||||
"latest_configuration_index": "1",
|
||||
"latest_configuration": "[{Suffrage:Voter ID:127.0.0.1:4647 Address:127.0.0.1:4647}]",
|
||||
"last_contact": "never",
|
||||
"applied_index": "144",
|
||||
"protocol_version": "1",
|
||||
"protocol_version_min": "0",
|
||||
"snapshot_version_min": "0",
|
||||
"state": "Leader",
|
||||
"last_snapshot_term": "0"
|
||||
},
|
||||
"client": {
|
||||
"heartbeat_ttl": "17.79568937s",
|
||||
"node_id": "fb2170a8-257d-3c64-b14d-bc06cc94e34c",
|
||||
"known_servers": "127.0.0.1:4647",
|
||||
"num_allocations": "0",
|
||||
"last_heartbeat": "10.107423052s"
|
||||
},
|
||||
"serf": {
|
||||
"event_time": "1",
|
||||
"event_queue": "0",
|
||||
"encrypted": "false",
|
||||
"member_time": "1",
|
||||
"query_time": "1",
|
||||
"intent_queue": "0",
|
||||
"query_queue": "0",
|
||||
"members": "1",
|
||||
"failed": "0",
|
||||
"left": "0",
|
||||
"health_score": "0"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Join Agent
|
||||
|
||||
This endpoint introduces a new member to the gossip pool. This endpoint is only
|
||||
eligible for servers.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `POST` | `/agent/join` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `address` `(string: <required>)` - Specifies the address to join in the
|
||||
`ip:port` format. This is provided as a query parameter and may be specified
|
||||
multiple times to join multiple servers.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
--request POST \
|
||||
https://nomad.rocks/v1/agent/join?address=1.2.3.4&address=5.6.7.8
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"error": "",
|
||||
"num_joined": 2
|
||||
}
|
||||
```
|
||||
|
||||
## Force Leave Agent
|
||||
|
||||
This endpoint forces a member of the gossip pool from the `"failed"` state to
|
||||
the `"left"` state. This allows the consensus protocol to remove the peer and
|
||||
stop attempting replication. This is only applicable for servers.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `POST` | `/agent/force-leave` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `node` `(string: <required>)` - Specifies the name of the node to force leave.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
--request POST \
|
||||
https://nomad.rocks/v1/agent/force-leave?node=client-ab2e23dc
|
||||
```
|
||||
524
website/source/api/allocations.html.md
Normal file
524
website/source/api/allocations.html.md
Normal file
@@ -0,0 +1,524 @@
|
||||
---
|
||||
layout: api
|
||||
page_title: Allocations - HTTP API
|
||||
sidebar_current: api-allocations
|
||||
description: |-
|
||||
The /allocation endpoints are used to query for and interact with allocations.
|
||||
---
|
||||
|
||||
# Allocations HTTP API
|
||||
|
||||
The `/allocation` endpoints are used to query for and interact with allocations.
|
||||
|
||||
## List Allocations
|
||||
|
||||
This endpoint lists all allocations.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ------------------------- | -------------------------- |
|
||||
| `GET` | `/v1/allocations` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `YES` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `prefix` `(string: "")`- Specifies a string to filter allocations on based on
|
||||
an index prefix. This is specified as a querystring parameter.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/allocations
|
||||
```
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/allocations?prefix=a8198d79
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"ID": "a8198d79-cfdb-6593-a999-1e9adabcba2e",
|
||||
"EvalID": "5456bd7a-9fc0-c0dd-6131-cbee77f57577",
|
||||
"Name": "example.cache[0]",
|
||||
"NodeID": "fb2170a8-257d-3c64-b14d-bc06cc94e34c",
|
||||
"JobID": "example",
|
||||
"TaskGroup": "cache",
|
||||
"DesiredStatus": "run",
|
||||
"DesiredDescription": "",
|
||||
"ClientStatus": "running",
|
||||
"ClientDescription": "",
|
||||
"TaskStates": {
|
||||
"redis": {
|
||||
"State": "running",
|
||||
"Failed": false,
|
||||
"Events": [
|
||||
{
|
||||
"Type": "Received",
|
||||
"Time": 1495747371795703800,
|
||||
"FailsTask": false,
|
||||
"RestartReason": "",
|
||||
"SetupError": "",
|
||||
"DriverError": "",
|
||||
"ExitCode": 0,
|
||||
"Signal": 0,
|
||||
"Message": "",
|
||||
"KillTimeout": 0,
|
||||
"KillError": "",
|
||||
"KillReason": "",
|
||||
"StartDelay": 0,
|
||||
"DownloadError": "",
|
||||
"ValidationError": "",
|
||||
"DiskLimit": 0,
|
||||
"FailedSibling": "",
|
||||
"VaultError": "",
|
||||
"TaskSignalReason": "",
|
||||
"TaskSignal": "",
|
||||
"DriverMessage": ""
|
||||
},
|
||||
{
|
||||
"Type": "Driver",
|
||||
"Time": 1495747371798867200,
|
||||
"FailsTask": false,
|
||||
"RestartReason": "",
|
||||
"SetupError": "",
|
||||
"DriverError": "",
|
||||
"ExitCode": 0,
|
||||
"Signal": 0,
|
||||
"Message": "",
|
||||
"KillTimeout": 0,
|
||||
"KillError": "",
|
||||
"KillReason": "",
|
||||
"StartDelay": 0,
|
||||
"DownloadError": "",
|
||||
"ValidationError": "",
|
||||
"DiskLimit": 0,
|
||||
"FailedSibling": "",
|
||||
"VaultError": "",
|
||||
"TaskSignalReason": "",
|
||||
"TaskSignal": "",
|
||||
"DriverMessage": "Downloading image redis:3.2"
|
||||
},
|
||||
{
|
||||
"Type": "Started",
|
||||
"Time": 1495747379525667800,
|
||||
"FailsTask": false,
|
||||
"RestartReason": "",
|
||||
"SetupError": "",
|
||||
"DriverError": "",
|
||||
"ExitCode": 0,
|
||||
"Signal": 0,
|
||||
"Message": "",
|
||||
"KillTimeout": 0,
|
||||
"KillError": "",
|
||||
"KillReason": "",
|
||||
"StartDelay": 0,
|
||||
"DownloadError": "",
|
||||
"ValidationError": "",
|
||||
"DiskLimit": 0,
|
||||
"FailedSibling": "",
|
||||
"VaultError": "",
|
||||
"TaskSignalReason": "",
|
||||
"TaskSignal": "",
|
||||
"DriverMessage": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"CreateIndex": 54,
|
||||
"ModifyIndex": 57,
|
||||
"CreateTime": 1495747371794276400
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
## Read Allocation
|
||||
|
||||
This endpoint reads information about a specific allocation.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | -------------------------- | -------------------------- |
|
||||
| `GET` | `/v1/allocation/:alloc_id` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `YES` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `:alloc_id` `(string: <required>)`- Specifies the UUID of the allocation. This
|
||||
must be the full UUID, not the short 8-character one. This is specified as
|
||||
part of the path.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/allocation/5456bd7a-9fc0-c0dd-6131-cbee77f57577
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"ID": "a8198d79-cfdb-6593-a999-1e9adabcba2e",
|
||||
"EvalID": "5456bd7a-9fc0-c0dd-6131-cbee77f57577",
|
||||
"Name": "example.cache[0]",
|
||||
"NodeID": "fb2170a8-257d-3c64-b14d-bc06cc94e34c",
|
||||
"JobID": "example",
|
||||
"Job": {
|
||||
"Region": "global",
|
||||
"ID": "example",
|
||||
"ParentID": "",
|
||||
"Name": "example",
|
||||
"Type": "service",
|
||||
"Priority": 50,
|
||||
"AllAtOnce": false,
|
||||
"Datacenters": [
|
||||
"dc1"
|
||||
],
|
||||
"Constraints": null,
|
||||
"TaskGroups": [
|
||||
{
|
||||
"Name": "cache",
|
||||
"Count": 1,
|
||||
"Constraints": null,
|
||||
"RestartPolicy": {
|
||||
"Attempts": 10,
|
||||
"Interval": 300000000000,
|
||||
"Delay": 25000000000,
|
||||
"Mode": "delay"
|
||||
},
|
||||
"Tasks": [
|
||||
{
|
||||
"Name": "redis",
|
||||
"Driver": "docker",
|
||||
"User": "",
|
||||
"Config": {
|
||||
"port_map": [
|
||||
{
|
||||
"db": 6379
|
||||
}
|
||||
],
|
||||
"image": "redis:3.2"
|
||||
},
|
||||
"Env": null,
|
||||
"Services": [
|
||||
{
|
||||
"Name": "global-redis-check",
|
||||
"PortLabel": "db",
|
||||
"Tags": [
|
||||
"global",
|
||||
"cache"
|
||||
],
|
||||
"Checks": [
|
||||
{
|
||||
"Name": "alive",
|
||||
"Type": "tcp",
|
||||
"Command": "",
|
||||
"Args": null,
|
||||
"Path": "",
|
||||
"Protocol": "",
|
||||
"PortLabel": "",
|
||||
"Interval": 10000000000,
|
||||
"Timeout": 2000000000,
|
||||
"InitialStatus": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"Vault": null,
|
||||
"Templates": null,
|
||||
"Constraints": null,
|
||||
"Resources": {
|
||||
"CPU": 500,
|
||||
"MemoryMB": 10,
|
||||
"DiskMB": 0,
|
||||
"IOPS": 0,
|
||||
"Networks": [
|
||||
{
|
||||
"Device": "",
|
||||
"CIDR": "",
|
||||
"IP": "",
|
||||
"MBits": 10,
|
||||
"ReservedPorts": null,
|
||||
"DynamicPorts": [
|
||||
{
|
||||
"Label": "db",
|
||||
"Value": 0
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"DispatchPayload": null,
|
||||
"Meta": null,
|
||||
"KillTimeout": 5000000000,
|
||||
"LogConfig": {
|
||||
"MaxFiles": 10,
|
||||
"MaxFileSizeMB": 10
|
||||
},
|
||||
"Artifacts": null,
|
||||
"Leader": false
|
||||
}
|
||||
],
|
||||
"EphemeralDisk": {
|
||||
"Sticky": false,
|
||||
"SizeMB": 300,
|
||||
"Migrate": false
|
||||
},
|
||||
"Meta": null
|
||||
}
|
||||
],
|
||||
"Update": {
|
||||
"Stagger": 10000000000,
|
||||
"MaxParallel": 0
|
||||
},
|
||||
"Periodic": null,
|
||||
"ParameterizedJob": null,
|
||||
"Payload": null,
|
||||
"Meta": null,
|
||||
"VaultToken": "",
|
||||
"Status": "pending",
|
||||
"StatusDescription": "",
|
||||
"CreateIndex": 52,
|
||||
"ModifyIndex": 52,
|
||||
"JobModifyIndex": 52
|
||||
},
|
||||
"TaskGroup": "cache",
|
||||
"Resources": {
|
||||
"CPU": 500,
|
||||
"MemoryMB": 10,
|
||||
"DiskMB": 300,
|
||||
"IOPS": 0,
|
||||
"Networks": [
|
||||
{
|
||||
"Device": "lo0",
|
||||
"CIDR": "",
|
||||
"IP": "127.0.0.1",
|
||||
"MBits": 10,
|
||||
"ReservedPorts": null,
|
||||
"DynamicPorts": [
|
||||
{
|
||||
"Label": "db",
|
||||
"Value": 23116
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"SharedResources": {
|
||||
"CPU": 0,
|
||||
"MemoryMB": 0,
|
||||
"DiskMB": 300,
|
||||
"IOPS": 0,
|
||||
"Networks": null
|
||||
},
|
||||
"TaskResources": {
|
||||
"redis": {
|
||||
"CPU": 500,
|
||||
"MemoryMB": 10,
|
||||
"DiskMB": 0,
|
||||
"IOPS": 0,
|
||||
"Networks": [
|
||||
{
|
||||
"Device": "lo0",
|
||||
"CIDR": "",
|
||||
"IP": "127.0.0.1",
|
||||
"MBits": 10,
|
||||
"ReservedPorts": null,
|
||||
"DynamicPorts": [
|
||||
{
|
||||
"Label": "db",
|
||||
"Value": 23116
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"Metrics": {
|
||||
"NodesEvaluated": 1,
|
||||
"NodesFiltered": 0,
|
||||
"NodesAvailable": {
|
||||
"dc1": 1
|
||||
},
|
||||
"ClassFiltered": null,
|
||||
"ConstraintFiltered": null,
|
||||
"NodesExhausted": 0,
|
||||
"ClassExhausted": null,
|
||||
"DimensionExhausted": null,
|
||||
"Scores": {
|
||||
"fb2170a8-257d-3c64-b14d-bc06cc94e34c.binpack": 0.6205732522109244
|
||||
},
|
||||
"AllocationTime": 31729,
|
||||
"CoalescedFailures": 0
|
||||
},
|
||||
"DesiredStatus": "run",
|
||||
"DesiredDescription": "",
|
||||
"ClientStatus": "running",
|
||||
"ClientDescription": "",
|
||||
"TaskStates": {
|
||||
"redis": {
|
||||
"State": "running",
|
||||
"Failed": false,
|
||||
"Events": [
|
||||
{
|
||||
"Type": "Received",
|
||||
"Time": 1495747371795703800,
|
||||
"FailsTask": false,
|
||||
"RestartReason": "",
|
||||
"SetupError": "",
|
||||
"DriverError": "",
|
||||
"ExitCode": 0,
|
||||
"Signal": 0,
|
||||
"Message": "",
|
||||
"KillTimeout": 0,
|
||||
"KillError": "",
|
||||
"KillReason": "",
|
||||
"StartDelay": 0,
|
||||
"DownloadError": "",
|
||||
"ValidationError": "",
|
||||
"DiskLimit": 0,
|
||||
"FailedSibling": "",
|
||||
"VaultError": "",
|
||||
"TaskSignalReason": "",
|
||||
"TaskSignal": "",
|
||||
"DriverMessage": ""
|
||||
},
|
||||
{
|
||||
"Type": "Driver",
|
||||
"Time": 1495747371798867200,
|
||||
"FailsTask": false,
|
||||
"RestartReason": "",
|
||||
"SetupError": "",
|
||||
"DriverError": "",
|
||||
"ExitCode": 0,
|
||||
"Signal": 0,
|
||||
"Message": "",
|
||||
"KillTimeout": 0,
|
||||
"KillError": "",
|
||||
"KillReason": "",
|
||||
"StartDelay": 0,
|
||||
"DownloadError": "",
|
||||
"ValidationError": "",
|
||||
"DiskLimit": 0,
|
||||
"FailedSibling": "",
|
||||
"VaultError": "",
|
||||
"TaskSignalReason": "",
|
||||
"TaskSignal": "",
|
||||
"DriverMessage": "Downloading image redis:3.2"
|
||||
},
|
||||
{
|
||||
"Type": "Started",
|
||||
"Time": 1495747379525667800,
|
||||
"FailsTask": false,
|
||||
"RestartReason": "",
|
||||
"SetupError": "",
|
||||
"DriverError": "",
|
||||
"ExitCode": 0,
|
||||
"Signal": 0,
|
||||
"Message": "",
|
||||
"KillTimeout": 0,
|
||||
"KillError": "",
|
||||
"KillReason": "",
|
||||
"StartDelay": 0,
|
||||
"DownloadError": "",
|
||||
"ValidationError": "",
|
||||
"DiskLimit": 0,
|
||||
"FailedSibling": "",
|
||||
"VaultError": "",
|
||||
"TaskSignalReason": "",
|
||||
"TaskSignal": "",
|
||||
"DriverMessage": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"PreviousAllocation": "",
|
||||
"CreateIndex": 54,
|
||||
"ModifyIndex": 57,
|
||||
"AllocModifyIndex": 54,
|
||||
"CreateTime": 1495747371794276400
|
||||
}
|
||||
```
|
||||
|
||||
#### Field Reference
|
||||
|
||||
- `TaskStates` - A map of tasks to their current state and the latest events
|
||||
that have effected the state.
|
||||
|
||||
A task can be in the following states:
|
||||
|
||||
- `TaskStatePending` - The task is waiting to be run, either for the first
|
||||
time or due to a restart.
|
||||
|
||||
- `TaskStateRunning` - The task is currently running.
|
||||
|
||||
- `TaskStateDead` - The task is dead and will not run again.
|
||||
|
||||
Further the state contains the `StartedAt` and `FinishedAt` times of the
|
||||
task. `StartedAt` can be updated multiple times if the task restarts but
|
||||
`FinishedAt` is set only when the task transitions to `TaskStateDead`
|
||||
|
||||
- `Events` - An event contains metadata about the event. The latest 10 events
|
||||
are stored per task. Each event is timestamped (unix nano-seconds) and has one
|
||||
of the following types:
|
||||
|
||||
- `Setup Failure` - The task could not be started because there was a
|
||||
failure setting up the task prior to it running.
|
||||
|
||||
- `Driver Failure` - The task could not be started due to a failure in the
|
||||
driver.
|
||||
|
||||
- `Started` - The task was started; either for the first time or due to a
|
||||
restart.
|
||||
|
||||
- `Terminated` - The task was started and exited.
|
||||
|
||||
- `Killing` - The task has been sent the kill signal.
|
||||
|
||||
- `Killed` - The task was killed by an user.
|
||||
|
||||
- `Received` - The task has been pulled by the client at the given timestamp.
|
||||
|
||||
- `Failed Validation` - The task was invalid and as such it didn't run.
|
||||
|
||||
- `Restarting` - The task terminated and is being restarted.
|
||||
|
||||
- `Not Restarting` - the task has failed and is not being restarted because
|
||||
it has exceeded its restart policy.
|
||||
|
||||
- `Downloading Artifacts` - The task is downloading the artifact(s)
|
||||
- specified in the task.
|
||||
|
||||
- `Failed Artifact Download` - Artifact(s) specified in the task failed to
|
||||
download.
|
||||
|
||||
- `Restart Signaled` - The task was singled to be restarted.
|
||||
|
||||
- `Signaling` - The task was is being sent a signal.
|
||||
|
||||
- `Sibling Task Failed` - A task in the same task group failed.
|
||||
|
||||
- `Leader Task Dead` - The group's leader task is dead.
|
||||
|
||||
- `Driver` - A message from the driver.
|
||||
|
||||
- `Task Setup` - Task setup messages.
|
||||
|
||||
Depending on the type the event will have applicable annotations.
|
||||
562
website/source/api/client.html.md
Normal file
562
website/source/api/client.html.md
Normal file
@@ -0,0 +1,562 @@
|
||||
---
|
||||
layout: api
|
||||
page_title: Client - HTTP API
|
||||
sidebar_current: api-client
|
||||
description: |-
|
||||
The /client endpoints interact with the local Nomad agent to interact with
|
||||
client members.
|
||||
---
|
||||
|
||||
# Client HTTP API
|
||||
|
||||
The `/client` endpoints are used to interact with the Nomad clients. The API
|
||||
endpoints are hosted by the Nomad client and requests have to be made to the
|
||||
Client where the particular allocation was placed.
|
||||
|
||||
## Read Stats
|
||||
|
||||
This endpoint queries the actual resources consumed on a node. The API endpoint
|
||||
is hosted by the Nomad client and requests have to be made to the nomad client
|
||||
whose resource usage metrics are of interest.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `GET` | `/client/stats` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/client/stats
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"AllocDirStats": {
|
||||
"Available": 142943150080,
|
||||
"Device": "",
|
||||
"InodesUsedPercent": 0.05312946180421879,
|
||||
"Mountpoint": "",
|
||||
"Size": 249783500800,
|
||||
"Used": 106578206720,
|
||||
"UsedPercent": 42.668233241448746
|
||||
},
|
||||
"CPU": [
|
||||
{
|
||||
"CPU": "cpu0",
|
||||
"Idle": 80,
|
||||
"System": 11,
|
||||
"Total": 20,
|
||||
"User": 9
|
||||
},
|
||||
{
|
||||
"CPU": "cpu1",
|
||||
"Idle": 99,
|
||||
"System": 0,
|
||||
"Total": 1,
|
||||
"User": 1
|
||||
},
|
||||
{
|
||||
"CPU": "cpu2",
|
||||
"Idle": 89,
|
||||
"System": 7.000000000000001,
|
||||
"Total": 11,
|
||||
"User": 4
|
||||
},
|
||||
{
|
||||
"CPU": "cpu3",
|
||||
"Idle": 100,
|
||||
"System": 0,
|
||||
"Total": 0,
|
||||
"User": 0
|
||||
},
|
||||
{
|
||||
"CPU": "cpu4",
|
||||
"Idle": 92.92929292929293,
|
||||
"System": 4.040404040404041,
|
||||
"Total": 7.07070707070707,
|
||||
"User": 3.0303030303030303
|
||||
},
|
||||
{
|
||||
"CPU": "cpu5",
|
||||
"Idle": 99,
|
||||
"System": 1,
|
||||
"Total": 1,
|
||||
"User": 0
|
||||
},
|
||||
{
|
||||
"CPU": "cpu6",
|
||||
"Idle": 92.07920792079209,
|
||||
"System": 4.9504950495049505,
|
||||
"Total": 7.920792079207921,
|
||||
"User": 2.9702970297029703
|
||||
},
|
||||
{
|
||||
"CPU": "cpu7",
|
||||
"Idle": 99,
|
||||
"System": 0,
|
||||
"Total": 1,
|
||||
"User": 1
|
||||
}
|
||||
],
|
||||
"CPUTicksConsumed": 1126.8044804480448,
|
||||
"DiskStats": [
|
||||
{
|
||||
"Available": 142943150080,
|
||||
"Device": "/dev/disk1",
|
||||
"InodesUsedPercent": 0.05312946180421879,
|
||||
"Mountpoint": "/",
|
||||
"Size": 249783500800,
|
||||
"Used": 106578206720,
|
||||
"UsedPercent": 42.668233241448746
|
||||
}
|
||||
],
|
||||
"Memory": {
|
||||
"Available": 6232244224,
|
||||
"Free": 470618112,
|
||||
"Total": 17179869184,
|
||||
"Used": 10947624960
|
||||
},
|
||||
"Timestamp": 1495743032992498200,
|
||||
"Uptime": 193520
|
||||
}
|
||||
```
|
||||
|
||||
## Read Allocation
|
||||
|
||||
The client `allocation` endpoint is used to query the actual resources consumed
|
||||
by an allocation. The API endpoint is hosted by the Nomad client and requests
|
||||
have to be made to the nomad client whose resource usage metrics are of
|
||||
interest.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ------------------------------------ | -------------------------- |
|
||||
| `GET` | `/client/allocation/:alloc_id/stats` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `:alloc_id` `(string: <required>)` - Specifies the allocation ID to query.
|
||||
This is specified as part of the URL. Note, this must be the _full_ allocation
|
||||
ID, not the short 8-character one. This is specified as part of the path.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/client/allocation/5fc98185-17ff-26bc-a802-0c74fa471c99/stats
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"ResourceUsage": {
|
||||
"CpuStats": {
|
||||
"Measured": [
|
||||
"Throttled Periods",
|
||||
"Throttled Time",
|
||||
"Percent"
|
||||
],
|
||||
"Percent": 0.14159538847117795,
|
||||
"SystemMode": 0,
|
||||
"ThrottledPeriods": 0,
|
||||
"ThrottledTime": 0,
|
||||
"TotalTicks": 3.256693934837093,
|
||||
"UserMode": 0
|
||||
},
|
||||
"MemoryStats": {
|
||||
"Cache": 1744896,
|
||||
"KernelMaxUsage": 0,
|
||||
"KernelUsage": 0,
|
||||
"MaxUsage": 4710400,
|
||||
"Measured": [
|
||||
"RSS",
|
||||
"Cache",
|
||||
"Swap",
|
||||
"Max Usage"
|
||||
],
|
||||
"RSS": 1486848,
|
||||
"Swap": 0
|
||||
}
|
||||
},
|
||||
"Tasks": {
|
||||
"redis": {
|
||||
"Pids": null,
|
||||
"ResourceUsage": {
|
||||
"CpuStats": {
|
||||
"Measured": [
|
||||
"Throttled Periods",
|
||||
"Throttled Time",
|
||||
"Percent"
|
||||
],
|
||||
"Percent": 0.14159538847117795,
|
||||
"SystemMode": 0,
|
||||
"ThrottledPeriods": 0,
|
||||
"ThrottledTime": 0,
|
||||
"TotalTicks": 3.256693934837093,
|
||||
"UserMode": 0
|
||||
},
|
||||
"MemoryStats": {
|
||||
"Cache": 1744896,
|
||||
"KernelMaxUsage": 0,
|
||||
"KernelUsage": 0,
|
||||
"MaxUsage": 4710400,
|
||||
"Measured": [
|
||||
"RSS",
|
||||
"Cache",
|
||||
"Swap",
|
||||
"Max Usage"
|
||||
],
|
||||
"RSS": 1486848,
|
||||
"Swap": 0
|
||||
}
|
||||
},
|
||||
"Timestamp": 1495743243970720000
|
||||
}
|
||||
},
|
||||
"Timestamp": 1495743243970720000
|
||||
}
|
||||
```
|
||||
|
||||
## Read File
|
||||
|
||||
This endpoint reads the contents of a file in an allocation directory.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `GET` | `/client/fs/cat/:alloc_id` | `text/plain` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `:alloc_id` `(string: <required>)` - Specifies the allocation ID to query.
|
||||
This is specified as part of the URL. Note, this must be the _full_ allocation
|
||||
ID, not the short 8-character one. This is specified as part of the path.
|
||||
|
||||
- `path` `(string: "/")` - Specifies the path of the file to read, relative to
|
||||
the root of the allocation directory.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/client/fs/cat/5fc98185-17ff-26bc-a802-0c74fa471c99
|
||||
```
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/client/fs/cat/5fc98185-17ff-26bc-a802-0c74fa471c99?path=alloc/file.json
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```text
|
||||
(whatever was in the file...)
|
||||
```
|
||||
|
||||
|
||||
## Read File at Offset
|
||||
|
||||
This endpoint reads the contents of a file in an allocation directory at a
|
||||
particular offset and limit.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ----------------------------- | -------------------------- |
|
||||
| `GET` | `/client/fs/readat/:alloc_id` | `text/plain` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `:alloc_id` `(string: <required>)` - Specifies the allocation ID to query.
|
||||
This is specified as part of the URL. Note, this must be the _full_ allocation
|
||||
ID, not the short 8-character one. This is specified as part of the path.
|
||||
|
||||
- `path` `(string: "/")` - Specifies the path of the file to read, relative to
|
||||
the root of the allocation directory.
|
||||
|
||||
- `offset` `(int: <required>)` - Specifies the byte offset from where content
|
||||
will be read.
|
||||
|
||||
- `limit` `(int: <required>)` - Specifies the number of bytes to read from the
|
||||
offset.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/client/fs/readat/5fc98185-17ff-26bc-a802-0c74fa471c99?path=/alloc/foo&offset=1323&limit=19303
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```text
|
||||
(whatever was in the file, starting from offset, up to limit bytes...)
|
||||
```
|
||||
|
||||
## Stream File
|
||||
|
||||
This endpoint streams the contents of a file in an allocation directory.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ----------------------------- | -------------------------- |
|
||||
| `GET` | `/client/fs/stream/:alloc_id` | `text/plain` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `:alloc_id` `(string: <required>)` - Specifies the allocation ID to query.
|
||||
This is specified as part of the URL. Note, this must be the _full_ allocation
|
||||
ID, not the short 8-character one. This is specified as part of the path.
|
||||
|
||||
- `path` `(string: "/")` - Specifies the path of the file to read, relative to
|
||||
the root of the allocation directory.
|
||||
|
||||
- `offset` `(int: <required>)` - Specifies the byte offset from where content
|
||||
will be read.
|
||||
|
||||
- `origin` `(string: "start|end")` - Applies the relative offset to either the
|
||||
start or end of the file.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/client/fs/stream/5fc98185-17ff-26bc-a802-0c74fa471c99?path=/alloc/logs/redis.log
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"File": "alloc/logs/redis.log",
|
||||
"Offset": 3604480,
|
||||
"Data": "NTMxOTMyCjUzMTkzMwo1MzE5MzQKNTMx..."
|
||||
},
|
||||
{
|
||||
"File": "alloc/logs/redis.log",
|
||||
"FileEvent": "file deleted"
|
||||
}
|
||||
```
|
||||
|
||||
#### Field Reference
|
||||
|
||||
The return value is a stream of frames. These frames contain the following
|
||||
fields:
|
||||
|
||||
- `Data` - A base64 encoding of the bytes being streamed.
|
||||
|
||||
- `FileEvent` - An event that could cause a change in the streams position. The
|
||||
possible values are "file deleted" and "file truncated".
|
||||
|
||||
- `Offset` - Offset is the offset into the stream.
|
||||
|
||||
- `File` - The name of the file being streamed.
|
||||
|
||||
## Stream Logs
|
||||
|
||||
This endpoint streams a task's stderr/stdout logs.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `GET` | `/client/fs/logs/:alloc_id` | `text/plain` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `:alloc_id` `(string: <required>)` - Specifies the allocation ID to query.
|
||||
This is specified as part of the URL. Note, this must be the _full_ allocation
|
||||
ID, not the short 8-character one. This is specified as part of the path.
|
||||
|
||||
- `task` `(string: <required>)` - Specifies the name of the task inside the
|
||||
allocation to stream logs from.
|
||||
|
||||
- `follow` `(bool: false)`- Specifies whether to tail the logs.
|
||||
|
||||
- `type` `(string: "stderr|stdout")` - Specifies the log stream to view.
|
||||
|
||||
- `offset` `(int: 0)` - Specifies the offset to start streaming from.
|
||||
|
||||
- `origin` `(string: "start|end")` - Specifies either "start" or "end" and
|
||||
applies the offset relative to either the start or end of the logs
|
||||
respectively. Defaults to "start".
|
||||
|
||||
- `plain` `(bool: false)` - Return just the plain text without framing. This can
|
||||
be useful when viewing logs in a browser.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/client/fs/logs/5fc98185-17ff-26bc-a802-0c74fa471c99
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"File": "alloc/logs/redis.stdout.0",
|
||||
"Offset": 3604480,
|
||||
"Data": "NTMxOTMyCjUzMTkzMwo1MzE5MzQKNTMx..."
|
||||
},
|
||||
{
|
||||
"File": "alloc/logs/redis.stdout.0",
|
||||
"FileEvent": "file deleted"
|
||||
}
|
||||
```
|
||||
|
||||
#### Field Reference
|
||||
|
||||
The return value is a stream of frames. These frames contain the following
|
||||
fields:
|
||||
|
||||
- `Data` - A base64 encoding of the bytes being streamed.
|
||||
|
||||
- `FileEvent` - An event that could cause a change in the streams position. The
|
||||
possible values are "file deleted" and "file truncated".
|
||||
|
||||
- `Offset` - Offset is the offset into the stream.
|
||||
|
||||
- `File` - The name of the file being streamed.
|
||||
|
||||
## List Files
|
||||
|
||||
This endpoint lists files in an allocation directory.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `GET` | `/client/fs/ls/:alloc_id` | `text/plain` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `:alloc_id` `(string: <required>)` - Specifies the allocation ID to query.
|
||||
This is specified as part of the URL. Note, this must be the _full_ allocation
|
||||
ID, not the short 8-character one. This is specified as part of the path.
|
||||
|
||||
- `path` `(string: "/")` - Specifies the path of the file to read, relative to
|
||||
the root of the allocation directory.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/client/fs/ls/5fc98185-17ff-26bc-a802-0c74fa471c99
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"Name": "alloc",
|
||||
"IsDir": true,
|
||||
"Size": 4096,
|
||||
"FileMode": "drwxrwxr-x",
|
||||
"ModTime": "2016-03-15T15:40:00.414236712-07:00"
|
||||
},
|
||||
{
|
||||
"Name": "redis",
|
||||
"IsDir": true,
|
||||
"Size": 4096,
|
||||
"FileMode": "drwxrwxr-x",
|
||||
"ModTime": "2016-03-15T15:40:56.810238153-07:00"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
## Stat File
|
||||
|
||||
This endpoint stats a file in an allocation.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `GET` | `/client/fs/stat/:alloc_id` | `text/plain` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `:alloc_id` `(string: <required>)` - Specifies the allocation ID to query.
|
||||
This is specified as part of the URL. Note, this must be the _full_ allocation
|
||||
ID, not the short 8-character one. This is specified as part of the path.
|
||||
|
||||
- `path` `(string: "/")` - Specifies the path of the file to read, relative to
|
||||
the root of the allocation directory.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/client/fs/stat/5fc98185-17ff-26bc-a802-0c74fa471c99
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"Name": "redis-syslog-collector.out",
|
||||
"IsDir": false,
|
||||
"Size": 96,
|
||||
"FileMode": "-rw-rw-r--",
|
||||
"ModTime": "2016-03-15T15:40:56.822238153-07:00"
|
||||
}
|
||||
```
|
||||
266
website/source/api/evaluations.html.md
Normal file
266
website/source/api/evaluations.html.md
Normal file
@@ -0,0 +1,266 @@
|
||||
---
|
||||
layout: api
|
||||
page_title: Evaluations - HTTP API
|
||||
sidebar_current: api-evaluations
|
||||
description: |-
|
||||
The /evaluation endpoints are used to query for and interact with evaluations.
|
||||
---
|
||||
|
||||
# Evaluations HTTP API
|
||||
|
||||
The `/evaluation` endpoints are used to query for and interact with evaluations.
|
||||
|
||||
## List Evaluations
|
||||
|
||||
This endpoint lists all evaluations.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ------------------------ | -------------------------- |
|
||||
| `GET` | `/v1/evaluations` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `YES` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `prefix` `(string: "")`- Specifies a string to filter evaluations on based on
|
||||
an index prefix. This is specified as a querystring parameter.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/evaluations
|
||||
```
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/evaluations?prefix=25ba81c
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"ID": "5456bd7a-9fc0-c0dd-6131-cbee77f57577",
|
||||
"Priority": 50,
|
||||
"Type": "service",
|
||||
"TriggeredBy": "job-register",
|
||||
"JobID": "example",
|
||||
"JobModifyIndex": 52,
|
||||
"NodeID": "",
|
||||
"NodeModifyIndex": 0,
|
||||
"Status": "complete",
|
||||
"StatusDescription": "",
|
||||
"Wait": 0,
|
||||
"NextEval": "",
|
||||
"PreviousEval": "",
|
||||
"BlockedEval": "",
|
||||
"FailedTGAllocs": null,
|
||||
"ClassEligibility": null,
|
||||
"EscapedComputedClass": false,
|
||||
"AnnotatePlan": false,
|
||||
"SnapshotIndex": 53,
|
||||
"QueuedAllocations": {
|
||||
"cache": 0
|
||||
},
|
||||
"CreateIndex": 53,
|
||||
"ModifyIndex": 55
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
## Read Evaluation
|
||||
|
||||
This endpoint reads information about a specific evaluation by ID.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ------------------------- | -------------------------- |
|
||||
| `GET` | `/v1/evaluation/:eval_id` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `YES` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `:eval_id` `(string: <required>)`- Specifies the UUID of the evaluation. This
|
||||
must be the full UUID, not the short 8-character one. This is specified as
|
||||
part of the path.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/evaluation/5456bd7a-9fc0-c0dd-6131-cbee77f57577
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"ID": "5456bd7a-9fc0-c0dd-6131-cbee77f57577",
|
||||
"Priority": 50,
|
||||
"Type": "service",
|
||||
"TriggeredBy": "job-register",
|
||||
"JobID": "example",
|
||||
"JobModifyIndex": 52,
|
||||
"NodeID": "",
|
||||
"NodeModifyIndex": 0,
|
||||
"Status": "complete",
|
||||
"StatusDescription": "",
|
||||
"Wait": 0,
|
||||
"NextEval": "",
|
||||
"PreviousEval": "",
|
||||
"BlockedEval": "",
|
||||
"FailedTGAllocs": null,
|
||||
"ClassEligibility": null,
|
||||
"EscapedComputedClass": false,
|
||||
"AnnotatePlan": false,
|
||||
"SnapshotIndex": 53,
|
||||
"QueuedAllocations": {
|
||||
"cache": 0
|
||||
},
|
||||
"CreateIndex": 53,
|
||||
"ModifyIndex": 55
|
||||
}
|
||||
```
|
||||
|
||||
## List Allocations for Evaluation
|
||||
|
||||
This endpoint lists the allocations created or modified for the given
|
||||
evaluation.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ------------------------------------- | -------------------------- |
|
||||
| `GET` | `/v1/evaluation/:eval_id/allocations` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `YES` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `:eval_id` `(string: <required>)`- Specifies the UUID of the evaluation. This
|
||||
must be the full UUID, not the short 8-character one. This is specified as
|
||||
part of the path.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/evaluation/5456bd7a-9fc0-c0dd-6131-cbee77f57577/allocations
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"ID": "a8198d79-cfdb-6593-a999-1e9adabcba2e",
|
||||
"EvalID": "5456bd7a-9fc0-c0dd-6131-cbee77f57577",
|
||||
"Name": "example.cache[0]",
|
||||
"NodeID": "fb2170a8-257d-3c64-b14d-bc06cc94e34c",
|
||||
"JobID": "example",
|
||||
"TaskGroup": "cache",
|
||||
"DesiredStatus": "run",
|
||||
"DesiredDescription": "",
|
||||
"ClientStatus": "running",
|
||||
"ClientDescription": "",
|
||||
"TaskStates": {
|
||||
"redis": {
|
||||
"State": "running",
|
||||
"Failed": false,
|
||||
"Events": [
|
||||
{
|
||||
"Type": "Received",
|
||||
"Time": 1495747371795703800,
|
||||
"FailsTask": false,
|
||||
"RestartReason": "",
|
||||
"SetupError": "",
|
||||
"DriverError": "",
|
||||
"ExitCode": 0,
|
||||
"Signal": 0,
|
||||
"Message": "",
|
||||
"KillTimeout": 0,
|
||||
"KillError": "",
|
||||
"KillReason": "",
|
||||
"StartDelay": 0,
|
||||
"DownloadError": "",
|
||||
"ValidationError": "",
|
||||
"DiskLimit": 0,
|
||||
"FailedSibling": "",
|
||||
"VaultError": "",
|
||||
"TaskSignalReason": "",
|
||||
"TaskSignal": "",
|
||||
"DriverMessage": ""
|
||||
},
|
||||
{
|
||||
"Type": "Driver",
|
||||
"Time": 1495747371798867200,
|
||||
"FailsTask": false,
|
||||
"RestartReason": "",
|
||||
"SetupError": "",
|
||||
"DriverError": "",
|
||||
"ExitCode": 0,
|
||||
"Signal": 0,
|
||||
"Message": "",
|
||||
"KillTimeout": 0,
|
||||
"KillError": "",
|
||||
"KillReason": "",
|
||||
"StartDelay": 0,
|
||||
"DownloadError": "",
|
||||
"ValidationError": "",
|
||||
"DiskLimit": 0,
|
||||
"FailedSibling": "",
|
||||
"VaultError": "",
|
||||
"TaskSignalReason": "",
|
||||
"TaskSignal": "",
|
||||
"DriverMessage": "Downloading image redis:3.2"
|
||||
},
|
||||
{
|
||||
"Type": "Started",
|
||||
"Time": 1495747379525667800,
|
||||
"FailsTask": false,
|
||||
"RestartReason": "",
|
||||
"SetupError": "",
|
||||
"DriverError": "",
|
||||
"ExitCode": 0,
|
||||
"Signal": 0,
|
||||
"Message": "",
|
||||
"KillTimeout": 0,
|
||||
"KillError": "",
|
||||
"KillReason": "",
|
||||
"StartDelay": 0,
|
||||
"DownloadError": "",
|
||||
"ValidationError": "",
|
||||
"DiskLimit": 0,
|
||||
"FailedSibling": "",
|
||||
"VaultError": "",
|
||||
"TaskSignalReason": "",
|
||||
"TaskSignal": "",
|
||||
"DriverMessage": ""
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"CreateIndex": 54,
|
||||
"ModifyIndex": 57,
|
||||
"CreateTime": 1495747371794276400
|
||||
}
|
||||
]
|
||||
```
|
||||
186
website/source/api/index.html.md
Normal file
186
website/source/api/index.html.md
Normal file
@@ -0,0 +1,186 @@
|
||||
---
|
||||
layout: api
|
||||
page_title: HTTP API
|
||||
sidebar_current: api-overview
|
||||
description: |-
|
||||
Nomad exposes a RESTful HTTP API to control almost every aspect of the
|
||||
Nomad agent.
|
||||
---
|
||||
|
||||
# HTTP API
|
||||
|
||||
The main interface to Nomad is a RESTful HTTP API. The API can query the current
|
||||
state of the system as well as modify the state of the system. The Nomad CLI
|
||||
actually invokes Nomad's HTTP API for many commands.
|
||||
|
||||
## Version Prefix
|
||||
|
||||
All API routes are prefixed with `/v1/`.
|
||||
|
||||
This documentation is only for the v1 API.
|
||||
|
||||
~> **Backwards compatibility:** At the current version, Nomad does not yet
|
||||
promise backwards compatibility even with the v1 prefix. We'll remove this
|
||||
warning when this policy changes. We expect to reach API stability by Nomad
|
||||
1.0.
|
||||
|
||||
## Addressing & Ports
|
||||
|
||||
Nomad binds to a specific set of addresses and ports. The HTTP API is served via
|
||||
the `http` address and port. This `address:port` must be accessible locally. If
|
||||
you bind to `127.0.0.1:4646`, the API is only available _from that host_. If you
|
||||
bind to a private internal IP, the API will be available from within that
|
||||
network. If you bind to a public IP, the API will be available from the public
|
||||
Internet (not recommended).
|
||||
|
||||
The default port for the Nomad HTTP API is `4646`. This can be overridden via
|
||||
the Nomad configuration block. Here is an example curl request to query a Nomad
|
||||
server with the default configuration:
|
||||
|
||||
```text
|
||||
$ curl http://127.0.0.1:4646/v1/agent/members
|
||||
```
|
||||
|
||||
The conventions used in the API documentation do not list a port and use the
|
||||
standard URL `nomad.rocks`. Be sure to replace this with your Nomad agent URL
|
||||
when using the examples.
|
||||
|
||||
## Data Model and Layout
|
||||
|
||||
There are four primary nouns in Nomad:
|
||||
|
||||
- jobs
|
||||
- nodes
|
||||
- allocations
|
||||
- evaluations
|
||||
|
||||
[](/assets/images/nomad-data-model.png)
|
||||
|
||||
Jobs are submitted by users and represent a _desired state_. A job is a
|
||||
declarative description of tasks to run which are bounded by constraints and
|
||||
require resources. Nodes are the servers in the clusters that tasks can be
|
||||
scheduled on. The mapping of tasks in a job to nodes is done using allocations.
|
||||
An allocation is used to declare that a set of tasks in a job should be run on a
|
||||
particular node. Scheduling is the process of determining the appropriate
|
||||
allocations and is done as part of an evaluation.
|
||||
|
||||
The API is modeled closely on the underlying data model. Use the links to the
|
||||
left for documentation about specific endpoints. There are also "Agent" APIs
|
||||
which interact with a specific agent and not the broader cluster used for
|
||||
administration.
|
||||
|
||||
## ACLs
|
||||
|
||||
The Nomad API does not support ACLs at this time.
|
||||
|
||||
## Authentication
|
||||
|
||||
The Nomad API does not support authentication at this time.
|
||||
|
||||
## Blocking Queries
|
||||
|
||||
Many endpoints in Nomad support a feature known as "blocking queries". A
|
||||
blocking query is used to wait for a potential change using long polling. Not
|
||||
all endpoints support blocking, but each endpoint uniquely documents its support
|
||||
for blocking queries in the documentation.
|
||||
|
||||
Endpoints that support blocking queries return an HTTP header named
|
||||
`X-Nomad-Index`. This is a unique identifier representing the current state of
|
||||
the requested resource.
|
||||
|
||||
On subsequent requests for this resource, the client can set the `index` query
|
||||
string parameter to the value of `X-Nomad-Index`, indicating that the client
|
||||
wishes to wait for any changes subsequent to that index.
|
||||
|
||||
When this is provided, the HTTP request will "hang" until a change in the system
|
||||
occurs, or the maximum timeout is reached. A critical note is that the return of
|
||||
a blocking request is **no guarantee** of a change. It is possible that the
|
||||
timeout was reached or that there was an idempotent write that does not affect
|
||||
the result of the query.
|
||||
|
||||
In addition to `index`, endpoints that support blocking will also honor a `wait`
|
||||
parameter specifying a maximum duration for the blocking request. This is
|
||||
limited to 10 minutes. If not set, the wait time defaults to 5 minutes. This
|
||||
value can be specified in the form of "10s" or "5m" (i.e., 10 seconds or 5
|
||||
minutes, respectively). A small random amount of additional wait time is added
|
||||
to the supplied maximum `wait` time to spread out the wake up time of any
|
||||
concurrent requests. This adds up to `wait / 16` additional time to the maximum
|
||||
duration.
|
||||
|
||||
## Consistency Modes
|
||||
|
||||
Most of the read query endpoints support multiple levels of consistency. Since
|
||||
no policy will suit all clients' needs, these consistency modes allow the user
|
||||
to have the ultimate say in how to balance the trade-offs inherent in a
|
||||
distributed system.
|
||||
|
||||
The two read modes are:
|
||||
|
||||
- `default` - If not specified, the default is strongly consistent in almost all
|
||||
cases. However, there is a small window in which a new leader may be elected
|
||||
during which the old leader may service stale values. The trade-off is fast
|
||||
reads but potentially stale values. The condition resulting in stale reads is
|
||||
hard to trigger, and most clients should not need to worry about this case.
|
||||
Also, note that this race condition only applies to reads, not writes.
|
||||
|
||||
- `stale` - This mode allows any server to service the read regardless of
|
||||
whether it is the leader. This means reads can be arbitrarily stale; however,
|
||||
results are generally consistent to within 50 milliseconds of the leader. The
|
||||
trade-off is very fast and scalable reads with a higher likelihood of stale
|
||||
values. Since this mode allows reads without a leader, a cluster that is
|
||||
unavailable will still be able to respond to queries.
|
||||
|
||||
To switch these modes, use the `stale` query parameter on requests.
|
||||
|
||||
To support bounding the acceptable staleness of data, responses provide the
|
||||
`X-Nomad-LastContact` header containing the time in milliseconds that a server
|
||||
was last contacted by the leader node. The `X-Nomad-KnownLeader` header also
|
||||
indicates if there is a known leader. These can be used by clients to gauge the
|
||||
staleness of a result and take appropriate action.
|
||||
|
||||
## Cross-Region Requests
|
||||
|
||||
By default, any request to the HTTP API will default to the region on which the
|
||||
machine is servicing the request. If the agent runs in "region1", the request
|
||||
will query the region "region1". A target region can be explicitly requested using
|
||||
the `?region` query parameter. The request will be transparently forwarded and
|
||||
serviced by a server in the requested region.
|
||||
|
||||
## Compressed Responses
|
||||
|
||||
The HTTP API will gzip the response if the HTTP request denotes that the client
|
||||
accepts gzip compression. This is achieved by passing the accept encoding:
|
||||
|
||||
```
|
||||
$ curl \
|
||||
--header "Accept-Encoding: gzip" \
|
||||
https://nomad.rocks/v1/...
|
||||
```
|
||||
|
||||
## Formatted JSON Output
|
||||
|
||||
By default, the output of all HTTP API requests is minimized JSON. If the client
|
||||
passes `pretty` on the query string, formatted JSON will be returned.
|
||||
|
||||
In general, clients should prefer a client-side parser like `jq` instead of
|
||||
server-formatted data. Asking the server to format the data takes away
|
||||
processing cycles from more important tasks.
|
||||
|
||||
```
|
||||
$ curl https://nomad.rocks/v1/page?pretty
|
||||
```
|
||||
|
||||
## HTTP Methods
|
||||
|
||||
Nomad's API aims to be RESTful, although there are some exceptions. The API
|
||||
responds to the standard HTTP verbs GET, PUT, and DELETE. Each API method will
|
||||
clearly document the verb(s) it responds to and the generated response. The same
|
||||
path with different verbs may trigger different behavior. For example:
|
||||
|
||||
```text
|
||||
PUT /v1/jobs
|
||||
GET /v1/jobs
|
||||
```
|
||||
|
||||
Even though these share a path, the `PUT` operation creates a new job whereas
|
||||
the `GET` operation reads all jobs.
|
||||
1236
website/source/api/jobs.html.md
Normal file
1236
website/source/api/jobs.html.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,124 +1,129 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: JSON Job Specification"
|
||||
sidebar_current: "docs-http-json-jobs"
|
||||
layout: api
|
||||
page_title: JSON Job Specification - HTTP API
|
||||
sidebar_current: api-jobs
|
||||
description: |-
|
||||
Jobs can also be specified via the HTTP API using a JSON format. This guide
|
||||
discusses the job specification in JSON format.
|
||||
---
|
||||
|
||||
# Job Specification
|
||||
# JSON Job Specification
|
||||
|
||||
This guide covers the JSON syntax for submitting jobs to Nomad. A useful command
|
||||
for generating valid JSON versions of HCL jobs is
|
||||
`nomad run -output <job.nomad>` which will emit a JSON version of the job.
|
||||
for generating valid JSON versions of HCL jobs is:
|
||||
|
||||
## JSON Syntax
|
||||
```shell
|
||||
$ nomad run -output my-job.nomad
|
||||
```
|
||||
|
||||
## Syntax
|
||||
|
||||
Below is an example of a JSON object that submits a `periodic` job to Nomad:
|
||||
|
||||
```json
|
||||
{
|
||||
"Job":{
|
||||
"Region":"global",
|
||||
"ID":"example",
|
||||
"Name":"example",
|
||||
"Type":"batch",
|
||||
"Priority":50,
|
||||
"AllAtOnce":false,
|
||||
"Datacenters":[
|
||||
"Job": {
|
||||
"Region": "global",
|
||||
"ID": "example",
|
||||
"Name": "example",
|
||||
"Type": "batch",
|
||||
"Priority": 50,
|
||||
"AllAtOnce": false,
|
||||
"Datacenters": [
|
||||
"dc1"
|
||||
],
|
||||
"Constraints":[
|
||||
"Constraints": [
|
||||
{
|
||||
"LTarget":"${attr.kernel.name}",
|
||||
"RTarget":"linux",
|
||||
"Operand":"="
|
||||
"LTarget": "${attr.kernel.name}",
|
||||
"RTarget": "linux",
|
||||
"Operand": "="
|
||||
}
|
||||
],
|
||||
"TaskGroups":[
|
||||
"TaskGroups": [
|
||||
{
|
||||
"Name":"cache",
|
||||
"Count":1,
|
||||
"Constraints":null,
|
||||
"Tasks":[
|
||||
"Name": "cache",
|
||||
"Count": 1,
|
||||
"Constraints": null,
|
||||
"Tasks": [
|
||||
{
|
||||
"Name":"redis",
|
||||
"Driver":"docker",
|
||||
"User":"foo-user",
|
||||
"Config":{
|
||||
"image":"redis:latest",
|
||||
"port_map":[
|
||||
"Name": "redis",
|
||||
"Driver": "docker",
|
||||
"User": "foo-user",
|
||||
"Config": {
|
||||
"image": "redis:latest",
|
||||
"port_map": [
|
||||
{
|
||||
"db":6379
|
||||
"db": 6379
|
||||
}
|
||||
]
|
||||
},
|
||||
"Constraints":null,
|
||||
"Env":{
|
||||
"foo":"bar",
|
||||
"baz":"pipe"
|
||||
"Constraints": null,
|
||||
"Env": {
|
||||
"foo": "bar",
|
||||
"baz": "pipe"
|
||||
},
|
||||
"Services":[
|
||||
"Services": [
|
||||
{
|
||||
"Name":"cache-redis",
|
||||
"Tags":[
|
||||
"Name": "cache-redis",
|
||||
"Tags": [
|
||||
"global",
|
||||
"cache"
|
||||
],
|
||||
"PortLabel":"db",
|
||||
"Checks":[
|
||||
"PortLabel": "db",
|
||||
"Checks": [
|
||||
{
|
||||
"Id":"",
|
||||
"Name":"alive",
|
||||
"Type":"tcp",
|
||||
"Command":"",
|
||||
"Args":null,
|
||||
"Path":"",
|
||||
"Protocol":"",
|
||||
"Interval":10000000000,
|
||||
"Timeout":2000000000
|
||||
"Id": "",
|
||||
"Name": "alive",
|
||||
"Type": "tcp",
|
||||
"Command": "",
|
||||
"Args": null,
|
||||
"Path": "",
|
||||
"Protocol": "",
|
||||
"Interval": 10000000000,
|
||||
"Timeout": 2000000000
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"Vault": {
|
||||
"Policies": ["policy-name"],
|
||||
"Policies": [
|
||||
"policy-name"
|
||||
],
|
||||
"Env": true,
|
||||
"ChangeMode": "restart",
|
||||
"ChangeSignal": ""
|
||||
},
|
||||
"Resources":{
|
||||
"CPU":500,
|
||||
"MemoryMB":256,
|
||||
"IOPS":0,
|
||||
"Networks":[
|
||||
"Resources": {
|
||||
"CPU": 500,
|
||||
"MemoryMB": 256,
|
||||
"IOPS": 0,
|
||||
"Networks": [
|
||||
{
|
||||
"ReservedPorts":[
|
||||
"ReservedPorts": [
|
||||
{
|
||||
"Label":"rpc",
|
||||
"Value":25566
|
||||
"Label": "rpc",
|
||||
"Value": 25566
|
||||
}
|
||||
],
|
||||
"DynamicPorts":[
|
||||
"DynamicPorts": [
|
||||
{
|
||||
"Label":"db"
|
||||
"Label": "db"
|
||||
}
|
||||
],
|
||||
"MBits":10
|
||||
"MBits": 10
|
||||
}
|
||||
]
|
||||
},
|
||||
"Meta":{
|
||||
"foo":"bar",
|
||||
"baz":"pipe"
|
||||
"Meta": {
|
||||
"foo": "bar",
|
||||
"baz": "pipe"
|
||||
},
|
||||
"KillTimeout":5000000000,
|
||||
"LogConfig":{
|
||||
"MaxFiles":10,
|
||||
"MaxFileSizeMB":10
|
||||
"KillTimeout": 5000000000,
|
||||
"LogConfig": {
|
||||
"MaxFiles": 10,
|
||||
"MaxFileSizeMB": 10
|
||||
},
|
||||
"Templates":[
|
||||
"Templates": [
|
||||
{
|
||||
"SourcePath": "local/config.conf.tpl",
|
||||
"DestPath": "local/config.conf",
|
||||
@@ -128,13 +133,13 @@ Below is an example of a JSON object that submits a `periodic` job to Nomad:
|
||||
"Splay": 5000000000
|
||||
}
|
||||
],
|
||||
"Artifacts":[
|
||||
"Artifacts": [
|
||||
{
|
||||
"GetterSource":"http://foo.com/artifact.tar.gz",
|
||||
"GetterOptions":{
|
||||
"checksum":"md5:c4aa853ad2215426eb7d70a21922e794"
|
||||
"GetterSource": "http://foo.com/artifact.tar.gz",
|
||||
"GetterOptions": {
|
||||
"checksum": "md5:c4aa853ad2215426eb7d70a21922e794"
|
||||
},
|
||||
"RelativeDest":"local/"
|
||||
"RelativeDest": "local/"
|
||||
}
|
||||
],
|
||||
"DispatchPayload": {
|
||||
@@ -142,31 +147,31 @@ Below is an example of a JSON object that submits a `periodic` job to Nomad:
|
||||
}
|
||||
}
|
||||
],
|
||||
"RestartPolicy":{
|
||||
"Interval":300000000000,
|
||||
"Attempts":10,
|
||||
"Delay":25000000000,
|
||||
"Mode":"delay"
|
||||
"RestartPolicy": {
|
||||
"Interval": 300000000000,
|
||||
"Attempts": 10,
|
||||
"Delay": 25000000000,
|
||||
"Mode": "delay"
|
||||
},
|
||||
"Meta":{
|
||||
"foo":"bar",
|
||||
"baz":"pipe"
|
||||
"Meta": {
|
||||
"foo": "bar",
|
||||
"baz": "pipe"
|
||||
}
|
||||
}
|
||||
],
|
||||
"Update":{
|
||||
"Stagger":10000000000,
|
||||
"MaxParallel":1
|
||||
"Update": {
|
||||
"Stagger": 10000000000,
|
||||
"MaxParallel": 1
|
||||
},
|
||||
"Periodic":{
|
||||
"Enabled":true,
|
||||
"Spec":"* * * * *",
|
||||
"SpecType":"cron",
|
||||
"ProhibitOverlap":true
|
||||
"Periodic": {
|
||||
"Enabled": true,
|
||||
"Spec": "* * * * *",
|
||||
"SpecType": "cron",
|
||||
"ProhibitOverlap": true
|
||||
},
|
||||
"Meta":{
|
||||
"foo":"bar",
|
||||
"baz":"pipe"
|
||||
"Meta": {
|
||||
"foo": "bar",
|
||||
"baz": "pipe"
|
||||
},
|
||||
"ParameterizedJob": {
|
||||
"Payload": "required",
|
||||
@@ -175,7 +180,7 @@ Below is an example of a JSON object that submits a `periodic` job to Nomad:
|
||||
],
|
||||
"MetaOptional": [
|
||||
"bar"
|
||||
]
|
||||
]
|
||||
},
|
||||
"Payload": null
|
||||
}
|
||||
@@ -184,65 +189,65 @@ Below is an example of a JSON object that submits a `periodic` job to Nomad:
|
||||
|
||||
## Syntax Reference
|
||||
|
||||
Following is a syntax reference for the possible keys that are supported
|
||||
and their default values if any for each type of object.
|
||||
Following is a syntax reference for the possible keys that are supported and
|
||||
their default values if any for each type of object.
|
||||
|
||||
### Job
|
||||
|
||||
The `Job` object supports the following keys:
|
||||
|
||||
* `AllAtOnce` - Controls if the entire set of tasks in the job must
|
||||
- `AllAtOnce` - Controls if the entire set of tasks in the job must
|
||||
be placed atomically or if they can be scheduled incrementally.
|
||||
This should only be used for special circumstances. Defaults to `false`.
|
||||
|
||||
* `Constraints` - A list to define additional constraints where a job can be
|
||||
- `Constraints` - A list to define additional constraints where a job can be
|
||||
run. See the constraint reference for more details.
|
||||
|
||||
* `Datacenters` - A list of datacenters in the region which are eligible
|
||||
- `Datacenters` - A list of datacenters in the region which are eligible
|
||||
for task placement. This must be provided, and does not have a default.
|
||||
|
||||
* `TaskGroups` - A list to define additional task groups. See the task group
|
||||
- `TaskGroups` - A list to define additional task groups. See the task group
|
||||
reference for more details.
|
||||
|
||||
* `Meta` - Annotates the job with opaque metadata.
|
||||
- `Meta` - Annotates the job with opaque metadata.
|
||||
|
||||
* `ParameterizedJob` - Specifies the job as a paramterized job such that it can
|
||||
- `ParameterizedJob` - Specifies the job as a parameterized job such that it can
|
||||
be dispatched against. The `ParameterizedJob` object supports the following
|
||||
attributes:
|
||||
|
||||
* `MetaOptional` - Specifies the set of metadata keys that may be provided
|
||||
- `MetaOptional` - Specifies the set of metadata keys that may be provided
|
||||
when dispatching against the job as a string array.
|
||||
|
||||
* `MetaRequired` - Specifies the set of metadata keys that must be provided
|
||||
- `MetaRequired` - Specifies the set of metadata keys that must be provided
|
||||
when dispatching against the job as a string array.
|
||||
|
||||
* `Payload` - Specifies the requirement of providing a payload when
|
||||
- `Payload` - Specifies the requirement of providing a payload when
|
||||
dispatching against the parameterized job. The options for this field are
|
||||
"optional", "required" and "forbidden". The default value is "optional".
|
||||
|
||||
* `Payload` - The payload may not be set when submitting a job but may appear in
|
||||
- `Payload` - The payload may not be set when submitting a job but may appear in
|
||||
a dispatched job. The `Payload` will be a base64 encoded string containing the
|
||||
payload that the job was dispatched with. The `payload` has a **maximum size
|
||||
of 16 KiB**.
|
||||
|
||||
* `Priority` - Specifies the job priority which is used to prioritize
|
||||
- `Priority` - Specifies the job priority which is used to prioritize
|
||||
scheduling and access to resources. Must be between 1 and 100 inclusively,
|
||||
and defaults to 50.
|
||||
|
||||
* `Region` - The region to run the job in, defaults to "global".
|
||||
- `Region` - The region to run the job in, defaults to "global".
|
||||
|
||||
* `Type` - Specifies the job type and switches which scheduler
|
||||
- `Type` - Specifies the job type and switches which scheduler
|
||||
is used. Nomad provides the `service`, `system` and `batch` schedulers,
|
||||
and defaults to `service`. To learn more about each scheduler type visit
|
||||
[here](/docs/runtime/schedulers.html)
|
||||
|
||||
* `Update` - Specifies the task's update strategy. When omitted, rolling
|
||||
- `Update` - Specifies the task's update strategy. When omitted, rolling
|
||||
updates are disabled. The `Update` object supports the following attributes:
|
||||
|
||||
* `MaxParallel` - `MaxParallel` is given as an integer value and specifies
|
||||
- `MaxParallel` - `MaxParallel` is given as an integer value and specifies
|
||||
the number of tasks that can be updated at the same time.
|
||||
|
||||
* `Stagger` - `Stagger` introduces a delay between sets of task updates and
|
||||
- `Stagger` - `Stagger` introduces a delay between sets of task updates and
|
||||
is given in nanoseconds.
|
||||
|
||||
An example `Update` block:
|
||||
@@ -256,29 +261,29 @@ The `Job` object supports the following keys:
|
||||
}
|
||||
```
|
||||
|
||||
* `Periodic` - `Periodic` allows the job to be scheduled at fixed times, dates
|
||||
- `Periodic` - `Periodic` allows the job to be scheduled at fixed times, dates
|
||||
or intervals. The periodic expression is always evaluated in the UTC
|
||||
timezone to ensure consistent evaluation when Nomad Servers span multiple
|
||||
time zones. The `Periodic` object is optional and supports the following attributes:
|
||||
|
||||
* `Enabled` - `Enabled` determines whether the periodic job will spawn child
|
||||
- `Enabled` - `Enabled` determines whether the periodic job will spawn child
|
||||
jobs.
|
||||
|
||||
* `time_zone` - Specifies the time zone to evaluate the next launch interval
|
||||
- `time_zone` - Specifies the time zone to evaluate the next launch interval
|
||||
against. This is useful when wanting to account for daylight saving time in
|
||||
various time zones. The time zone must be parsable by Golang's
|
||||
[LoadLocation](https://golang.org/pkg/time/#LoadLocation). The default is
|
||||
UTC.
|
||||
|
||||
* `SpecType` - `SpecType` determines how Nomad is going to interpret the
|
||||
- `SpecType` - `SpecType` determines how Nomad is going to interpret the
|
||||
periodic expression. `cron` is the only supported `SpecType` currently.
|
||||
|
||||
* `Spec` - A cron expression configuring the interval the job is launched
|
||||
- `Spec` - A cron expression configuring the interval the job is launched
|
||||
at. Supports predefined expressions such as "@daily" and "@weekly". See
|
||||
[here](https://github.com/gorhill/cronexpr#implementation) for full
|
||||
documentation of supported cron specs and the predefined expressions.
|
||||
|
||||
* <a id="prohibit_overlap">`ProhibitOverlap`</a> - `ProhibitOverlap` can
|
||||
- <a id="prohibit_overlap">`ProhibitOverlap`</a> - `ProhibitOverlap` can
|
||||
be set to true to enforce that the periodic job doesn't spawn a new
|
||||
instance of the job if any of the previous jobs are still running. It is
|
||||
defaulted to false.
|
||||
@@ -288,7 +293,7 @@ The `Job` object supports the following keys:
|
||||
```json
|
||||
{
|
||||
"Periodic": {
|
||||
"Spec": "*/15 * * * * *"
|
||||
"Spec": "*/15 * * * * *",
|
||||
"SpecType": "cron",
|
||||
"Enabled": true,
|
||||
"ProhibitOverlap": true
|
||||
@@ -301,51 +306,51 @@ The `Job` object supports the following keys:
|
||||
`TaskGroups` is a list of `TaskGroup` objects, each supports the following
|
||||
attributes:
|
||||
|
||||
* `Constraints` - This is a list of `Constraint` objects. See the constraint
|
||||
- `Constraints` - This is a list of `Constraint` objects. See the constraint
|
||||
reference for more details.
|
||||
|
||||
* `Count` - Specifies the number of the task groups that should
|
||||
- `Count` - Specifies the number of the task groups that should
|
||||
be running. Must be non-negative, defaults to one.
|
||||
|
||||
* `Meta` - A key-value map that annotates the task group with opaque metadata.
|
||||
- `Meta` - A key-value map that annotates the task group with opaque metadata.
|
||||
|
||||
* `Name` - The name of the task group. Must be specified.
|
||||
- `Name` - The name of the task group. Must be specified.
|
||||
|
||||
* `RestartPolicy` - Specifies the restart policy to be applied to tasks in this group.
|
||||
- `RestartPolicy` - Specifies the restart policy to be applied to tasks in this group.
|
||||
If omitted, a default policy for batch and non-batch jobs is used based on the
|
||||
job type. See the [restart policy reference](#restart_policy) for more details.
|
||||
|
||||
* `EphemeralDisk` - Specifies the group's ephemeral disk requirements. See the
|
||||
- `EphemeralDisk` - Specifies the group's ephemeral disk requirements. See the
|
||||
[ephemeral disk reference](#ephemeral_disk) for more details.
|
||||
|
||||
* `Tasks` - A list of `Task` object that are part of the task group.
|
||||
- `Tasks` - A list of `Task` object that are part of the task group.
|
||||
|
||||
### Task
|
||||
|
||||
The `Task` object supports the following keys:
|
||||
|
||||
* `Artifacts` - `Artifacts` is a list of `Artifact` objects which define
|
||||
- `Artifacts` - `Artifacts` is a list of `Artifact` objects which define
|
||||
artifacts to be downloaded before the task is run. See the artifacts
|
||||
reference for more details.
|
||||
|
||||
* `Config` - A map of key-value configuration passed into the driver
|
||||
- `Config` - A map of key-value configuration passed into the driver
|
||||
to start the task. The details of configurations are specific to
|
||||
each driver.
|
||||
|
||||
* `Constraints` - This is a list of `Constraint` objects. See the constraint
|
||||
- `Constraints` - This is a list of `Constraint` objects. See the constraint
|
||||
reference for more details.
|
||||
|
||||
- `DispatchPayload` - Configures the task to have access to dispatch payloads.
|
||||
The `DispatchPayload` object supports the following attributes:
|
||||
|
||||
* `File` - Specifies the file name to write the content of dispatch payload
|
||||
- `File` - Specifies the file name to write the content of dispatch payload
|
||||
to. The file is written relative to the task's local directory.
|
||||
|
||||
* `Driver` - Specifies the task driver that should be used to run the
|
||||
- `Driver` - Specifies the task driver that should be used to run the
|
||||
task. See the [driver documentation](/docs/drivers/index.html) for what
|
||||
is available. Examples include `docker`, `qemu`, `java`, and `exec`.
|
||||
|
||||
* `Env` - A map of key-value representing environment variables that
|
||||
- `Env` - A map of key-value representing environment variables that
|
||||
will be passed along to the running process. Nomad variables are
|
||||
interpreted when set in the environment variable values. See the table of
|
||||
interpreted variables [here](/docs/runtime/interpolation.html).
|
||||
@@ -360,34 +365,34 @@ The `Task` object supports the following keys:
|
||||
}
|
||||
```
|
||||
|
||||
* `KillTimeout` - `KillTimeout` is a time duration in nanoseconds. It can be
|
||||
- `KillTimeout` - `KillTimeout` is a time duration in nanoseconds. It can be
|
||||
used to configure the time between signaling a task it will be killed and
|
||||
actually killing it. The driver first sends a task the `SIGINT` signal and then
|
||||
sends `SIGTERM` if the task doesn't die after the `KillTimeout` duration has
|
||||
elapsed. The default `KillTimeout` is 5 seconds.
|
||||
|
||||
* `leader` - Specifies whether the task is the leader task of the task group. If
|
||||
- `leader` - Specifies whether the task is the leader task of the task group. If
|
||||
set to true, when the leader task completes, all other tasks within the task
|
||||
group will be gracefully shutdown.
|
||||
|
||||
* `LogConfig` - This allows configuring log rotation for the `stdout` and `stderr`
|
||||
- `LogConfig` - This allows configuring log rotation for the `stdout` and `stderr`
|
||||
buffers of a Task. See the log rotation reference below for more details.
|
||||
|
||||
* `Meta` - Annotates the task group with opaque metadata.
|
||||
- `Meta` - Annotates the task group with opaque metadata.
|
||||
|
||||
* `Name` - The name of the task. This field is required.
|
||||
- `Name` - The name of the task. This field is required.
|
||||
|
||||
* `Resources` - Provides the resource requirements of the task.
|
||||
- `Resources` - Provides the resource requirements of the task.
|
||||
See the resources reference for more details.
|
||||
|
||||
* `Services` - `Services` is a list of `Service` objects. Nomad integrates with
|
||||
- `Services` - `Services` is a list of `Service` objects. Nomad integrates with
|
||||
Consul for service discovery. A `Service` object represents a routable and
|
||||
discoverable service on the network. Nomad automatically registers when a task
|
||||
is started and de-registers it when the task transitions to the dead state.
|
||||
[Click here](/docs/service-discovery/index.html) to learn more about
|
||||
services. Below are the fields in the `Service` object:
|
||||
|
||||
* `Name`: An explicit name for the Service. Nomad will replace `${JOB}`,
|
||||
- `Name`: An explicit name for the Service. Nomad will replace `${JOB}`,
|
||||
`${TASKGROUP}` and `${TASK}` by the name of the job, task group or task,
|
||||
respectively. `${BASE}` expands to the equivalent of
|
||||
`${JOB}-${TASKGROUP}-${TASK}`, and is the default name for a Service.
|
||||
@@ -398,78 +403,78 @@ The `Task` object supports the following keys:
|
||||
limited to alphanumeric and hyphen characters (i.e. `[a-z0-9\-]`), and be
|
||||
less than 64 characters in length.
|
||||
|
||||
* `Tags`: A list of string tags associated with this Service. String
|
||||
- `Tags`: A list of string tags associated with this Service. String
|
||||
interpolation is supported in tags.
|
||||
|
||||
* `PortLabel`: `PortLabel` is an optional string and is used to associate
|
||||
- `PortLabel`: `PortLabel` is an optional string and is used to associate
|
||||
a port with the service. If specified, the port label must match one
|
||||
defined in the resources block. This could be a label of either a
|
||||
dynamic or a static port.
|
||||
|
||||
* `Checks`: `Checks` is an array of check objects. A check object defines a
|
||||
- `Checks`: `Checks` is an array of check objects. A check object defines a
|
||||
health check associated with the service. Nomad supports the `script`,
|
||||
`http` and `tcp` Consul Checks. Script checks are not supported for the
|
||||
qemu driver since the Nomad client doesn't have access to the file system
|
||||
of a task using the Qemu driver.
|
||||
|
||||
* `Type`: This indicates the check types supported by Nomad. Valid
|
||||
- `Type`: This indicates the check types supported by Nomad. Valid
|
||||
options are currently `script`, `http` and `tcp`.
|
||||
|
||||
* `Name`: The name of the health check.
|
||||
- `Name`: The name of the health check.
|
||||
|
||||
* `Interval`: This indicates the frequency of the health checks that
|
||||
- `Interval`: This indicates the frequency of the health checks that
|
||||
Consul will perform.
|
||||
|
||||
* `Timeout`: This indicates how long Consul will wait for a health
|
||||
- `Timeout`: This indicates how long Consul will wait for a health
|
||||
check query to succeed.
|
||||
|
||||
* `Path`: The path of the http endpoint which Consul will query to query
|
||||
- `Path`: The path of the http endpoint which Consul will query to check
|
||||
the health of a service if the type of the check is `http`. Nomad
|
||||
will add the IP of the service and the port, users are only required
|
||||
to add the relative URL of the health check endpoint.
|
||||
|
||||
* `Protocol`: This indicates the protocol for the http checks. Valid
|
||||
- `Protocol`: This indicates the protocol for the http checks. Valid
|
||||
options are `http` and `https`. We default it to `http`.
|
||||
|
||||
* `Command`: This is the command that the Nomad client runs for doing
|
||||
- `Command`: This is the command that the Nomad client runs for doing
|
||||
script based health check.
|
||||
|
||||
* `Args`: Additional arguments to the `command` for script based health
|
||||
- `Args`: Additional arguments to the `command` for script based health
|
||||
checks.
|
||||
|
||||
* `TLSSkipVerify`: If true, Consul will not attempt to verify the
|
||||
- `TLSSkipVerify`: If true, Consul will not attempt to verify the
|
||||
certificate when performing HTTPS checks. Requires Consul >= 0.7.2.
|
||||
|
||||
* `Templates` - Specifies the set of [`Template`](#template) objects to render for the task.
|
||||
- `Templates` - Specifies the set of [`Template`](#template) objects to render for the task.
|
||||
Templates can be used to inject both static and dynamic configuration with
|
||||
data populated from environment variables, Consul and Vault.
|
||||
|
||||
* `User` - Set the user that will run the task. It defaults to the same user
|
||||
- `User` - Set the user that will run the task. It defaults to the same user
|
||||
the Nomad client is being run as. This can only be set on Linux platforms.
|
||||
|
||||
### Resources
|
||||
|
||||
The `Resources` object supports the following keys:
|
||||
|
||||
* `CPU` - The CPU required in MHz.
|
||||
- `CPU` - The CPU required in MHz.
|
||||
|
||||
* `IOPS` - The number of IOPS required given as a weight between 10-1000.
|
||||
- `IOPS` - The number of IOPS required given as a weight between 10-1000.
|
||||
|
||||
* `MemoryMB` - The memory required in MB.
|
||||
- `MemoryMB` - The memory required in MB.
|
||||
|
||||
* `Networks` - A list of network objects.
|
||||
- `Networks` - A list of network objects.
|
||||
|
||||
The Network object supports the following keys:
|
||||
|
||||
* `MBits` - The number of MBits in bandwidth required.
|
||||
- `MBits` - The number of MBits in bandwidth required.
|
||||
|
||||
Nomad can allocate two types of ports to a task - Dynamic and Static/Reserved
|
||||
ports. A network object allows the user to specify a list of `DynamicPorts` and
|
||||
`ReservedPorts`. Each object supports the following attributes:
|
||||
|
||||
* `Value` - The port number for static ports. If the port is dynamic, then this
|
||||
- `Value` - The port number for static ports. If the port is dynamic, then this
|
||||
attribute is ignored.
|
||||
* `Label` - The label to annotate a port so that it can be referred in the
|
||||
- `Label` - The label to annotate a port so that it can be referred in the
|
||||
service discovery block or environment variables.
|
||||
|
||||
<a id="ephemeral_disk"></a>
|
||||
@@ -478,14 +483,14 @@ ports. A network object allows the user to specify a list of `DynamicPorts` and
|
||||
|
||||
The `EphemeralDisk` object supports the following keys:
|
||||
|
||||
* `Migrate` - Specifies that the Nomad client should make a best-effort attempt
|
||||
- `Migrate` - Specifies that the Nomad client should make a best-effort attempt
|
||||
to migrate the data from a remote machine if placement cannot be made on the
|
||||
original node. During data migration, the task will block starting until the
|
||||
data migration has completed. Value is a boolean and the default is false.
|
||||
|
||||
* `SizeMB` - Specifies the size of the ephemeral disk in MB. Default is 300.
|
||||
- `SizeMB` - Specifies the size of the ephemeral disk in MB. Default is 300.
|
||||
|
||||
* `Sticky` - Specifies that Nomad should make a best-effort attempt to place the
|
||||
- `Sticky` - Specifies that Nomad should make a best-effort attempt to place the
|
||||
updated allocation on the same machine. This will move the `local/` and
|
||||
`alloc/data` directories to the new allocation. Value is a boolean and the
|
||||
default is false.
|
||||
@@ -496,45 +501,45 @@ The `EphemeralDisk` object supports the following keys:
|
||||
|
||||
The `RestartPolicy` object supports the following keys:
|
||||
|
||||
* `Attempts` - `Attempts` is the number of restarts allowed in an `Interval`.
|
||||
- `Attempts` - `Attempts` is the number of restarts allowed in an `Interval`.
|
||||
|
||||
* `Interval` - `Interval` is a time duration that is specified in nanoseconds.
|
||||
- `Interval` - `Interval` is a time duration that is specified in nanoseconds.
|
||||
The `Interval` begins when the first task starts and ensures that only
|
||||
`Attempts` number of restarts happens within it. If more than `Attempts`
|
||||
number of failures happen, behavior is controlled by `Mode`.
|
||||
|
||||
* `Delay` - A duration to wait before restarting a task. It is specified in
|
||||
- `Delay` - A duration to wait before restarting a task. It is specified in
|
||||
nanoseconds. A random jitter of up to 25% is added to the delay.
|
||||
|
||||
* `Mode` - `Mode` is given as a string and controls the behavior when the task
|
||||
- `Mode` - `Mode` is given as a string and controls the behavior when the task
|
||||
fails more than `Attempts` times in an `Interval`. Possible values are listed
|
||||
below:
|
||||
|
||||
* `delay` - `delay` will delay the next restart until the next `Interval` is
|
||||
- `delay` - `delay` will delay the next restart until the next `Interval` is
|
||||
reached.
|
||||
|
||||
* `fail` - `fail` will not restart the task again.
|
||||
- `fail` - `fail` will not restart the task again.
|
||||
|
||||
### Constraint
|
||||
|
||||
The `Constraint` object supports the following keys:
|
||||
|
||||
* `LTarget` - Specifies the attribute to examine for the
|
||||
- `LTarget` - Specifies the attribute to examine for the
|
||||
constraint. See the table of attributes [here](/docs/runtime/interpolation.html#interpreted_node_vars).
|
||||
|
||||
* `RTarget` - Specifies the value to compare the attribute against.
|
||||
- `RTarget` - Specifies the value to compare the attribute against.
|
||||
This can be a literal value, another attribute or a regular expression if
|
||||
the `Operator` is in "regexp" mode.
|
||||
|
||||
* `Operand` - Specifies the test to be performed on the two targets. It takes on the
|
||||
- `Operand` - Specifies the test to be performed on the two targets. It takes on the
|
||||
following values:
|
||||
|
||||
* `regexp` - Allows the `RTarget` to be a regular expression to be matched.
|
||||
- `regexp` - Allows the `RTarget` to be a regular expression to be matched.
|
||||
|
||||
* `set_contains` - Allows the `RTarget` to be a comma separated list of values
|
||||
- `set_contains` - Allows the `RTarget` to be a comma separated list of values
|
||||
that should be contained in the LTarget's value.
|
||||
|
||||
* `distinct_host` - If set, the scheduler will not co-locate any task groups on the same
|
||||
- `distinct_host` - If set, the scheduler will not co-locate any task groups on the same
|
||||
machine. This can be specified as a job constraint which applies the
|
||||
constraint to all task groups in the job, or as a task group constraint which
|
||||
scopes the effect to just that group. The constraint may not be
|
||||
@@ -545,7 +550,7 @@ The `Constraint` object supports the following keys:
|
||||
to all task groups. When specified, `LTarget` and `RTarget` should be
|
||||
omitted.
|
||||
|
||||
* `distinct_property` - If set, the scheduler selects nodes that have a
|
||||
- `distinct_property` - If set, the scheduler selects nodes that have a
|
||||
distinct value of the specified property for each allocation. This can
|
||||
be specified as a job constraint which applies the constraint to all
|
||||
task groups in the job, or as a task group constraint which scopes the
|
||||
@@ -557,7 +562,7 @@ The `Constraint` object supports the following keys:
|
||||
to all task groups. When specified, `LTarget` should be the property
|
||||
that should be distinct and `RTarget` should be omitted.
|
||||
|
||||
* Comparison Operators - `=`, `==`, `is`, `!=`, `not`, `>`, `>=`, `<`, `<=`. The
|
||||
- Comparison Operators - `=`, `==`, `is`, `!=`, `not`, `>`, `>=`, `<`, `<=`. The
|
||||
ordering is compared lexically.
|
||||
|
||||
### Log Rotation
|
||||
@@ -565,10 +570,10 @@ The `Constraint` object supports the following keys:
|
||||
The `LogConfig` object configures the log rotation policy for a task's `stdout` and
|
||||
`stderr`. The `LogConfig` object supports the following attributes:
|
||||
|
||||
* `MaxFiles` - The maximum number of rotated files Nomad will retain for
|
||||
- `MaxFiles` - The maximum number of rotated files Nomad will retain for
|
||||
`stdout` and `stderr`, each tracked individually.
|
||||
|
||||
* `MaxFileSizeMB` - The size of each rotated file. The size is specified in
|
||||
- `MaxFileSizeMB` - The size of each rotated file. The size is specified in
|
||||
`MB`.
|
||||
|
||||
If the amount of disk resource requested for the task is less than the total
|
||||
@@ -605,12 +610,12 @@ is started.
|
||||
|
||||
The `Artifact` object supports the following keys:
|
||||
|
||||
* `GetterSource` - The path to the artifact to download.
|
||||
- `GetterSource` - The path to the artifact to download.
|
||||
|
||||
* `RelativeDest` - An optional path to download the artifact into relative to the
|
||||
- `RelativeDest` - An optional path to download the artifact into relative to the
|
||||
root of the task's directory. If omitted, it will default to `local/`.
|
||||
|
||||
* `GetterOptions` - A `map[string]string` block of options for `go-getter`.
|
||||
- `GetterOptions` - A `map[string]string` block of options for `go-getter`.
|
||||
Full documentation of supported options are available
|
||||
[here](https://github.com/hashicorp/go-getter/tree/ef5edd3d8f6f482b775199be2f3734fd20e04d4a#protocol-specific-options-1).
|
||||
An example is given below:
|
||||
@@ -708,35 +713,35 @@ README][ct].
|
||||
- `"restart"` - restart the task
|
||||
- `"signal"` - send a configurable signal to the task
|
||||
|
||||
* `ChangeSignal` - Specifies the signal to send to the task as a string like
|
||||
- `ChangeSignal` - Specifies the signal to send to the task as a string like
|
||||
"SIGUSR1" or "SIGINT". This option is required if the `ChangeMode` is
|
||||
`signal`.
|
||||
|
||||
* `DestPath` - Specifies the location where the resulting template should be
|
||||
- `DestPath` - Specifies the location where the resulting template should be
|
||||
rendered, relative to the task directory.
|
||||
|
||||
* `EmbeddedTmpl` - Specifies the raw template to execute. One of `SourcePath`
|
||||
- `EmbeddedTmpl` - Specifies the raw template to execute. One of `SourcePath`
|
||||
or `EmbeddedTmpl` must be specified, but not both. This is useful for smaller
|
||||
templates, but we recommend using `SourcePath` for larger templates.
|
||||
|
||||
* `LeftDelim` - Specifies the left delimiter to use in the template. The default
|
||||
- `LeftDelim` - Specifies the left delimiter to use in the template. The default
|
||||
is "{{". For some templates, it may be easier to use a different delimiter that
|
||||
does not conflict with the output file itself.
|
||||
|
||||
* `Perms` - Specifies the rendered template's permissions. File permissions are
|
||||
- `Perms` - Specifies the rendered template's permissions. File permissions are
|
||||
given as octal of the unix file permissions rwxrwxrwx.
|
||||
|
||||
* `RightDelim` - Specifies the right delimiter to use in the template. The default
|
||||
- `RightDelim` - Specifies the right delimiter to use in the template. The default
|
||||
is "}}". For some templates, it may be easier to use a different delimiter that
|
||||
does not conflict with the output file itself.
|
||||
|
||||
* `SourcePath` - Specifies the path to the template to be rendered. `SourcePath`
|
||||
- `SourcePath` - Specifies the path to the template to be rendered. `SourcePath`
|
||||
is mutually exclusive with `EmbeddedTmpl` attribute. The source can be fetched
|
||||
using an [`Artifact`](#artifact) resource. The template must exist on the
|
||||
machine prior to starting the task; it is not possible to reference a template
|
||||
inside of a Docker container, for example.
|
||||
|
||||
* `Splay` - Specifies a random amount of time to wait between 0ms and the given
|
||||
- `Splay` - Specifies a random amount of time to wait between 0ms and the given
|
||||
splay value before invoking the change mode. Should be specified in
|
||||
nanoseconds.
|
||||
|
||||
33
website/source/api/libraries-and-sdks.html.md
Normal file
33
website/source/api/libraries-and-sdks.html.md
Normal file
@@ -0,0 +1,33 @@
|
||||
---
|
||||
layout: api
|
||||
page_title: Libraries and SDKs - HTTP API
|
||||
sidebar_current: api-libraries-and-sdks
|
||||
description: |-
|
||||
There are many third-party libraries for interacting with Nomad's HTTP API.
|
||||
This page lists the HashiCorp and community-maintained Nomad HTTP API client
|
||||
libraries.
|
||||
---
|
||||
|
||||
# Client Libraries & SDKs
|
||||
|
||||
The programming libraries listed on this page can be used to consume the API
|
||||
more conveniently. Some are officially maintained while others are provided by
|
||||
the community.
|
||||
|
||||
## Official Libraries
|
||||
|
||||
- [`api`](https://github.com/hashicorp/nomad/tree/master/api) - Official Golang
|
||||
client for the Nomad HTTP API
|
||||
|
||||
- [`nomad-java-sdk`](https://github.com/hashicorp/nomad-java-sdk) - Official
|
||||
Java client for the Nomad HTTP API.
|
||||
|
||||
- [`nomad-ruby`](https://github.com/hashicorp/nomad-ruby) - Official Ruby client
|
||||
for the Nomad HTTP API
|
||||
|
||||
- [`nomad-scala-sdk`](https://github.com/hashicorp/nomad-scala-sdk) - Official
|
||||
Scala client for the Nomad HTTP API.
|
||||
|
||||
## Third-Party Libraries
|
||||
|
||||
_Want to see your library here? [Submit a Pull Request](https://github.com/hashicorp/nomad)._
|
||||
267
website/source/api/nodes.html.md
Normal file
267
website/source/api/nodes.html.md
Normal file
@@ -0,0 +1,267 @@
|
||||
---
|
||||
layout: api
|
||||
page_title: Nodes - HTTP API
|
||||
sidebar_current: api-nodes
|
||||
description: |-
|
||||
The /node endpoints are used to query for and interact with client nodes.
|
||||
---
|
||||
|
||||
# Nodes HTTP API
|
||||
|
||||
The `/node` endpoints are used to query for and interact with client nodes.
|
||||
|
||||
### List Nodes
|
||||
|
||||
This endpoint lists all nodes registered with Nomad.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ------------------------- | -------------------------- |
|
||||
| `GET` | `/v1/nodes` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `YES` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `prefix` `(string: "")` - Specifies a string to filter nodes on based on an
|
||||
index prefix. This is specified as a querystring parameter.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/nodes
|
||||
```
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/nodes?prefix=prod
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"ID": "fb2170a8-257d-3c64-b14d-bc06cc94e34c",
|
||||
"Datacenter": "dc1",
|
||||
"Name": "bacon-mac",
|
||||
"NodeClass": "",
|
||||
"Drain": false,
|
||||
"Status": "ready",
|
||||
"StatusDescription": "",
|
||||
"CreateIndex": 5,
|
||||
"ModifyIndex": 45
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
## Read Node
|
||||
|
||||
This endpoint queries the status of a client node.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ------------------------- | -------------------------- |
|
||||
| `GET` | `/v1/node/:node_id` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `YES` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `:node_id` `(string: <required>)` - Specifies the ID of the node. This must be
|
||||
the full UUID, not the short 8-character one. This is specified as part of the
|
||||
path.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/node/fb2170a8-257d-3c64-b14d-bc06cc94e34c
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"ID": "fb2170a8-257d-3c64-b14d-bc06cc94e34c",
|
||||
"SecretID": "",
|
||||
"Datacenter": "dc1",
|
||||
"Name": "bacon-mac",
|
||||
"HTTPAddr": "127.0.0.1:4646",
|
||||
"TLSEnabled": false,
|
||||
"Attributes": {
|
||||
"os.version": "10.12.5",
|
||||
"cpu.modelname": "Intel(R) Core(TM) i7-3615QM CPU @ 2.30GHz",
|
||||
"nomad.revision": "f551dcb83e3ac144c9dbb90583b6e82d234662e9",
|
||||
"driver.docker.volumes.enabled": "1",
|
||||
"driver.docker": "1",
|
||||
"cpu.frequency": "2300",
|
||||
"memory.totalbytes": "17179869184",
|
||||
"driver.mock_driver": "1",
|
||||
"kernel.version": "16.6.0",
|
||||
"unique.network.ip-address": "127.0.0.1",
|
||||
"nomad.version": "0.5.5dev",
|
||||
"unique.hostname": "bacon-mac",
|
||||
"cpu.arch": "amd64",
|
||||
"os.name": "darwin",
|
||||
"kernel.name": "darwin",
|
||||
"unique.storage.volume": "/dev/disk1",
|
||||
"driver.docker.version": "17.03.1-ce",
|
||||
"cpu.totalcompute": "18400",
|
||||
"unique.storage.bytestotal": "249783500800",
|
||||
"cpu.numcores": "8",
|
||||
"os.signals": "SIGCONT,SIGSTOP,SIGSYS,SIGINT,SIGIOT,SIGXCPU,SIGSEGV,SIGUSR1,SIGTTIN,SIGURG,SIGUSR2,SIGABRT,SIGALRM,SIGCHLD,SIGFPE,SIGTSTP,SIGIO,SIGKILL,SIGQUIT,SIGXFSZ,SIGBUS,SIGHUP,SIGPIPE,SIGPROF,SIGTRAP,SIGTTOU,SIGILL,SIGTERM",
|
||||
"driver.raw_exec": "1",
|
||||
"unique.storage.bytesfree": "142954643456"
|
||||
},
|
||||
"Resources": {
|
||||
"CPU": 18400,
|
||||
"MemoryMB": 16384,
|
||||
"DiskMB": 136332,
|
||||
"IOPS": 0,
|
||||
"Networks": [
|
||||
{
|
||||
"Device": "lo0",
|
||||
"CIDR": "127.0.0.1/32",
|
||||
"IP": "127.0.0.1",
|
||||
"MBits": 1000,
|
||||
"ReservedPorts": null,
|
||||
"DynamicPorts": null
|
||||
}
|
||||
]
|
||||
},
|
||||
"Reserved": {
|
||||
"CPU": 0,
|
||||
"MemoryMB": 0,
|
||||
"DiskMB": 0,
|
||||
"IOPS": 0,
|
||||
"Networks": null
|
||||
},
|
||||
"Links": null,
|
||||
"Meta": null,
|
||||
"NodeClass": "",
|
||||
"ComputedClass": "v1:10952212473894849978",
|
||||
"Drain": false,
|
||||
"Status": "ready",
|
||||
"StatusDescription": "",
|
||||
"StatusUpdatedAt": 1495748907,
|
||||
"CreateIndex": 5,
|
||||
"ModifyIndex": 45
|
||||
}
|
||||
```
|
||||
|
||||
## Create Node Evaluation
|
||||
|
||||
This endpoint creates a new evaluation for the given node. This can be used to
|
||||
force a run of the scheduling logic.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------- | ---------------------------- | -------------------------- |
|
||||
| `POST` | `/v1/node/:node_id/evaluate` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `:node_id` `(string: <required>)` - Specifies the UUID of the node. This must
|
||||
be the full UUID, not the short 8-character one. This is specified as part of
|
||||
the path.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/node/fb2170a8-257d-3c64-b14d-bc06cc94e34c/evaluate
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"HeartbeatTTL": 0,
|
||||
"EvalIDs": [
|
||||
"4ff1c7a2-c650-4058-f509-d5028ff9566e"
|
||||
],
|
||||
"EvalCreateIndex": 85,
|
||||
"NodeModifyIndex": 0,
|
||||
"LeaderRPCAddr": "127.0.0.1:4647",
|
||||
"NumNodes": 1,
|
||||
"Servers": [
|
||||
{
|
||||
"RPCAdvertiseAddr": "127.0.0.1:4647",
|
||||
"RPCMajorVersion": 1,
|
||||
"RPCMinorVersion": 1,
|
||||
"Datacenter": "dc1"
|
||||
}
|
||||
],
|
||||
"Index": 85,
|
||||
"LastContact": 0,
|
||||
"KnownLeader": false
|
||||
}
|
||||
```
|
||||
|
||||
## Drain Node
|
||||
|
||||
This endpoint toggles the drain mode of the node. When draining is enabled, no
|
||||
further allocations will be assigned to this node, and existing allocations will
|
||||
be migrated to new nodes.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------- | ------------------------- | -------------------------- |
|
||||
| `POST` | `/v1/node/:node_id/drain` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `:node_id` `(string: <required>)` - Specifies the UUID of the node. This must
|
||||
be the full UUID, not the short 8-character one. This is specified as part of
|
||||
the path.
|
||||
|
||||
- `enable` `(bool: <required>)` - Specifies if drain mode should be enabled.
|
||||
This is specified as a query string parameter.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/node/fb2170a8-257d-3c64-b14d-bc06cc94e34c/drain?enable=true
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"EvalIDs": [
|
||||
"253ec083-22a7-76c9-b8b6-2bf3d4b27bfb"
|
||||
],
|
||||
"EvalCreateIndex": 91,
|
||||
"NodeModifyIndex": 90,
|
||||
"Index": 90,
|
||||
"LastContact": 0,
|
||||
"KnownLeader": false
|
||||
}
|
||||
```
|
||||
124
website/source/api/operator.html.md
Normal file
124
website/source/api/operator.html.md
Normal file
@@ -0,0 +1,124 @@
|
||||
---
|
||||
layout: api
|
||||
page_title: Operator - HTTP API
|
||||
sidebar_current: api-operator
|
||||
description: |-
|
||||
The /operator endpoints provides cluster-level tools for Nomad operators, such
|
||||
as interacting with the Raft subsystem.
|
||||
---
|
||||
# /v1/operator
|
||||
|
||||
The `/operator` endpoint provides cluster-level tools for Nomad operators, such
|
||||
as interacting with the Raft subsystem.
|
||||
|
||||
~> Use this interface with extreme caution, as improper use could lead to a
|
||||
Nomad outage and even loss of data.
|
||||
|
||||
See the [Outage Recovery](/guides/outage.html) guide for some examples of how
|
||||
these capabilities are used. For a CLI to perform these operations manually,
|
||||
please see the documentation for the
|
||||
[`nomad operator`](/docs/commands/operator-index.html) command.
|
||||
|
||||
|
||||
## Read Raft Configuration
|
||||
|
||||
This endpoint queries the current Raft peer configuration of the cluster.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | --------------------------------- | -------------------------- |
|
||||
| `GET` | `/v1/operator/raft/configuration` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `stale` - Specifies if the cluster should respond without an active leader.
|
||||
This is specified as a querystring parameter.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/operator/raft/configuration
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
{
|
||||
"Index": 1,
|
||||
"Servers": [
|
||||
{
|
||||
"Address": "127.0.0.1:4647",
|
||||
"ID": "127.0.0.1:4647",
|
||||
"Leader": true,
|
||||
"Node": "bacon-mac.global",
|
||||
"Voter": true
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### Field Reference
|
||||
|
||||
- `Index` `(int)` - The `Index` value is the Raft index corresponding to this
|
||||
configuration. The latest configuration may not yet be committed if changes
|
||||
are in flight.
|
||||
|
||||
- `Servers` `(array: Server)` - The returned `Servers` array has information
|
||||
about the servers in the Raft peer configuration.
|
||||
|
||||
- `ID` `(string)` - The ID of the server. This is the same as the `Address`
|
||||
but may be upgraded to a GUID in a future version of Nomad.
|
||||
|
||||
- `Node` `(string)` - The node name of the server, as known to Nomad, or
|
||||
`"(unknown)"` if the node is stale and not known.
|
||||
|
||||
- `Address` `(string)` - The `ip:port` for the server.
|
||||
|
||||
- `Leader` `(bool)` - is either "true" or "false" depending on the server's
|
||||
role in the Raft configuration.
|
||||
|
||||
- `Voter` `(bool)` - is "true" or "false", indicating if the server has a vote
|
||||
in the Raft configuration. Future versions of Nomad may add support for
|
||||
non-voting servers.
|
||||
|
||||
## Remove Raft Peer
|
||||
|
||||
This endpoint removes a Nomad server with given address from the Raft
|
||||
configuration. The return code signifies success or failure.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| -------- | ---------------------------| -------------------------- |
|
||||
| `DELETE` | `/v1/operator/raft/peer` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `address` `(string: <required>)` - Specifies the server to remove as
|
||||
`ip:port`. This may be provided multiple times and is provided as a
|
||||
querystring parameter.
|
||||
|
||||
- `stale` - Specifies if the cluster should respond without an active leader.
|
||||
This is specified as a querystring parameter.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
--request DELETE \
|
||||
https://nomad.rocks/v1/operator/raft/peer?address=1.2.3.4
|
||||
```
|
||||
41
website/source/api/regions.html.md
Normal file
41
website/source/api/regions.html.md
Normal file
@@ -0,0 +1,41 @@
|
||||
---
|
||||
layout: api
|
||||
page_title: Regions - HTTP API
|
||||
sidebar_current: api-regions
|
||||
description: |-
|
||||
The /regions endpoints list all known regions.
|
||||
---
|
||||
|
||||
# Regions HTTP API
|
||||
|
||||
The `/regions` endpoints list all known regions.
|
||||
|
||||
## List Regions
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `GET` | `/status/regions` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/status/regions
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
[
|
||||
"region1",
|
||||
"region2"
|
||||
]
|
||||
```
|
||||
71
website/source/api/status.html.md
Normal file
71
website/source/api/status.html.md
Normal file
@@ -0,0 +1,71 @@
|
||||
---
|
||||
layout: api
|
||||
page_title: Status - HTTP API
|
||||
sidebar_current: api-status
|
||||
description: |-
|
||||
The /status endpoints query the Nomad system status.
|
||||
---
|
||||
|
||||
# Status HTTP API
|
||||
|
||||
The `/status` endpoints query the Nomad system status.
|
||||
|
||||
## Read Leader
|
||||
|
||||
This endpoint returns the address of the current leader in the region.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `GET` | `/status/leader` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/status/leader
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
"127.0.0.1:4647"
|
||||
```
|
||||
|
||||
## List Peers
|
||||
|
||||
This endpoint returns the set of raft peers in the region.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------- | -------------------------- |
|
||||
| `GET` | `/status/peers` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/status/peers
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
|
||||
```json
|
||||
[
|
||||
"127.0.0.1:4647"
|
||||
]
|
||||
```
|
||||
59
website/source/api/system.html.md
Normal file
59
website/source/api/system.html.md
Normal file
@@ -0,0 +1,59 @@
|
||||
---
|
||||
layout: api
|
||||
page_title: System - HTTP API
|
||||
sidebar_current: api-system
|
||||
description: |-
|
||||
The /system endpoints are used for system maintenance.
|
||||
---
|
||||
|
||||
# System HTTP API
|
||||
|
||||
The `/system` endpoints are used for system maintenance and should not be
|
||||
necessary for most users.
|
||||
|
||||
## Force GC
|
||||
|
||||
This endpoint initializes a garbage collection of jobs, evals, allocations, and
|
||||
nodes. This is an asynchronous operation.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | ---------------------------| -------------------------- |
|
||||
| `PUT` | `/v1/system/gc` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
--request PUT \
|
||||
https://nomad.rocks/v1/system/gc
|
||||
```
|
||||
|
||||
## Reconcile Summaries
|
||||
|
||||
This endpoint reconciles the summaries of all registered jobs.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------ | --------------------------------- | -------------------------- |
|
||||
| `PUT` | `/v1/system/reconcile/summaries` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
https://nomad.rocks/v1/system/reconcile/summaries
|
||||
```
|
||||
63
website/source/api/validate.html.md
Normal file
63
website/source/api/validate.html.md
Normal file
@@ -0,0 +1,63 @@
|
||||
---
|
||||
layout: api
|
||||
page_title: Validate - HTTP API
|
||||
sidebar_current: api-validate
|
||||
description: |-
|
||||
The /validate endpoints are used to validate object structs, fields, and
|
||||
types.
|
||||
---
|
||||
|
||||
# Validate HTTP API
|
||||
|
||||
The `/validate` endpoints are used to validate object structs, fields, and
|
||||
types.
|
||||
|
||||
## Validate Job
|
||||
|
||||
This endpoint validates a Nomad job file. The local Nomad agent forwards the
|
||||
request to a server. In the event a server can't be reached, the agent verifies
|
||||
the job file locally but skips validating driver configurations.
|
||||
|
||||
~> This endpoint accepts a **JSON job file**, not an HCL job file.
|
||||
|
||||
| Method | Path | Produces |
|
||||
| ------- | ------------------------- | -------------------------- |
|
||||
| `POST` | `/v1/validate/job` | `application/json` |
|
||||
|
||||
The table below shows this endpoint's support for
|
||||
[blocking queries](/api/index.html#blocking-queries) and
|
||||
[required ACLs](/api/index.html#acls).
|
||||
|
||||
| Blocking Queries | ACL Required |
|
||||
| ---------------- | ------------ |
|
||||
| `NO` | `none` |
|
||||
|
||||
### Parameters
|
||||
|
||||
There are no parameters, but the request _body_ contains the entire job file.
|
||||
|
||||
### Sample Payload
|
||||
|
||||
```text
|
||||
(any valid nomad job IN JSON FORMAT)
|
||||
```
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
$ curl \
|
||||
--request POST \
|
||||
--data @my-job.nomad \
|
||||
https://nomad.rocks/v1/validate/job
|
||||
```
|
||||
|
||||
### Sample Response
|
||||
```json
|
||||
{
|
||||
"DriverConfigValidated": true,
|
||||
"ValidationErrors": [
|
||||
"Task group cache validation failed: 1 error(s) occurred:\n\n* Task redis validation failed: 1 error(s) occurred:\n\n* 1 error(s) occurred:\n\n* minimum CPU value is 20; got 1"
|
||||
],
|
||||
"Error": "1 error(s) occurred:\n\n* Task group cache validation failed: 1 error(s) occurred:\n\n* Task redis validation failed: 1 error(s) occurred:\n\n* 1 error(s) occurred:\n\n* minimum CPU value is 20; got 1"
|
||||
}
|
||||
```
|
||||
@@ -8,120 +8,131 @@ description: |-
|
||||
<h1>Community</h1>
|
||||
|
||||
<p>
|
||||
Nomad is an open source project with a growing community. There are
|
||||
active, dedicated users willing to help you through various mediums.
|
||||
Nomad is an open source project with a growing community. There are
|
||||
active, dedicated users willing to help you through various mediums.
|
||||
</p>
|
||||
<p>
|
||||
<strong>Gitter:</strong> <a href="https://gitter.im/hashicorp-nomad/Lobby">Nomad Gitter Room</a>
|
||||
<strong>Gitter:</strong> <a href="https://gitter.im/hashicorp-nomad/Lobby">Nomad Gitter Room</a>
|
||||
</p>
|
||||
<p>
|
||||
<strong>IRC:</strong> Use the <a href="https://irc.gitter.im">Gitter IRC bridge</a>
|
||||
<strong>IRC:</strong> Use the <a href="https://irc.gitter.im">Gitter IRC bridge</a>
|
||||
</p>
|
||||
<p>
|
||||
<strong>Mailing list:</strong>
|
||||
<a href="https://groups.google.com/group/nomad-tool">Nomad Google Group</a>
|
||||
<strong>Mailing list:</strong>
|
||||
<a href="https://groups.google.com/group/nomad-tool">Nomad Google Group</a>
|
||||
</p>
|
||||
<p>
|
||||
<strong>Bug Tracker:</strong>
|
||||
<a href="https://github.com/hashicorp/nomad/issues">Issue tracker
|
||||
on GitHub</a>. Please only use this for reporting bugs. Do not ask
|
||||
for general help here. Use IRC or the mailing list for that.
|
||||
<strong>Bug Tracker:</strong>
|
||||
<a href="https://github.com/hashicorp/nomad/issues">Issue tracker
|
||||
on GitHub</a>. Please only use this for reporting bugs. Do not ask
|
||||
for general help here. Use IRC or the mailing list for that.
|
||||
</p>
|
||||
|
||||
<h2>Community Tools</h2>
|
||||
<p>
|
||||
These Nomad projects are created and managed by the amazing members of the
|
||||
Nomad community:
|
||||
</p>
|
||||
|
||||
<h1>Community Tools</h1>
|
||||
<p>These Nomad projects are created and managed by the amazing members of the Nomad community:</p>
|
||||
<ul>
|
||||
<li><a href="https://github.com/iverberk/nomad-ui">nomad-ui</a> - Nomad UI is a simple to deploy, web based UI for interacting with Nomad</li>
|
||||
<li><a href="https://github.com/iverberk/jenkins-nomad">nomad-jenkins</a> - This project uses Nomad to provision new Jenkins build slaves based on workload</li>
|
||||
<li><a href="https://github.com/iverberk/nomad-ui">nomad-ui</a> - Nomad UI is a simple to deploy, web based UI for interacting with Nomad</li>
|
||||
<li><a href="https://github.com/iverberk/jenkins-nomad">nomad-jenkins</a> - This project uses Nomad to provision new Jenkins build slaves based on workload</li>
|
||||
</ul>
|
||||
|
||||
<h1>People</h1>
|
||||
<em>
|
||||
Want to see your library here?
|
||||
<a href="https://github.com/hashicorp/nomad">Submit a Pull Request</a>.
|
||||
We also have a full list of <a href="/api/libraries-and-sdks.html">HTTP API Libraries</a>.
|
||||
</em>
|
||||
|
||||
|
||||
<h2>People</h2>
|
||||
|
||||
<p>
|
||||
The following people are some of the faces behind Nomad. They each
|
||||
contribute to Nomad in some core way. Over time, faces may appear and
|
||||
disappear from this list as contributors come and go. In addition to
|
||||
the faces below, Nomad is a project by
|
||||
<a href="https://www.hashicorp.com">HashiCorp</a>, so many HashiCorp
|
||||
employees actively contribute to Nomad.
|
||||
The following people are some of the faces behind Nomad. They each contribute
|
||||
to Nomad in some core way. Over time, faces may appear and disappear from this
|
||||
list as contributors come and go. In addition to the faces below, Nomad is a
|
||||
project by <a href="https://www.hashicorp.com">HashiCorp</a>, so many
|
||||
HashiCorp employees actively contribute to Nomad.
|
||||
</p>
|
||||
|
||||
<div class="people">
|
||||
|
||||
<div class="person">
|
||||
<img class="pull-left" src="https://www.gravatar.com/avatar/11ba9630c9136eef9a70d26473d355d5.png?s=125">
|
||||
<div class="bio">
|
||||
<h3>Armon Dadgar (<a href="https://github.com/armon">@armon</a>)</h3>
|
||||
<p>
|
||||
Armon Dadgar is a creator of Nomad. He works on all aspects of Nomad,
|
||||
focusing on core architecture. Armon is also an author or
|
||||
core contributor to:
|
||||
<a href="https://www.vaultproject.io">Vault</a>,
|
||||
<a href="https://www.consul.io">Consul</a>,
|
||||
<a href="https://www.serf.io">Serf</a>,
|
||||
<a href="https://www.terraform.io">Terraform</a>,
|
||||
and <a href="https://github.com/armon/statsite">Statsite</a>.
|
||||
</p>
|
||||
</div>
|
||||
<div class="person">
|
||||
<img class="pull-left" src="https://www.gravatar.com/avatar/11ba9630c9136eef9a70d26473d355d5.png?s=125">
|
||||
<div class="bio">
|
||||
<h3>Armon Dadgar (<a href="https://github.com/armon">@armon</a>)</h3>
|
||||
<p>
|
||||
Armon Dadgar is a creator of Nomad. He works on all aspects of Nomad,
|
||||
focusing on core architecture. Armon is also an author or
|
||||
core contributor to:
|
||||
<a href="https://www.vaultproject.io">Vault</a>,
|
||||
<a href="https://www.consul.io">Consul</a>,
|
||||
<a href="https://www.serf.io">Serf</a>,
|
||||
<a href="https://www.terraform.io">Terraform</a>,
|
||||
and <a href="https://github.com/armon/statsite">Statsite</a>.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="person">
|
||||
<img class="pull-left" src="https://www.gravatar.com/avatar/c55cecc94ea228ef48787481b2355575.png?s=125">
|
||||
<div class="bio">
|
||||
<h3>Ryan Uber (<a href="https://github.com/ryanuber">@ryanuber</a>)</h3>
|
||||
<p>
|
||||
Ryan Uber is a HashiCorp employee and core contributor to Nomad, with a
|
||||
focus on the agent, API client, and command-line interface. Ryan is also
|
||||
an active contributor to both <a href="https://www.consul.io">Consul</a>
|
||||
and <a href="https://www.serf.io">Serf</a>.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="person">
|
||||
<img class="pull-left" src="https://www.gravatar.com/avatar/bc8b1b69d830b589fbe36cfc89e55a78?s=125">
|
||||
<div class="bio">
|
||||
<h3>Alex Dadgar (<a href="https://github.com/dadgar">@dadgar</a>)</h3>
|
||||
<p>
|
||||
Alex is a HashiCorp employee and a core contributor to Nomad. He works on
|
||||
resource isolation and Drivers, among other things.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="person">
|
||||
<img class="pull-left" src="https://www.gravatar.com/avatar/dfb3948650131e4f0385c3328187cfca.png?s=125">
|
||||
<div class="bio">
|
||||
<h3>Clint Shryock (<a href="https://github.com/catsby">@catsby</a>)</h3>
|
||||
<p>
|
||||
Clint Shryock is a HashiCorp employee and core developer on Nomad,
|
||||
mostly focusing on Drivers and Fingerprinters. Mostly. Clint is also
|
||||
a core developer on <a href="https://www.terraform.io">Terraform</a>, and
|
||||
contributes to <a href="https://www.packer.io">Packer</a>.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="person">
|
||||
<img class="pull-left" src="https://avatars3.githubusercontent.com/u/649798?v=3&s=125">
|
||||
<div class="bio">
|
||||
<h3>Chris Bednarski (<a href="https://github.com/cbednarski">@cbednarski</a>)</h3>
|
||||
<p>
|
||||
Chris works at HashiCorp where he helps build Nomad and
|
||||
<a href="https://www.packer.io">Packer</a>, making sure all
|
||||
parts of the stack play nice together. Chris created
|
||||
<a href="https://github.com/cbednarski/hostess">Hostess</a>.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="person">
|
||||
<img class="pull-left" src="https://www.gravatar.com/avatar/c55cecc94ea228ef48787481b2355575.png?s=125">
|
||||
<div class="bio">
|
||||
<h3>Ryan Uber (<a href="https://github.com/ryanuber">@ryanuber</a>)</h3>
|
||||
<p>
|
||||
Ryan Uber is a HashiCorp employee and core contributor to Nomad, with a
|
||||
focus on the agent, API client, and command-line interface. Ryan is also
|
||||
an active contributor to both <a href="https://www.consul.io">Consul</a>
|
||||
and <a href="https://www.serf.io">Serf</a>.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="person">
|
||||
<img class="pull-left" src="https://pbs.twimg.com/profile_images/621788193642278912/CbESlBsV.jpg">
|
||||
<div class="bio">
|
||||
<h3>Jonathan Thomas - JT (<a href="https://github.com/captainill">@captainill</a>)</h3>
|
||||
<p>
|
||||
JT is an employee at HashiCorp where he works on the identity of all the open source projects.
|
||||
JT will take the designs and cut up responsive HTML/CSS for each project.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
<img class="pull-left" src="https://www.gravatar.com/avatar/bc8b1b69d830b589fbe36cfc89e55a78?s=125">
|
||||
<div class="bio">
|
||||
<h3>Alex Dadgar (<a href="https://github.com/dadgar">@dadgar</a>)</h3>
|
||||
<p>
|
||||
Alex is a HashiCorp employee and a core contributor to Nomad. He works on
|
||||
resource isolation and Drivers, among other things.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="clearfix"></div>
|
||||
<div class="person">
|
||||
<img class="pull-left" src="https://www.gravatar.com/avatar/dfb3948650131e4f0385c3328187cfca.png?s=125">
|
||||
<div class="bio">
|
||||
<h3>Clint Shryock (<a href="https://github.com/catsby">@catsby</a>)</h3>
|
||||
<p>
|
||||
Clint Shryock is a HashiCorp employee and core developer on Nomad,
|
||||
mostly focusing on Drivers and Fingerprinters. Mostly. Clint is also
|
||||
a core developer on <a href="https://www.terraform.io">Terraform</a>, and
|
||||
contributes to <a href="https://www.packer.io">Packer</a>.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="person">
|
||||
<img class="pull-left" src="https://avatars3.githubusercontent.com/u/649798?v=3&s=125">
|
||||
<div class="bio">
|
||||
<h3>Chris Bednarski (<a href="https://github.com/cbednarski">@cbednarski</a>)</h3>
|
||||
<p>
|
||||
Chris works at HashiCorp where he helps build Nomad and
|
||||
<a href="https://www.packer.io">Packer</a>, making sure all
|
||||
parts of the stack play nice together. Chris created
|
||||
<a href="https://github.com/cbednarski/hostess">Hostess</a>.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="person">
|
||||
<img class="pull-left" src="https://pbs.twimg.com/profile_images/621788193642278912/CbESlBsV.jpg">
|
||||
<div class="bio">
|
||||
<h3>Jonathan Thomas - JT (<a href="https://github.com/captainill">@captainill</a>)</h3>
|
||||
<p>
|
||||
JT is an employee at HashiCorp where he works on the identity of all the open source projects.
|
||||
JT will take the designs and cut up responsive HTML/CSS for each project.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -100,6 +100,12 @@ client {
|
||||
- `gc_inode_usage_threshold` `(float: 70)` - Specifies the inode usage percent
|
||||
which Nomad tries to maintain by garbage collecting terminal allocations.
|
||||
|
||||
- `gc_max_allocs` `(int: 50)` - Specifies the maximum number of allocations
|
||||
which a client will track before triggering a garbage collection of terminal
|
||||
allocations. This will *not* limit the number of allocations a node can run at
|
||||
a time, however after `gc_max_allocs` every new allocation will cause terminal
|
||||
allocations to be GC'd.
|
||||
|
||||
- `gc_parallel_destroys` `(int: 2)` - Specifies the maximum number of
|
||||
parallel destroys allowed by the garbage collector. This value should be
|
||||
relatively low to avoid high resource usage during garbage collections.
|
||||
|
||||
@@ -18,7 +18,7 @@ nomad inspect [options] <job>
|
||||
|
||||
The `inspect` command requires a single argument, a submitted job's name, and
|
||||
will retrieve the JSON version of the job. This JSON is valid to be submitted to
|
||||
the [Job HTTP API](/docs/http/job.html). This command is useful to inspect what
|
||||
the [Job HTTP API](/api/jobs.html). This command is useful to inspect what
|
||||
version of a job Nomad is running.
|
||||
|
||||
## General Options
|
||||
|
||||
@@ -9,11 +9,11 @@ description: >
|
||||
# Command: `operator raft list-peers`
|
||||
|
||||
The Raft list-peers command is used to display the current Raft peer
|
||||
configuration.
|
||||
configuration.
|
||||
|
||||
See the [Outage Recovery](/guides/outage.html) guide for some examples of how
|
||||
this command is used. For an API to perform these operations programmatically,
|
||||
please see the documentation for the [Operator](/docs/http/operator.html)
|
||||
please see the documentation for the [Operator](/api/operator.html)
|
||||
endpoint.
|
||||
|
||||
## Usage
|
||||
@@ -36,7 +36,7 @@ server.
|
||||
## Examples
|
||||
|
||||
An example output with three servers is as follows:
|
||||
|
||||
|
||||
```
|
||||
$ nomad operator raft list-peers
|
||||
Node ID Address State Voter
|
||||
@@ -58,4 +58,3 @@ Raft configuration.
|
||||
|
||||
- `Voter` is "true" or "false", indicating if the server has a vote in the Raft
|
||||
configuration. Future versions of Nomad may add support for non-voting servers.
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ command.
|
||||
|
||||
See the [Outage Recovery](/guides/outage.html) guide for some examples of how
|
||||
this command is used. For an API to perform these operations programmatically,
|
||||
please see the documentation for the [Operator](/docs/http/operator.html)
|
||||
please see the documentation for the [Operator](/api/operator.html)
|
||||
endpoint.
|
||||
|
||||
## Usage
|
||||
@@ -38,4 +38,3 @@ nomad operator raft remove-peer [options]
|
||||
|
||||
* `-peer-address`: Remove a Nomad server with given address from the Raft
|
||||
configuration. The format is "IP:port"
|
||||
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/agent/force-leave"
|
||||
sidebar_current: "docs-http-agent-force-leave"
|
||||
description: |-
|
||||
The '/v1/agent/force-leave' endpoint is used to force a gossip member to leave.
|
||||
---
|
||||
|
||||
# /v1/agent/force-leave
|
||||
|
||||
The `force-leave` endpoint is used to force a member of the gossip pool from
|
||||
the "failed" state into the "left" state. This allows the consensus protocol to
|
||||
remove the peer and stop attempting replication. This is only applicable for
|
||||
servers.
|
||||
|
||||
## PUT / POST
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Force a failed gossip member into the left state.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>PUT or POST</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/agent/force-leave`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">node</span>
|
||||
<span class="param-flags">required</span>
|
||||
The name of the node to force leave.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
A `200` status code on success.
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
@@ -1,53 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/agent/join"
|
||||
sidebar_current: "docs-http-agent-join"
|
||||
description: |-
|
||||
The '/1/agent/join' endpoint is used to cluster the Nomad servers.
|
||||
---
|
||||
|
||||
# /v1/agent/join
|
||||
|
||||
The `join` endpoint is used to cluster the Nomad servers using a gossip pool.
|
||||
The servers participate in a peer-to-peer gossip, and `join` is used to introduce
|
||||
a member to the pool. This is only applicable for servers.
|
||||
|
||||
## PUT / POST
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Initiate a join between the agent and target peers.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>PUT or POST</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/agent/join`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">address</span>
|
||||
<span class="param-flags">required</span>
|
||||
The address to join. Can be provided multiple times
|
||||
to attempt joining multiple peers.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"num_joined": 1,
|
||||
"error": ""
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
@@ -1,70 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/agent/members"
|
||||
sidebar_current: "docs-http-agent-members"
|
||||
description: |-
|
||||
The '/1/agent/members' endpoint is used to query the gossip peers.
|
||||
---
|
||||
|
||||
# /v1/agent/members
|
||||
|
||||
The `members` endpoint is used to query the agent for the known peers in
|
||||
the gossip pool. This is only applicable to servers.
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Lists the known members of the gossip pool.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/agent/members`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"ServerName": "DIPTANUs-MBP",
|
||||
"ServerRegion": "global",
|
||||
"ServerDC": "dc1",
|
||||
"Members": [
|
||||
{
|
||||
"Name": "DIPTANUs-MBP.global",
|
||||
"Addr": "127.0.0.1",
|
||||
"Port": 4648,
|
||||
"Tags": {
|
||||
"mvn": "1",
|
||||
"build": "0.5.0rc2",
|
||||
"port": "4647",
|
||||
"bootstrap": "1",
|
||||
"role": "nomad",
|
||||
"region": "global",
|
||||
"dc": "dc1",
|
||||
"vsn": "1"
|
||||
},
|
||||
"Status": "alive",
|
||||
"ProtocolMin": 1,
|
||||
"ProtocolMax": 4,
|
||||
"ProtocolCur": 2,
|
||||
"DelegateMin": 2,
|
||||
"DelegateMax": 4,
|
||||
"DelegateCur": 4
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
@@ -1,163 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/agent/self"
|
||||
sidebar_current: "docs-http-agent-self"
|
||||
description: |-
|
||||
The '/1/agent/self' endpoint is used to query the state of the agent.
|
||||
---
|
||||
|
||||
# /v1/agent/self
|
||||
|
||||
The `self` endpoint is used to query the state of the target agent.
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Query the state of the target agent.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/agent/self`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"config": {
|
||||
"Region": "global",
|
||||
"Datacenter": "dc1",
|
||||
"NodeName": "",
|
||||
"DataDir": "",
|
||||
"LogLevel": "DEBUG",
|
||||
"BindAddr": "127.0.0.1",
|
||||
"EnableDebug": true,
|
||||
"Ports": {
|
||||
"HTTP": 4646,
|
||||
"RPC": 4647,
|
||||
"Serf": 4648
|
||||
},
|
||||
"Addresses": {
|
||||
"HTTP": "",
|
||||
"RPC": "",
|
||||
"Serf": ""
|
||||
},
|
||||
"AdvertiseAddrs": {
|
||||
"RPC": "",
|
||||
"Serf": ""
|
||||
},
|
||||
"Client": {
|
||||
"Enabled": true,
|
||||
"StateDir": "",
|
||||
"AllocDir": "",
|
||||
"Servers": null,
|
||||
"NodeID": "",
|
||||
"NodeClass": "",
|
||||
"Meta": null
|
||||
},
|
||||
"Server": {
|
||||
"Enabled": true,
|
||||
"Bootstrap": false,
|
||||
"BootstrapExpect": 0,
|
||||
"DataDir": "",
|
||||
"ProtocolVersion": 0,
|
||||
"NumSchedulers": 0,
|
||||
"EnabledSchedulers": null
|
||||
},
|
||||
"Telemetry": null,
|
||||
"LeaveOnInt": false,
|
||||
"LeaveOnTerm": false,
|
||||
"EnableSyslog": false,
|
||||
"SyslogFacility": "",
|
||||
"DisableUpdateCheck": false,
|
||||
"DisableAnonymousSignature": true,
|
||||
"Revision": "",
|
||||
"Version": "0.1.0",
|
||||
"VersionPrerelease": "dev",
|
||||
"DevMode": true,
|
||||
"Atlas": null
|
||||
},
|
||||
"member": {
|
||||
"Name": "Armons-MacBook-Air.local.global",
|
||||
"Addr": "127.0.0.1",
|
||||
"Port": 4648,
|
||||
"Tags": {
|
||||
"bootstrap": "1",
|
||||
"build": "0.1.0dev",
|
||||
"dc": "dc1",
|
||||
"port": "4647",
|
||||
"region": "global",
|
||||
"role": "nomad",
|
||||
"vsn": "1"
|
||||
},
|
||||
"Status": "alive",
|
||||
"ProtocolMin": 1,
|
||||
"ProtocolMax": 3,
|
||||
"ProtocolCur": 2,
|
||||
"DelegateMin": 2,
|
||||
"DelegateMax": 4,
|
||||
"DelegateCur": 4
|
||||
},
|
||||
"stats": {
|
||||
"client": {
|
||||
"heartbeat_ttl": "19116443712",
|
||||
"known_servers": "0",
|
||||
"last_heartbeat": "8222075779",
|
||||
"num_allocations": "0"
|
||||
},
|
||||
"nomad": {
|
||||
"bootstrap": "false",
|
||||
"known_regions": "1",
|
||||
"leader": "true",
|
||||
"server": "true"
|
||||
},
|
||||
"raft": {
|
||||
"applied_index": "91",
|
||||
"commit_index": "91",
|
||||
"fsm_pending": "0",
|
||||
"last_contact": "never",
|
||||
"last_log_index": "91",
|
||||
"last_log_term": "1",
|
||||
"last_snapshot_index": "0",
|
||||
"last_snapshot_term": "0",
|
||||
"num_peers": "0",
|
||||
"state": "Leader",
|
||||
"term": "1"
|
||||
},
|
||||
"runtime": {
|
||||
"arch": "amd64",
|
||||
"cpu_count": "4",
|
||||
"goroutines": "58",
|
||||
"kernel.name": "darwin",
|
||||
"max_procs": "1",
|
||||
"version": "go1.4.2"
|
||||
},
|
||||
"serf": {
|
||||
"encrypted": "false",
|
||||
"event_queue": "0",
|
||||
"event_time": "1",
|
||||
"failed": "0",
|
||||
"intent_queue": "0",
|
||||
"left": "0",
|
||||
"member_time": "1",
|
||||
"members": "1",
|
||||
"query_queue": "0",
|
||||
"query_time": "1"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
@@ -1,82 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/agent/servers"
|
||||
sidebar_current: "docs-http-agent-servers"
|
||||
description: |-
|
||||
The '/v1/agent/servers' endpoint is used to query and update the servers list.
|
||||
---
|
||||
|
||||
# /v1/agent/servers
|
||||
|
||||
The `servers` endpoint is used to query an agent in client mode for its list
|
||||
of known servers. Client nodes register themselves with these server addresses
|
||||
so that they may dequeue work. The `servers` endpoint can be used to keep this
|
||||
configuration up to date if there are changes in the cluster.
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Lists the known server nodes.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/agent/servers`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
[
|
||||
"server1.local:4647",
|
||||
"server2.local:4647"
|
||||
]
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
## PUT / POST
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Updates the list of known servers to the provided list. Replaces
|
||||
all previous server addresses with the new list.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>PUT or POST</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/agent/servers`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">address</span>
|
||||
<span class="param-flags">required</span>
|
||||
The address of a server node in host:port format. This
|
||||
parameter may be specified multiple times to configure
|
||||
multiple servers on the client.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
A 200 status code on success.
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
|
||||
@@ -1,281 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/allocation"
|
||||
sidebar_current: "docs-http-alloc-"
|
||||
description: |-
|
||||
The '/1/allocation' endpoint is used to query a specific allocation.
|
||||
---
|
||||
|
||||
# /v1/allocation
|
||||
|
||||
The `allocation` endpoint is used to query a specific allocation.
|
||||
By default, the agent's local region is used; another region can
|
||||
be specified using the `?region=` query parameter.
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Query a specific allocation.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/allocation/<ID>`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Blocking Queries</dt>
|
||||
<dd>
|
||||
[Supported](/docs/http/index.html#blocking-queries)
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"ID": "203266e5-e0d6-9486-5e05-397ed2b184af",
|
||||
"EvalID": "e68125ed-3fba-fb46-46cc-291addbc4455",
|
||||
"Name": "example.cache[0]",
|
||||
"NodeID": "e02b6169-83bd-9df6-69bd-832765f333eb",
|
||||
"JobID": "example",
|
||||
"ModifyIndex": 9,
|
||||
"Resources": {
|
||||
"Networks": [
|
||||
{
|
||||
"DynamicPorts": [
|
||||
{
|
||||
"Value": 20802,
|
||||
"Label": "db"
|
||||
}
|
||||
],
|
||||
"ReservedPorts": null,
|
||||
"MBits": 10,
|
||||
"IP": "",
|
||||
"CIDR": "",
|
||||
"Device": ""
|
||||
}
|
||||
],
|
||||
"IOPS": 0,
|
||||
"DiskMB": 0,
|
||||
"MemoryMB": 256,
|
||||
"CPU": 500
|
||||
},
|
||||
"TaskGroup": "cache",
|
||||
"Job": {
|
||||
"ModifyIndex": 5,
|
||||
"CreateIndex": 5,
|
||||
"StatusDescription": "",
|
||||
"Status": "",
|
||||
"Meta": null,
|
||||
"Update": {
|
||||
"MaxParallel": 1,
|
||||
"Stagger": 1e+10
|
||||
},
|
||||
"TaskGroups": [
|
||||
{
|
||||
"Meta": null,
|
||||
"Tasks": [
|
||||
{
|
||||
"Meta": null,
|
||||
"Resources": {
|
||||
"Networks": [
|
||||
{
|
||||
"DynamicPorts": [
|
||||
{
|
||||
"Value": 20802,
|
||||
"Label": "db"
|
||||
}
|
||||
],
|
||||
"ReservedPorts": null,
|
||||
"MBits": 0,
|
||||
"IP": "127.0.0.1",
|
||||
"CIDR": "",
|
||||
"Device": "lo"
|
||||
}
|
||||
],
|
||||
"IOPS": 0,
|
||||
"DiskMB": 0,
|
||||
"MemoryMB": 256,
|
||||
"CPU": 500
|
||||
},
|
||||
"Constraints": null,
|
||||
"Services": [
|
||||
{
|
||||
"Checks": [
|
||||
{
|
||||
"Timeout": 2e+09,
|
||||
"Interval": 1e+10,
|
||||
"Protocol": "",
|
||||
"Http": "",
|
||||
"Script": "",
|
||||
"Type": "tcp",
|
||||
"Name": "alive",
|
||||
"Id": ""
|
||||
}
|
||||
],
|
||||
"PortLabel": "db",
|
||||
"Tags": [
|
||||
"global",
|
||||
"cache"
|
||||
],
|
||||
"Name": "example-cache-redis",
|
||||
"Id": ""
|
||||
}
|
||||
],
|
||||
"Env": null,
|
||||
"Config": {
|
||||
"port_map": [
|
||||
{
|
||||
"db": 6379
|
||||
}
|
||||
],
|
||||
"image": "redis:latest"
|
||||
},
|
||||
"Driver": "docker",
|
||||
"Name": "redis"
|
||||
}
|
||||
],
|
||||
"RestartPolicy": {
|
||||
"Delay": 2.5e+10,
|
||||
"Interval": 3e+11,
|
||||
"Attempts": 10
|
||||
},
|
||||
"Constraints": null,
|
||||
"Count": 1,
|
||||
"Name": "cache"
|
||||
}
|
||||
],
|
||||
"Region": "global",
|
||||
"ID": "example",
|
||||
"Name": "example",
|
||||
"Type": "service",
|
||||
"Priority": 50,
|
||||
"AllAtOnce": false,
|
||||
"Datacenters": [
|
||||
"dc1"
|
||||
],
|
||||
"Constraints": [
|
||||
{
|
||||
"Operand": "=",
|
||||
"RTarget": "linux",
|
||||
"LTarget": "${attr.kernel.name}"
|
||||
}
|
||||
]
|
||||
},
|
||||
"TaskResources": {
|
||||
"redis": {
|
||||
"Networks": [
|
||||
{
|
||||
"DynamicPorts": [
|
||||
{
|
||||
"Value": 20802,
|
||||
"Label": "db"
|
||||
}
|
||||
],
|
||||
"ReservedPorts": null,
|
||||
"MBits": 0,
|
||||
"IP": "127.0.0.1",
|
||||
"CIDR": "",
|
||||
"Device": "lo"
|
||||
}
|
||||
],
|
||||
"IOPS": 0,
|
||||
"DiskMB": 0,
|
||||
"MemoryMB": 256,
|
||||
"CPU": 500
|
||||
}
|
||||
},
|
||||
"Metrics": {
|
||||
"CoalescedFailures": 0,
|
||||
"AllocationTime": 1590406,
|
||||
"NodesEvaluated": 1,
|
||||
"NodesFiltered": 0,
|
||||
"ClassFiltered": null,
|
||||
"ConstraintFiltered": null,
|
||||
"NodesExhausted": 0,
|
||||
"ClassExhausted": null,
|
||||
"DimensionExhausted": null,
|
||||
"Scores": {
|
||||
"e02b6169-83bd-9df6-69bd-832765f333eb.binpack": 6.133651487695705
|
||||
}
|
||||
},
|
||||
"DesiredStatus": "run",
|
||||
"DesiredDescription": "",
|
||||
"ClientStatus": "running",
|
||||
"ClientDescription": "",
|
||||
"TaskStates": {
|
||||
"redis": {
|
||||
"Events": [
|
||||
{
|
||||
"KillError": "",
|
||||
"Message": "",
|
||||
"Signal": 0,
|
||||
"ExitCode": 0,
|
||||
"DriverError": "",
|
||||
"Time": 1447806038427841000,
|
||||
"Type": "Started"
|
||||
}
|
||||
],
|
||||
"State": "running"
|
||||
"FinishedAt": "0001-01-01T00:00:00Z",
|
||||
"StartedAt": "2017-03-31T22:51:40.248633594Z",
|
||||
"Failed": false
|
||||
}
|
||||
},
|
||||
"CreateIndex": 7
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
### Field Reference
|
||||
|
||||
* `TaskStates` - `TaskStates` is a map of tasks to their current state and the
|
||||
latest events that have effected the state.
|
||||
|
||||
A task can be in the following states:
|
||||
|
||||
* `TaskStatePending` - The task is waiting to be run, either for the first
|
||||
time or due to a restart.
|
||||
* `TaskStateRunning` - The task is currently running.
|
||||
* `TaskStateDead` - The task is dead and will not run again.
|
||||
|
||||
Further the state contains the `StartedAt` and `FinishedAt` times of the
|
||||
task. `StartedAt` can be updated multiple times if the task restarts but
|
||||
`FinishedAt` is set only when the task transitions to `TaskStateDead`
|
||||
|
||||
<p>The latest 10 events are stored per task. Each event is timestamped (unix nano-seconds)
|
||||
and has one of the following types:</p>
|
||||
|
||||
* `Setup Failure` - The task could not be started because there was a
|
||||
failure setting up the task prior to it running.
|
||||
* `Driver Failure` - The task could not be started due to a failure in the
|
||||
driver.
|
||||
* `Started` - The task was started; either for the first time or due to a
|
||||
restart.
|
||||
* `Terminated` - The task was started and exited.
|
||||
* `Killing` - The task has been sent the kill signal.
|
||||
* `Killed` - The task was killed by an user.
|
||||
* `Received` - The task has been pulled by the client at the given timestamp.
|
||||
* `Failed Validation` - The task was invalid and as such it didn't run.
|
||||
* `Restarting` - The task terminated and is being restarted.
|
||||
* `Not Restarting` - the task has failed and is not being restarted because it has exceeded its restart policy.
|
||||
* `Downloading Artifacts` - The task is downloading the artifact(s) specified in the task.
|
||||
* `Failed Artifact Download` - Artifact(s) specified in the task failed to download.
|
||||
* `Restart Signaled` - The task was signalled to be restarted.
|
||||
* `Signaling` - The task is being sent a signal.
|
||||
* `Sibling Task Failed` - A task in the same task group failed.
|
||||
* `Leader Task Dead` - The group's leader task is dead.
|
||||
* `Driver` - A message from the driver.
|
||||
* `Task Setup` - Task setup messages.
|
||||
|
||||
Depending on the type the event will have applicable annotations.
|
||||
@@ -1,86 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/allocations"
|
||||
sidebar_current: "docs-http-allocs"
|
||||
description: |-
|
||||
The '/1/allocations' endpoint is used to list the allocations.
|
||||
---
|
||||
|
||||
# /v1/allocations
|
||||
|
||||
The `allocations` endpoint is used to query the status of allocations.
|
||||
By default, the agent's local region is used; another region can
|
||||
be specified using the `?region=` query parameter.
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Lists all the allocations.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/allocations`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">prefix</span>
|
||||
<span class="param-flags">optional</span>
|
||||
<span class="param-flags">even-length</span>
|
||||
Filter allocations based on an identifier prefix.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Blocking Queries</dt>
|
||||
<dd>
|
||||
[Supported](/docs/http/index.html#blocking-queries)
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
[
|
||||
{
|
||||
"ID": "203266e5-e0d6-9486-5e05-397ed2b184af",
|
||||
"EvalID": "e68125ed-3fba-fb46-46cc-291addbc4455",
|
||||
"Name": "example.cache[0]",
|
||||
"NodeID": "e02b6169-83bd-9df6-69bd-832765f333eb",
|
||||
"JobID": "example",
|
||||
"TaskGroup": "cache",
|
||||
"DesiredStatus": "run",
|
||||
"DesiredDescription": ""
|
||||
"ClientDescription": "",
|
||||
"ClientStatus": "running",
|
||||
"TaskStates": {
|
||||
"redis": {
|
||||
"Events": [
|
||||
{
|
||||
"KillError": "",
|
||||
"Message": "",
|
||||
"Signal": 0,
|
||||
"ExitCode": 0,
|
||||
"DriverError": "",
|
||||
"Time": 1447806038427841000,
|
||||
"Type": "Started"
|
||||
}
|
||||
],
|
||||
"State": "running"
|
||||
}
|
||||
},
|
||||
"CreateIndex": 7,
|
||||
"ModifyIndex": 9,
|
||||
}
|
||||
...
|
||||
]
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
@@ -1,155 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/client/allocation/stats"
|
||||
sidebar_current: "docs-http-client-allocation-stats"
|
||||
description: |-
|
||||
The '/v1/client/allocation/` endpoint is used to query the actual resources
|
||||
consumed by an allocation.
|
||||
---
|
||||
|
||||
# /v1/client/allocation
|
||||
|
||||
The client `allocation` endpoint is used to query the actual resources consumed
|
||||
by an allocation. The API endpoint is hosted by the Nomad client and requests
|
||||
have to be made to the nomad client whose resource usage metrics are of
|
||||
interest.
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Query resource usage of an allocation running on a client.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/client/allocation/<ID>/stats`</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"ResourceUsage": {
|
||||
"CpuStats": {
|
||||
"Measured": [
|
||||
"System Mode",
|
||||
"User Mode",
|
||||
"Percent"
|
||||
],
|
||||
"Percent": 105.77854560628487,
|
||||
"SystemMode": 6.860067935411291,
|
||||
"ThrottledPeriods": 0,
|
||||
"ThrottledTime": 0,
|
||||
"TotalTicks": 714.0051828424228,
|
||||
"UserMode": 98.9184820888787
|
||||
},
|
||||
"MemoryStats": {
|
||||
"Cache": 0,
|
||||
"KernelMaxUsage": 0,
|
||||
"KernelUsage": 0,
|
||||
"MaxUsage": 0,
|
||||
"Measured": [
|
||||
"RSS",
|
||||
"Swap"
|
||||
],
|
||||
"RSS": 14098432,
|
||||
"Swap": 0
|
||||
}
|
||||
},
|
||||
"Tasks": {
|
||||
"redis": {
|
||||
"Pids": {
|
||||
"27072": {
|
||||
"CpuStats": {
|
||||
"Measured": [
|
||||
"System Mode",
|
||||
"User Mode",
|
||||
"Percent"
|
||||
],
|
||||
"Percent": 6.8607999603563385,
|
||||
"SystemMode": 5.880684245133524,
|
||||
"ThrottledPeriods": 0,
|
||||
"ThrottledTime": 0,
|
||||
"TotalTicks": 0,
|
||||
"UserMode": 0.9801144039714172
|
||||
},
|
||||
"MemoryStats": {
|
||||
"Cache": 0,
|
||||
"KernelMaxUsage": 0,
|
||||
"KernelUsage": 0,
|
||||
"MaxUsage": 0,
|
||||
"Measured": [
|
||||
"RSS",
|
||||
"Swap"
|
||||
],
|
||||
"RSS": 13418496,
|
||||
"Swap": 0
|
||||
}
|
||||
},
|
||||
"27073": {
|
||||
"CpuStats": {
|
||||
"Measured": [
|
||||
"System Mode",
|
||||
"User Mode",
|
||||
"Percent"
|
||||
],
|
||||
"Percent": 98.91774564592852,
|
||||
"SystemMode": 0.9793836902777665,
|
||||
"ThrottledPeriods": 0,
|
||||
"ThrottledTime": 0,
|
||||
"TotalTicks": 0,
|
||||
"UserMode": 97.93836768490729
|
||||
},
|
||||
"MemoryStats": {
|
||||
"Cache": 0,
|
||||
"KernelMaxUsage": 0,
|
||||
"KernelUsage": 0,
|
||||
"MaxUsage": 0,
|
||||
"Measured": [
|
||||
"RSS",
|
||||
"Swap"
|
||||
],
|
||||
"RSS": 679936,
|
||||
"Swap": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
"ResourceUsage": {
|
||||
"CpuStats": {
|
||||
"Measured": [
|
||||
"System Mode",
|
||||
"User Mode",
|
||||
"Percent"
|
||||
],
|
||||
"Percent": 105.77854560628487,
|
||||
"SystemMode": 6.860067935411291,
|
||||
"ThrottledPeriods": 0,
|
||||
"ThrottledTime": 0,
|
||||
"TotalTicks": 714.0051828424228,
|
||||
"UserMode": 98.9184820888787
|
||||
},
|
||||
"MemoryStats": {
|
||||
"Cache": 0,
|
||||
"KernelMaxUsage": 0,
|
||||
"KernelUsage": 0,
|
||||
"MaxUsage": 0,
|
||||
"Measured": [
|
||||
"RSS",
|
||||
"Swap"
|
||||
],
|
||||
"RSS": 14098432,
|
||||
"Swap": 0
|
||||
}
|
||||
},
|
||||
"Timestamp": 1465865820750959600
|
||||
}
|
||||
},
|
||||
"Timestamp": 1465865820750959600
|
||||
}
|
||||
```
|
||||
</dd>
|
||||
</dl>
|
||||
@@ -1,370 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/client/fs"
|
||||
sidebar_current: "docs-http-client-fs"
|
||||
description: |-
|
||||
The '/v1/client/fs` endpoints are used to read the contents of an allocation
|
||||
directory.
|
||||
---
|
||||
|
||||
# /v1/client/fs
|
||||
|
||||
The client `fs` endpoints are used to read the contents of files and
|
||||
directories inside an allocation directory. The API endpoints are hosted by the
|
||||
Nomad client and requests have to be made to the Client where the particular
|
||||
allocation was placed.
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Read contents of a file in an allocation directory.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/client/fs/cat/<Allocation-ID>`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">path</span>
|
||||
<span class="param-flags">required</span>
|
||||
The path relative to the root of the allocation directory. It
|
||||
defaults to `/`
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```
|
||||
...
|
||||
07:49 docker/3e8f0f4a67c2[924]: 1:M 22 Jun 21:07:49.110 # Server started, Redis version 3.2.1
|
||||
07:49 docker/3e8f0f4a67c2[924]: 1:M 22 Jun 21:07:49.110 * The server is now ready to accept connections on port 6379
|
||||
...
|
||||
```
|
||||
|
||||
</dd>
|
||||
|
||||
</dl>
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Read contents of a file in an allocation directory at a particular offset.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/client/fs/readat/<Allocation-ID>`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">path</span>
|
||||
<span class="param-flags">required</span>
|
||||
The path relative to the root of the allocation directory. It
|
||||
defaults to `/`
|
||||
</li>
|
||||
</ul>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">offset</span>
|
||||
<span class="param-flags">required</span>
|
||||
The byte offset from where content is going to be read.
|
||||
</li>
|
||||
</ul>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">limit</span>
|
||||
<span class="param-flags">required</span>
|
||||
The number of bytes to read from the offset.
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```
|
||||
...
|
||||
07:49 docker/3e8f0f4a67c2[924]: 1:M 22 Jun 21:07:49.110 # Server started, Redis version 3.2.1
|
||||
07:49 docker/3e8f0f4a67c2[924]: 1:M 22 Jun 21:07:49.110 * The server is now ready to accept connections on port 6379
|
||||
...
|
||||
```
|
||||
|
||||
</dd>
|
||||
|
||||
</dl>
|
||||
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Stream contents of a file in an allocation directory.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/client/fs/stream/<Allocation-ID>`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">path</span>
|
||||
<span class="param-flags">required</span>
|
||||
The path relative to the root of the allocation directory. It
|
||||
defaults to `/`
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">offset</span>
|
||||
The offset to start streaming from. Defaults to 0.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">origin</span>
|
||||
Origin can be either "start" or "end" and applies the offset relative to
|
||||
either the start or end of the file respectively. Defaults to "start".
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```
|
||||
...
|
||||
{
|
||||
"File":"alloc/logs/redis.stdout.0",
|
||||
"Offset":3604480
|
||||
"Data": "NTMxOTMyCjUzMTkzMwo1MzE5MzQKNTMx..."
|
||||
}
|
||||
{
|
||||
"File":"alloc/logs/redis.stdout.0",
|
||||
"FileEvent": "file deleted"
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
|
||||
|
||||
<dt>Field Reference</dt>
|
||||
<dd>
|
||||
The return value is a stream of frames. These frames contain the following
|
||||
fields:
|
||||
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">Data</span>
|
||||
A base64 encoding of the bytes being streamed.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">FileEvent</span>
|
||||
An event that could cause a change in the streams position. The possible
|
||||
values are "file deleted" and "file truncated".
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">Offset</span>
|
||||
Offset is the offset into the stream.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">File</span>
|
||||
The name of the file being streamed.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
<a id="logs"></a>
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Stream a task's stdout/stderr logs.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/client/fs/logs/<Allocation-ID>`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">task</span>
|
||||
<span class="param-flags">required</span>
|
||||
The name of the task inside the allocation to stream logs from.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">follow</span>
|
||||
<span class="param-flags">required</span>
|
||||
A boolean of whether to follow logs.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">type</span>
|
||||
Either, "stdout" or "stderr", defaults to "stdout" if omitted.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">offset</span>
|
||||
The offset to start streaming from. Defaults to 0.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">origin</span>
|
||||
Origin can be either "start" or "end" and applies the offset relative to
|
||||
either the start or end of the logs respectively. Defaults to "start".
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">plain</span>
|
||||
A boolean of whether to return just the plain text without framing.
|
||||
This can be useful when viewing logs in a browser.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```
|
||||
...
|
||||
{
|
||||
"File":"alloc/logs/redis.stdout.0",
|
||||
"Offset":3604480
|
||||
"Data": "NTMxOTMyCjUzMTkzMwo1MzE5MzQKNTMx..."
|
||||
}
|
||||
{
|
||||
"File":"alloc/logs/redis.stdout.0",
|
||||
"FileEvent": "file deleted"
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
|
||||
|
||||
<dt>Field Reference</dt>
|
||||
<dd>
|
||||
The return value is a stream of frames. These frames contain the following
|
||||
fields:
|
||||
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">Data</span>
|
||||
A base64 encoding of the bytes being streamed.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">FileEvent</span>
|
||||
An event that could cause a change in the streams position. The possible
|
||||
values are "file deleted" and "file truncated".
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">Offset</span>
|
||||
Offset is the offset into the stream.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">File</span>
|
||||
The name of the file being streamed.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
List files in an allocation directory.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/client/fs/ls/<Allocation-ID>`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">path</span>
|
||||
<span class="param-flags">required</span>
|
||||
The path relative to the root of the allocation directory. It
|
||||
defaults to `/`, the root of the allocation directory.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
[
|
||||
{
|
||||
"Name": "alloc",
|
||||
"IsDir": true,
|
||||
"Size": 4096,
|
||||
"FileMode": "drwxrwxr-x",
|
||||
"ModTime": "2016-03-15T15:40:00.414236712-07:00"
|
||||
},
|
||||
{
|
||||
"Name": "redis",
|
||||
"IsDir": true,
|
||||
"Size": 4096,
|
||||
"FileMode": "drwxrwxr-x",
|
||||
"ModTime": "2016-03-15T15:40:56.810238153-07:00"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Stat a file in an allocation directory.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/client/fs/stat/<Allocation-ID>`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">path</span>
|
||||
<span class="param-flags">required</span>
|
||||
The path of the file relative to the root of the allocation directory.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"Name": "redis-syslog-collector.out",
|
||||
"IsDir": false,
|
||||
"Size": 96,
|
||||
"FileMode": "-rw-rw-r--",
|
||||
"ModTime": "2016-03-15T15:40:56.822238153-07:00"
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
@@ -1,88 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/client/stats"
|
||||
sidebar_current: "docs-http-client-stats"
|
||||
description: |-
|
||||
  The `/v1/client/stats` endpoint is used to query the actual resources consumed
|
||||
on the node.
|
||||
---
|
||||
|
||||
# /v1/client/stats
|
||||
|
||||
The client `stats` endpoint is used to query the actual resources consumed on a node.
|
||||
The API endpoint is hosted by the Nomad client and requests have to be made to
|
||||
the nomad client whose resource usage metrics are of interest.
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Query the actual resource usage of a Nomad client
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/client/stats`</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"CPU": [
|
||||
{
|
||||
"CPU": "cpu0",
|
||||
"Idle": 89.2156862745098,
|
||||
"System": 4.901960784313726,
|
||||
"Total": 10.784313725490197,
|
||||
"User": 5.88235294117647
|
||||
},
|
||||
{
|
||||
"CPU": "cpu1",
|
||||
"Idle": 100,
|
||||
"System": 0,
|
||||
"Total": 0,
|
||||
"User": 0
|
||||
},
|
||||
{
|
||||
"CPU": "cpu2",
|
||||
"Idle": 94.05940594059405,
|
||||
"System": 2.9702970297029703,
|
||||
"Total": 5.9405940594059405,
|
||||
"User": 2.9702970297029703
|
||||
},
|
||||
{
|
||||
"CPU": "cpu3",
|
||||
"Idle": 99.00990099009901,
|
||||
"System": 0,
|
||||
"Total": 0.9900990099009901,
|
||||
"User": 0.9900990099009901
|
||||
}
|
||||
],
|
||||
"CPUTicksConsumed": 119.5762958648806,
|
||||
"DiskStats": [
|
||||
{
|
||||
"Available": 16997969920,
|
||||
"Device": "/dev/disk1",
|
||||
"InodesUsedPercent": 85.84777164286838,
|
||||
"Mountpoint": "/",
|
||||
"Size": 120108089344,
|
||||
"Used": 102847975424,
|
||||
"UsedPercent": 85.62951586835626
|
||||
}
|
||||
],
|
||||
"Memory": {
|
||||
"Available": 3724746752,
|
||||
"Free": 2446233600,
|
||||
"Total": 8589934592,
|
||||
"Used": 4865187840
|
||||
},
|
||||
"Timestamp": 1465839167993064200,
|
||||
"Uptime": 101149
|
||||
}
|
||||
```
|
||||
</dd>
|
||||
</dl>
|
||||
@@ -1,131 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/evaluation"
|
||||
sidebar_current: "docs-http-eval-"
|
||||
description: |-
|
||||
  The `/v1/evaluation` endpoint is used to query a specific evaluation.
|
||||
---
|
||||
|
||||
# /v1/evaluation
|
||||
|
||||
The `evaluation` endpoint is used to query a specific evaluation.
|
||||
By default, the agent's local region is used; another region can
|
||||
be specified using the `?region=` query parameter.
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Query a specific evaluation.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/evaluation/<ID>`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Blocking Queries</dt>
|
||||
<dd>
|
||||
[Supported](/docs/http/index.html#blocking-queries)
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"ID": "055c0867-8bf7-5068-b3a3-d64e4e84e702",
|
||||
"Priority": 50,
|
||||
"Type": "service",
|
||||
"TriggeredBy": "job-register",
|
||||
"JobID": "example",
|
||||
"JobModifyIndex": 13,
|
||||
"NodeID": "",
|
||||
"NodeModifyIndex": 0,
|
||||
"Status": "complete",
|
||||
"StatusDescription": "",
|
||||
"Wait": 0,
|
||||
"NextEval": "",
|
||||
"PreviousEval": "",
|
||||
"BlockedEval": "fee40e32-aa0f-bf5e-b2fd-b08350875fdb",
|
||||
"FailedTGAllocs": {
|
||||
"cache": {
|
||||
"NodesEvaluated": 1,
|
||||
"NodesFiltered": 0,
|
||||
"NodesAvailable": {
|
||||
"dc1": 1
|
||||
},
|
||||
"ClassFiltered": null,
|
||||
"ConstraintFiltered": null,
|
||||
"NodesExhausted": 1,
|
||||
"ClassExhausted": null,
|
||||
"DimensionExhausted": {
|
||||
"memory exhausted": 1
|
||||
},
|
||||
"Scores": null,
|
||||
"AllocationTime": 61601,
|
||||
"CoalescedFailures": 2
|
||||
}
|
||||
},
|
||||
"CreateIndex": 14,
|
||||
"ModifyIndex": 17
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Query the allocations created or modified by an evaluation.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/evaluation/<ID>/allocations`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Blocking Queries</dt>
|
||||
<dd>
|
||||
[Supported](/docs/http/index.html#blocking-queries)
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
[
|
||||
{
|
||||
"ID": "3575ba9d-7a12-0c96-7b28-add168c67984",
|
||||
"EvalID": "151accaa-1ac6-90fe-d427-313e70ccbb88",
|
||||
"Name": "binstore-storagelocker.binsl[0]",
|
||||
"NodeID": "a703c3ca-5ff8-11e5-9213-970ee8879d1b",
|
||||
"JobID": "binstore-storagelocker",
|
||||
"TaskGroup": "binsl",
|
||||
"DesiredStatus": "run",
|
||||
"DesiredDescription": "",
|
||||
"ClientStatus": "running",
|
||||
"ClientDescription": "",
|
||||
"CreateIndex": 16,
|
||||
"ModifyIndex": 16
|
||||
},
|
||||
...
|
||||
]
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
@@ -1,73 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/evaluations"
|
||||
sidebar_current: "docs-http-evals"
|
||||
description: |-
|
||||
  The `/v1/evaluations` endpoint is used to list the evaluations.
|
||||
---
|
||||
|
||||
# /v1/evaluations
|
||||
|
||||
The `evaluations` endpoint is used to query the status of evaluations.
|
||||
By default, the agent's local region is used; another region can
|
||||
be specified using the `?region=` query parameter.
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Lists all the evaluations.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/evaluations`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">prefix</span>
|
||||
<span class="param-flags">optional</span>
|
||||
<span class="param-flags">even-length</span>
|
||||
Filter evaluations based on an identifier prefix.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Blocking Queries</dt>
|
||||
<dd>
|
||||
[Supported](/docs/http/index.html#blocking-queries)
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
[
|
||||
{
|
||||
"ID": "151accaa-1ac6-90fe-d427-313e70ccbb88",
|
||||
"Priority": 50,
|
||||
"Type": "service",
|
||||
"TriggeredBy": "job-register",
|
||||
"JobID": "binstore-storagelocker",
|
||||
"JobModifyIndex": 14,
|
||||
"NodeID": "",
|
||||
"NodeModifyIndex": 0,
|
||||
"Status": "complete",
|
||||
"StatusDescription": "",
|
||||
"Wait": 0,
|
||||
"NextEval": "",
|
||||
"PreviousEval": "",
|
||||
"CreateIndex": 15,
|
||||
"ModifyIndex": 17
|
||||
},
|
||||
...
|
||||
]
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
@@ -1,98 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API"
|
||||
sidebar_current: "docs-http-overview"
|
||||
description: |-
|
||||
Nomad has an HTTP API that can be used to programmatically use Nomad.
|
||||
---
|
||||
|
||||
# HTTP API
|
||||
|
||||
The Nomad HTTP API is the primary interface to using Nomad, and is used
|
||||
to query the current state of the system as well as to modify it.
|
||||
The Nomad CLI makes use of the Go HTTP client and invokes the HTTP API.
|
||||
|
||||
All API routes are prefixed with `/v1/`. This documentation is only for the v1 API.
|
||||
|
||||
## Data Model and API Layout
|
||||
|
||||
There are four primary "nouns" in Nomad, these are jobs, nodes, allocations, and evaluations:
|
||||
|
||||
[![Nomad Data Model](/assets/images/nomad-data-model.png)](/assets/images/nomad-data-model.png)
|
||||
|
||||
Jobs are submitted by users and represent a _desired state_. A job is a declarative description
|
||||
of tasks to run which are bounded by constraints and require resources. Nodes are the servers
|
||||
in the clusters that tasks can be scheduled on. The mapping of tasks in a job to nodes is done
|
||||
using allocations. An allocation is used to declare that a set of tasks in a job should be run
|
||||
on a particular node. Scheduling is the process of determining the appropriate allocations and
|
||||
is done as part of an evaluation.
|
||||
|
||||
The API is modeled closely on the underlying data model. Use the links to the left for
|
||||
documentation about specific endpoints. There are also "Agent" APIs which interact with
|
||||
a specific agent and not the broader cluster used for administration.
|
||||
|
||||
<a name="blocking-queries"></a>
|
||||
## Blocking Queries
|
||||
|
||||
Certain endpoints support a feature called a "blocking query." A blocking query
|
||||
is used to wait for a potential change using long polling.
|
||||
|
||||
Not all endpoints support blocking, but those that do are clearly designated in
|
||||
the documentation. Any endpoint that supports blocking will set the HTTP header
|
||||
`X-Nomad-Index`, a unique identifier representing the current state of the
|
||||
requested resource. On subsequent requests for this resource, the client can set
|
||||
the `index` query string parameter to the value of `X-Nomad-Index`, indicating
|
||||
that the client wishes to wait for any changes subsequent to that index.
|
||||
|
||||
In addition to `index`, endpoints that support blocking will also honor a `wait`
|
||||
parameter specifying a maximum duration for the blocking request. This is limited to
|
||||
10 minutes. If not set, the wait time defaults to 5 minutes. This value can be specified
|
||||
in the form of "10s" or "5m" (i.e., 10 seconds or 5 minutes, respectively).
|
||||
|
||||
A critical note is that the return of a blocking request is **no guarantee** of a change. It
|
||||
is possible that the timeout was reached or that there was an idempotent write that does
|
||||
not affect the result of the query.
|
||||
|
||||
## Consistency Modes
|
||||
|
||||
Most of the read query endpoints support multiple levels of consistency. Since no policy will
|
||||
suit all clients' needs, these consistency modes allow the user to have the ultimate say in
|
||||
how to balance the trade-offs inherent in a distributed system.
|
||||
|
||||
The two read modes are:
|
||||
|
||||
* default - If not specified, the default is strongly consistent in almost all cases. However,
|
||||
there is a small window in which a new leader may be elected during which the old leader may
|
||||
service stale values. The trade-off is fast reads but potentially stale values. The condition
|
||||
resulting in stale reads is hard to trigger, and most clients should not need to worry about
|
||||
this case. Also, note that this race condition only applies to reads, not writes.
|
||||
|
||||
* stale - This mode allows any server to service the read regardless of whether
|
||||
it is the leader. This means reads can be arbitrarily stale; however, results are generally
|
||||
consistent to within 50 milliseconds of the leader. The trade-off is very fast and
|
||||
scalable reads with a higher likelihood of stale values. Since this mode allows reads without
|
||||
a leader, a cluster that is unavailable will still be able to respond to queries.
|
||||
|
||||
To switch these modes, use the `stale` query parameter on request.
|
||||
|
||||
To support bounding the acceptable staleness of data, responses provide the `X-Nomad-LastContact`
|
||||
header containing the time in milliseconds that a server was last contacted by the leader node.
|
||||
The `X-Nomad-KnownLeader` header also indicates if there is a known leader. These can be used
|
||||
by clients to gauge the staleness of a result and take appropriate action.
|
||||
|
||||
## Cross-Region Requests
|
||||
|
||||
By default any request to the HTTP API is assumed to pertain to the region of the machine
|
||||
servicing the request. A target region can be explicitly specified with the `region` query
|
||||
parameter. The request will be transparently forwarded and serviced by a server in the
|
||||
appropriate region.
|
||||
|
||||
## Compressed Responses
|
||||
|
||||
The HTTP API will gzip the response if the HTTP request denotes that the client accepts
|
||||
gzip compression. This is achieved via the standard `Accept-Encoding: gzip` header.
|
||||
|
||||
## Formatted JSON Output
|
||||
|
||||
By default, the output of all HTTP API requests is minimized JSON. If the client passes `pretty`
|
||||
on the query string, formatted JSON will be returned.
|
||||
@@ -1,748 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/job"
|
||||
sidebar_current: "docs-http-job-"
|
||||
description: |-
|
||||
  The `/v1/job` endpoint is used for CRUD on a single job.
|
||||
---
|
||||
|
||||
# /v1/job
|
||||
|
||||
The `job` endpoint is used for CRUD on a single job. By default, the agent's local
|
||||
region is used; another region can be specified using the `?region=` query parameter.
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Query a single job for its specification and status.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/job/<ID>`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Blocking Queries</dt>
|
||||
<dd>
|
||||
[Supported](/docs/http/index.html#blocking-queries)
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"Region": "global",
|
||||
"ID": "binstore-storagelocker",
|
||||
"Name": "binstore-storagelocker",
|
||||
"Type": "service",
|
||||
"Priority": 50,
|
||||
"AllAtOnce": false,
|
||||
"Datacenters": [
|
||||
"us2",
|
||||
"eu1"
|
||||
],
|
||||
"Constraints": [
|
||||
{
|
||||
"LTarget": "${attr.kernel.os}",
|
||||
"RTarget": "windows",
|
||||
"Operand": "="
|
||||
}
|
||||
],
|
||||
"TaskGroups": [
|
||||
{
|
||||
"Name": "binsl",
|
||||
"Count": 5,
|
||||
"Constraints": [
|
||||
{
|
||||
"LTarget": "${attr.kernel.os}",
|
||||
"RTarget": "linux",
|
||||
"Operand": "="
|
||||
}
|
||||
],
|
||||
"Tasks": [
|
||||
{
|
||||
"Name": "binstore",
|
||||
"Driver": "docker",
|
||||
"Config": {
|
||||
"image": "hashicorp/binstore"
|
||||
},
|
||||
"Constraints": null,
|
||||
"Resources": {
|
||||
"CPU": 500,
|
||||
"MemoryMB": 0,
|
||||
"DiskMB": 0,
|
||||
"IOPS": 0,
|
||||
"Networks": [
|
||||
{
|
||||
"Device": "",
|
||||
"CIDR": "",
|
||||
"IP": "",
|
||||
"MBits": 100,
|
||||
"ReservedPorts": null,
|
||||
"DynamicPorts": null
|
||||
}
|
||||
]
|
||||
},
|
||||
"Meta": null
|
||||
},
|
||||
{
|
||||
"Name": "storagelocker",
|
||||
"Driver": "java",
|
||||
"Config": {
|
||||
"image": "hashicorp/storagelocker"
|
||||
},
|
||||
"Constraints": [
|
||||
{
|
||||
"LTarget": "${attr.kernel.arch}",
|
||||
"RTarget": "amd64",
|
||||
"Operand": "="
|
||||
}
|
||||
],
|
||||
"Resources": {
|
||||
"CPU": 500,
|
||||
"MemoryMB": 0,
|
||||
"DiskMB": 0,
|
||||
"IOPS": 0,
|
||||
"Networks": null
|
||||
},
|
||||
"Meta": null
|
||||
}
|
||||
],
|
||||
"Meta": {
|
||||
"elb_checks": "3",
|
||||
"elb_interval": "10",
|
||||
"elb_mode": "tcp"
|
||||
}
|
||||
}
|
||||
],
|
||||
"Update": {
|
||||
"Stagger": 0,
|
||||
"MaxParallel": 0
|
||||
},
|
||||
"Meta": {
|
||||
"foo": "bar"
|
||||
},
|
||||
"Status": "",
|
||||
"StatusDescription": "",
|
||||
"Version": 3,
|
||||
"CreateIndex": 14,
|
||||
"ModifyIndex": 14
|
||||
}
|
||||
```
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Query all versions of a single job.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/job/<ID>/versions`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Blocking Queries</dt>
|
||||
<dd>
|
||||
[Supported](/docs/http/index.html#blocking-queries)
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
[
|
||||
{
|
||||
"Region": "global",
|
||||
"ID": "binstore-storagelocker",
|
||||
"Version": 2,
|
||||
...
|
||||
},
|
||||
{
|
||||
"Region": "global",
|
||||
"ID": "binstore-storagelocker",
|
||||
"Version": 1,
|
||||
...
|
||||
},
|
||||
{
|
||||
"Region": "global",
|
||||
"ID": "binstore-storagelocker",
|
||||
"Version": 0,
|
||||
...
|
||||
}
|
||||
]
|
||||
```
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Query the allocations belonging to a single job.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/job/<ID>/allocations`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">all</span>
|
||||
<span class="param-flags">optional</span>
|
||||
Returns all allocations of job with the given ID including those from
|
||||
past instances of the job.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Blocking Queries</dt>
|
||||
<dd>
|
||||
[Supported](/docs/http/index.html#blocking-queries)
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
[
|
||||
{
|
||||
"ID": "3575ba9d-7a12-0c96-7b28-add168c67984",
|
||||
"EvalID": "151accaa-1ac6-90fe-d427-313e70ccbb88",
|
||||
"Name": "binstore-storagelocker.binsl[0]",
|
||||
"NodeID": "a703c3ca-5ff8-11e5-9213-970ee8879d1b",
|
||||
"JobID": "binstore-storagelocker",
|
||||
"TaskGroup": "binsl",
|
||||
"DesiredStatus": "run",
|
||||
"DesiredDescription": "",
|
||||
"ClientStatus": "running",
|
||||
"ClientDescription": "",
|
||||
"CreateIndex": 16,
|
||||
"ModifyIndex": 16
|
||||
},
|
||||
...
|
||||
]
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Query the evaluations belonging to a single job.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/job/<ID>/evaluations`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Blocking Queries</dt>
|
||||
<dd>
|
||||
[Supported](/docs/http/index.html#blocking-queries)
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
[
|
||||
{
|
||||
"ID": "151accaa-1ac6-90fe-d427-313e70ccbb88",
|
||||
"Priority": 50,
|
||||
"Type": "service",
|
||||
"TriggeredBy": "job-register",
|
||||
"JobID": "binstore-storagelocker",
|
||||
"JobModifyIndex": 14,
|
||||
"NodeID": "",
|
||||
"NodeModifyIndex": 0,
|
||||
"Status": "complete",
|
||||
"StatusDescription": "",
|
||||
"Wait": 0,
|
||||
"NextEval": "",
|
||||
"PreviousEval": "",
|
||||
"CreateIndex": 15,
|
||||
"ModifyIndex": 17
|
||||
},
|
||||
...
|
||||
]
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Query the summary of a job.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/job/<ID>/summary`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Blocking Queries</dt>
|
||||
<dd>
|
||||
[Supported](/docs/http/index.html#blocking-queries)
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"JobID": "example",
|
||||
"Children": {
|
||||
"Dead": 0,
|
||||
"Running": 7,
|
||||
"Pending": 2
|
||||
},
|
||||
"Summary": {
|
||||
"cache": {
|
||||
"Queued": 0,
|
||||
"Complete": 0,
|
||||
"Failed": 0,
|
||||
"Running": 1,
|
||||
"Starting": 0,
|
||||
"Lost": 0
|
||||
}
|
||||
},
|
||||
"CreateIndex": 6,
|
||||
"ModifyIndex": 10
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
|
||||
## PUT / POST
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Registers a new job or updates an existing job
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>PUT or POST</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/job/<ID>`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">Job</span>
|
||||
<span class="param-flags">required</span>
|
||||
The JSON definition of the job.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">EnforceIndex</span>
|
||||
<span class="param-flags">optional</span>
|
||||
If EnforceIndex is set the job will only be registered if the passed
|
||||
JobModifyIndex matches the current job's index. If the index is zero,
|
||||
the register only occurs if the job is new. This paradigm allows
|
||||
check-and-set style job updating.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">JobModifyIndex</span>
|
||||
<span class="param-flags">optional</span>
|
||||
The JobModifyIndex to enforce the current job is at.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"EvalID": "d092fdc0-e1fd-2536-67d8-43af8ca798ac",
|
||||
"EvalCreateIndex": 35,
|
||||
"JobModifyIndex": 34
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Dispatch a new instance of a parameterized job.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>PUT or POST</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/job/<ID>/dispatch`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">Payload</span>
|
||||
<span class="param-flags">optional</span>
|
||||
A `[]byte` array encoded as a base64 string with a maximum size of 16KiB.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">Meta</span>
|
||||
<span class="param-flags">optional</span>
|
||||
A `map[string]string` of metadata keys to their values.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"Index": 13,
|
||||
"JobCreateIndex": 12,
|
||||
"EvalCreateIndex": 13,
|
||||
"EvalID": "e5f55fac-bc69-119d-528a-1fc7ade5e02c",
|
||||
"DispatchedJobID": "example/dispatch-1485408778-81644024"
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Creates a new evaluation for the given job. This can be used to force
|
||||
run the scheduling logic if necessary.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>PUT or POST</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/job/<ID>/evaluate`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"EvalID": "d092fdc0-e1fd-2536-67d8-43af8ca798ac",
|
||||
"EvalCreateIndex": 35,
|
||||
"JobModifyIndex": 34
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Invoke a dry-run of the scheduler for the job.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>PUT or POST</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/job/<ID>/plan`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">Job</span>
|
||||
<span class="param-flags">required</span>
|
||||
The JSON definition of the job.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">Diff</span>
|
||||
<span class="param-flags">optional</span>
|
||||
Whether the diff structure between the submitted and server side version
|
||||
of the job should be included in the response.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"Index": 0,
|
||||
"NextPeriodicLaunch": "0001-01-01T00:00:00Z",
|
||||
"Diff": {
|
||||
"Type": "Added",
|
||||
"TaskGroups": [
|
||||
{
|
||||
"Updates": {
|
||||
"create": 1
|
||||
},
|
||||
"Type": "Added",
|
||||
"Tasks": [
|
||||
{
|
||||
"Type": "Added",
|
||||
"Objects": [...],
|
||||
"Name": "redis",
|
||||
"Fields": [
|
||||
{
|
||||
"Type": "Added",
|
||||
"Old": "",
|
||||
"New": "docker",
|
||||
"Name": "Driver",
|
||||
"Annotations": null
|
||||
},
|
||||
{
|
||||
"Type": "Added",
|
||||
"Old": "",
|
||||
"New": "5000000000",
|
||||
"Name": "KillTimeout",
|
||||
"Annotations": null
|
||||
}
|
||||
],
|
||||
"Annotations": [
|
||||
"forces create"
|
||||
]
|
||||
}
|
||||
],
|
||||
"Objects": [...],
|
||||
"Name": "cache",
|
||||
"Fields": [...]
|
||||
}
|
||||
],
|
||||
"Objects": [
|
||||
{
|
||||
"Type": "Added",
|
||||
"Objects": null,
|
||||
"Name": "Datacenters",
|
||||
"Fields": [...]
|
||||
},
|
||||
{
|
||||
"Type": "Added",
|
||||
"Objects": null,
|
||||
"Name": "Constraint",
|
||||
"Fields": [...]
|
||||
},
|
||||
{
|
||||
"Type": "Added",
|
||||
"Objects": null,
|
||||
"Name": "Update",
|
||||
"Fields": [...]
|
||||
}
|
||||
],
|
||||
"ID": "example",
|
||||
"Fields": [...],
|
||||
...
|
||||
]
|
||||
},
|
||||
"CreatedEvals": [
|
||||
{
|
||||
"ModifyIndex": 0,
|
||||
"CreateIndex": 0,
|
||||
"SnapshotIndex": 0,
|
||||
"AnnotatePlan": false,
|
||||
"EscapedComputedClass": false,
|
||||
"NodeModifyIndex": 0,
|
||||
"NodeID": "",
|
||||
"JobModifyIndex": 0,
|
||||
"JobID": "example",
|
||||
"TriggeredBy": "job-register",
|
||||
"Type": "batch",
|
||||
"Priority": 50,
|
||||
"ID": "312e6a6d-8d01-0daf-9105-14919a66dba3",
|
||||
"Status": "blocked",
|
||||
"StatusDescription": "created to place remaining allocations",
|
||||
"Wait": 0,
|
||||
"NextEval": "",
|
||||
"PreviousEval": "80318ae4-7eda-e570-e59d-bc11df134817",
|
||||
"BlockedEval": "",
|
||||
"FailedTGAllocs": null,
|
||||
"ClassEligibility": {
|
||||
"v1:7968290453076422024": true
|
||||
}
|
||||
}
|
||||
],
|
||||
"JobModifyIndex": 0,
|
||||
"FailedTGAllocs": {
|
||||
"cache": {
|
||||
"CoalescedFailures": 3,
|
||||
"AllocationTime": 46415,
|
||||
"Scores": null,
|
||||
"NodesEvaluated": 1,
|
||||
"NodesFiltered": 0,
|
||||
"NodesAvailable": {
|
||||
"dc1": 1
|
||||
},
|
||||
"ClassFiltered": null,
|
||||
"ConstraintFiltered": null,
|
||||
"NodesExhausted": 1,
|
||||
"ClassExhausted": null,
|
||||
"DimensionExhausted": {
|
||||
"cpu exhausted": 1
|
||||
}
|
||||
}
|
||||
},
|
||||
"Annotations": {
|
||||
"DesiredTGUpdates": {
|
||||
"cache": {
|
||||
"DestructiveUpdate": 0,
|
||||
"InPlaceUpdate": 0,
|
||||
"Stop": 0,
|
||||
"Migrate": 0,
|
||||
"Place": 11,
|
||||
"Ignore": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
|
||||
<dt>Field Reference</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">Diff</span>
|
||||
A diff structure between the submitted job and the server side version.
|
||||
The top-level object is a Job Diff which contains Task Group Diffs,
|
||||
which in turn contain Task Diffs. Each of these objects then has Object
|
||||
and Field Diff structures embedded.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">NextPeriodicLaunch</span>
|
||||
If the job being planned is periodic, this field will include the next
|
||||
launch time for the job.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">CreatedEvals</span>
|
||||
A set of evaluations that were created as a result of the dry-run. These
|
||||
evaluations can signify a follow-up rolling update evaluation or a
|
||||
blocked evaluation.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">JobModifyIndex</span>
|
||||
The JobModifyIndex of the server side version of this job.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">FailedTGAllocs</span>
|
||||
A set of metrics to understand any allocation failures that occurred for
|
||||
the Task Group.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">Annotations</span>
|
||||
Annotations include the DesiredTGUpdates, which tracks what the
|
||||
scheduler would do given enough resources for each Task Group.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Forces a new instance of the periodic job. A new instance will be created
|
||||
even if it violates the job's
|
||||
[`prohibit_overlap`](/docs/job-specification/periodic.html#prohibit_overlap) settings. As
|
||||
such, this should be only used to immediately run a periodic job.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>PUT or POST</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/job/<ID>/periodic/force`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"EvalCreateIndex": 7,
|
||||
"EvalID": "57983ddd-7fcf-3e3a-fd24-f699ccfb36f4"
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
## DELETE
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Deregisters a job, and stops all allocations part of it.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>DELETE</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/job/<ID>`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"EvalID": "d092fdc0-e1fd-2536-67d8-43af8ca798ac",
|
||||
"EvalCreateIndex": 35,
|
||||
"JobModifyIndex": 34
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
@@ -1,104 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/jobs"
|
||||
sidebar_current: "docs-http-jobs"
|
||||
description: |-
|
||||
  The `/v1/jobs` endpoint is used to list jobs and register new ones.
|
||||
---
|
||||
|
||||
# /v1/jobs
|
||||
|
||||
The `jobs` endpoint is used to query the status of existing jobs in Nomad
|
||||
and to register new jobs. By default, the agent's local region is used;
|
||||
another region can be specified using the `?region=` query parameter.
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Lists all the jobs registered with Nomad.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/jobs`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">prefix</span>
|
||||
<span class="param-flags">optional</span>
|
||||
Filter jobs based on an identifier prefix.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Blocking Queries</dt>
|
||||
<dd>
|
||||
[Supported](/docs/http/index.html#blocking-queries)
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
[
|
||||
{
|
||||
"ID": "binstore-storagelocker",
|
||||
"Name": "binstore-storagelocker",
|
||||
"Type": "service",
|
||||
"Priority": 50,
|
||||
"Status": "",
|
||||
"StatusDescription": "",
|
||||
"CreateIndex": 14,
|
||||
"ModifyIndex": 14
|
||||
},
|
||||
...
|
||||
]
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
## PUT / POST
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Registers a new job.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>PUT or POST</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/jobs`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">Job</span>
|
||||
<span class="param-flags">required</span>
|
||||
The JSON definition of the job. The general structure is given
|
||||
by the [job specification](/docs/http/json-jobs.html).
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"EvalID": "d092fdc0-e1fd-2536-67d8-43af8ca798ac",
|
||||
"EvalCreateIndex": 35,
|
||||
"JobModifyIndex": 34
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
@@ -1,392 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/node"
|
||||
sidebar_current: "docs-http-node-"
|
||||
description: |-
|
||||
The '/v1/node' endpoint is used to query a specific client node.
|
||||
---
|
||||
|
||||
# /v1/node
|
||||
|
||||
The `node` endpoint is used to query a specific client node.
|
||||
By default, the agent's local region is used; another region can
|
||||
be specified using the `?region=` query parameter.
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Query the status of a client node registered with Nomad.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/node/<ID>`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Blocking Queries</dt>
|
||||
<dd>
|
||||
[Supported](/docs/http/index.html#blocking-queries)
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"ID": "c9972143-861d-46e6-df73-1d8287bc3e66",
|
||||
"Datacenter": "dc1",
|
||||
"Name": "Armons-MacBook-Air.local",
|
||||
"Attributes": {
|
||||
"arch": "amd64",
|
||||
"cpu.frequency": "1300.000000",
|
||||
"cpu.modelname": "Intel(R) Core(TM) i5-4250U CPU @ 1.30GHz",
|
||||
"cpu.numcores": "2",
|
||||
"cpu.totalcompute": "2600.000000",
|
||||
"driver.exec": "1",
|
||||
"driver.java": "1",
|
||||
"driver.java.runtime": "Java(TM) SE Runtime Environment (build 1.8.0_05-b13)",
|
||||
"driver.java.version": "1.8.0_05",
|
||||
"driver.java.vm": "Java HotSpot(TM) 64-Bit Server VM (build 25.5-b02, mixed mode)",
|
||||
"hostname": "Armons-MacBook-Air.local",
|
||||
"kernel.name": "darwin",
|
||||
"kernel.version": "14.4.0",
|
||||
"memory.totalbytes": "8589934592",
|
||||
"network.ip-address": "127.0.0.1",
|
||||
"os.name": "darwin",
|
||||
"os.version": "14.4.0",
|
||||
"storage.bytesfree": "35888713728",
|
||||
"storage.bytestotal": "249821659136",
|
||||
"storage.volume": "/dev/disk1"
|
||||
},
|
||||
"Resources": {
|
||||
"CPU": 2600,
|
||||
"MemoryMB": 8192,
|
||||
"DiskMB": 34226,
|
||||
"IOPS": 0,
|
||||
"Networks": null
|
||||
},
|
||||
"Reserved": {
|
||||
"CPU": 0,
|
||||
"MemoryMB": 0,
|
||||
"DiskMB": 0,
|
||||
"IOPS": 0,
|
||||
"Networks": null
|
||||
},
|
||||
"Links": {},
|
||||
"Meta": {},
|
||||
"NodeClass": "",
|
||||
"Drain": false,
|
||||
"Status": "ready",
|
||||
"StatusDescription": "",
|
||||
"CreateIndex": 3,
|
||||
"ModifyIndex": 4
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Query the allocations belonging to a single node.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/node/<ID>/allocations`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Blocking Queries</dt>
|
||||
<dd>
|
||||
[Supported](/docs/http/index.html#blocking-queries)
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
[
|
||||
{
|
||||
"ID": "203266e5-e0d6-9486-5e05-397ed2b184af",
|
||||
"EvalID": "e68125ed-3fba-fb46-46cc-291addbc4455",
|
||||
"Name": "example.cache[0]",
|
||||
"NodeID": "e02b6169-83bd-9df6-69bd-832765f333eb",
|
||||
"JobID": "example",
|
||||
"ModifyIndex": 9,
|
||||
"Resources": {
|
||||
"Networks": [
|
||||
{
|
||||
"DynamicPorts": [
|
||||
{
|
||||
"Value": 20802,
|
||||
"Label": "db"
|
||||
}
|
||||
],
|
||||
"ReservedPorts": null,
|
||||
"MBits": 10,
|
||||
"IP": "",
|
||||
"CIDR": "",
|
||||
"Device": ""
|
||||
}
|
||||
],
|
||||
"IOPS": 0,
|
||||
"DiskMB": 0,
|
||||
"MemoryMB": 256,
|
||||
"CPU": 500
|
||||
},
|
||||
"TaskGroup": "cache",
|
||||
"Job": {
|
||||
"ModifyIndex": 5,
|
||||
"CreateIndex": 5,
|
||||
"StatusDescription": "",
|
||||
"Status": "",
|
||||
"Meta": null,
|
||||
"Update": {
|
||||
"MaxParallel": 1,
|
||||
"Stagger": 1e+10
|
||||
},
|
||||
"TaskGroups": [
|
||||
{
|
||||
"Meta": null,
|
||||
"Tasks": [
|
||||
{
|
||||
"Meta": null,
|
||||
"Resources": {
|
||||
"Networks": [
|
||||
{
|
||||
"DynamicPorts": [
|
||||
{
|
||||
"Value": 20802,
|
||||
"Label": "db"
|
||||
}
|
||||
],
|
||||
"ReservedPorts": null,
|
||||
"MBits": 0,
|
||||
"IP": "127.0.0.1",
|
||||
"CIDR": "",
|
||||
"Device": "lo"
|
||||
}
|
||||
],
|
||||
"IOPS": 0,
|
||||
"DiskMB": 0,
|
||||
"MemoryMB": 256,
|
||||
"CPU": 500
|
||||
},
|
||||
"Constraints": null,
|
||||
"Services": [
|
||||
{
|
||||
"Checks": [
|
||||
{
|
||||
"Timeout": 2e+09,
|
||||
"Interval": 1e+10,
|
||||
"Protocol": "",
|
||||
"Http": "",
|
||||
"Script": "",
|
||||
"Type": "tcp",
|
||||
"Name": "alive",
|
||||
"Id": ""
|
||||
}
|
||||
],
|
||||
"PortLabel": "db",
|
||||
"Tags": [
|
||||
"global",
|
||||
"cache"
|
||||
],
|
||||
"Name": "example-cache-redis",
|
||||
"Id": ""
|
||||
}
|
||||
],
|
||||
"Env": null,
|
||||
"Config": {
|
||||
"port_map": [
|
||||
{
|
||||
"db": 6379
|
||||
}
|
||||
],
|
||||
"image": "redis:latest"
|
||||
},
|
||||
"Driver": "docker",
|
||||
"Name": "redis"
|
||||
}
|
||||
],
|
||||
"RestartPolicy": {
|
||||
"Delay": 2.5e+10,
|
||||
"Interval": 3e+11,
|
||||
"Attempts": 10
|
||||
},
|
||||
"Constraints": null,
|
||||
"Count": 1,
|
||||
"Name": "cache"
|
||||
}
|
||||
],
|
||||
"Region": "global",
|
||||
"ID": "example",
|
||||
"Name": "example",
|
||||
"Type": "service",
|
||||
"Priority": 50,
|
||||
"AllAtOnce": false,
|
||||
"Datacenters": [
|
||||
"dc1"
|
||||
],
|
||||
"Constraints": [
|
||||
{
|
||||
"Operand": "=",
|
||||
"RTarget": "linux",
|
||||
"LTarget": "${attr.kernel.name}"
|
||||
}
|
||||
]
|
||||
},
|
||||
"TaskResources": {
|
||||
"redis": {
|
||||
"Networks": [
|
||||
{
|
||||
"DynamicPorts": [
|
||||
{
|
||||
"Value": 20802,
|
||||
"Label": "db"
|
||||
}
|
||||
],
|
||||
"ReservedPorts": null,
|
||||
"MBits": 0,
|
||||
"IP": "127.0.0.1",
|
||||
"CIDR": "",
|
||||
"Device": "lo"
|
||||
}
|
||||
],
|
||||
"IOPS": 0,
|
||||
"DiskMB": 0,
|
||||
"MemoryMB": 256,
|
||||
"CPU": 500
|
||||
}
|
||||
},
|
||||
"Metrics": {
|
||||
"CoalescedFailures": 0,
|
||||
"AllocationTime": 1590406,
|
||||
"NodesEvaluated": 1,
|
||||
"NodesFiltered": 0,
|
||||
"ClassFiltered": null,
|
||||
"ConstraintFiltered": null,
|
||||
"NodesExhausted": 0,
|
||||
"ClassExhausted": null,
|
||||
"DimensionExhausted": null,
|
||||
"Scores": {
|
||||
"e02b6169-83bd-9df6-69bd-832765f333eb.binpack": 6.133651487695705
|
||||
}
|
||||
},
|
||||
"DesiredStatus": "run",
|
||||
"DesiredDescription": "",
|
||||
"ClientStatus": "running",
|
||||
"ClientDescription": "",
|
||||
"TaskStates": {
|
||||
"redis": {
|
||||
"Events": [
|
||||
{
|
||||
"KillError": "",
|
||||
"Message": "",
|
||||
"Signal": 0,
|
||||
"ExitCode": 0,
|
||||
"DriverError": "",
|
||||
"Time": 1447806038427841000,
|
||||
"Type": "Started"
|
||||
}
|
||||
],
|
||||
"State": "running"
|
||||
}
|
||||
},
|
||||
"CreateIndex": 7
|
||||
},
|
||||
...
|
||||
]
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
## PUT / POST
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Creates a new evaluation for the given node. This can be used to force
|
||||
run the scheduling logic if necessary.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>PUT or POST</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/node/<ID>/evaluate`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"EvalIDs": ["d092fdc0-e1fd-2536-67d8-43af8ca798ac"],
|
||||
"EvalCreateIndex": 35,
|
||||
"NodeModifyIndex": 34
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Toggle the drain mode of the node. When enabled, no further
|
||||
allocations will be assigned and existing allocations will be
|
||||
migrated.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>PUT or POST</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/node/<ID>/drain`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">enable</span>
|
||||
<span class="param-flags">required</span>
|
||||
Boolean value provided as a query parameter to either set
|
||||
enabled to true or false.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"EvalID": "d092fdc0-e1fd-2536-67d8-43af8ca798ac",
|
||||
"EvalCreateIndex": 35,
|
||||
"NodeModifyIndex": 34
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
@@ -1,66 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/nodes"
|
||||
sidebar_current: "docs-http-nodes"
|
||||
description: |-
|
||||
The '/v1/nodes' endpoint is used to list the client nodes.
|
||||
---
|
||||
|
||||
# /v1/nodes
|
||||
|
||||
The `nodes` endpoint is used to query the status of client nodes.
|
||||
By default, the agent's local region is used; another region can
|
||||
be specified using the `?region=` query parameter.
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Lists all the client nodes registered with Nomad.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/nodes`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">prefix</span>
|
||||
<span class="param-flags">optional</span>
|
||||
Filter nodes based on an identifier prefix.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Blocking Queries</dt>
|
||||
<dd>
|
||||
[Supported](/docs/http/index.html#blocking-queries)
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
[
|
||||
{
|
||||
"ID": "c9972143-861d-46e6-df73-1d8287bc3e66",
|
||||
"Datacenter": "dc1",
|
||||
"Name": "web-8e40e308",
|
||||
"NodeClass": "",
|
||||
"Drain": false,
|
||||
"Status": "ready",
|
||||
"StatusDescription": "",
|
||||
"CreateIndex": 3,
|
||||
"ModifyIndex": 4
|
||||
},
|
||||
...
|
||||
]
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
@@ -1,166 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/operator/"
|
||||
sidebar_current: "docs-http-operator"
|
||||
description: >
|
||||
The '/v1/operator/' endpoints provides cluster-level tools for Nomad
|
||||
operators.
|
||||
---
|
||||
|
||||
# /v1/operator
|
||||
|
||||
The Operator endpoint provides cluster-level tools for Nomad operators, such
|
||||
as interacting with the Raft subsystem. This was added in Nomad 0.5.5.
|
||||
|
||||
~> Use this interface with extreme caution, as improper use could lead to a
|
||||
Nomad outage and even loss of data.
|
||||
|
||||
See the [Outage Recovery](/guides/outage.html) guide for some examples of how
|
||||
these capabilities are used. For a CLI to perform these operations manually, please
|
||||
see the documentation for the [`nomad operator`](/docs/commands/operator-index.html)
|
||||
command.
|
||||
|
||||
By default, the agent's local region is used; another region can be specified
|
||||
using the `?region=` query parameter.
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Query the Raft server configuration of the cluster.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/operator/raft/configuration`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">stale</span>
|
||||
<span class="param-flags">optional</span>
|
||||
If the cluster doesn't currently have a leader an error will be
|
||||
returned. You can use the `?stale` query parameter to read the Raft
|
||||
configuration from any of the Nomad servers.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"Servers": [
|
||||
{
|
||||
"ID": "127.0.0.1:4647",
|
||||
"Node": "alice",
|
||||
"Address": "127.0.0.1:4647",
|
||||
"Leader": true,
|
||||
"Voter": true
|
||||
},
|
||||
{
|
||||
"ID": "127.0.0.2:4647",
|
||||
"Node": "bob",
|
||||
"Address": "127.0.0.2:4647",
|
||||
"Leader": false,
|
||||
"Voter": true
|
||||
},
|
||||
{
|
||||
"ID": "127.0.0.3:4647",
|
||||
"Node": "carol",
|
||||
"Address": "127.0.0.3:4647",
|
||||
"Leader": false,
|
||||
"Voter": true
|
||||
}
|
||||
],
|
||||
"Index": 22
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
|
||||
<dt>Field Reference</dt>
|
||||
<dd>
|
||||
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">Servers</span>
|
||||
The returned `Servers` array has information about the servers in the Raft
|
||||
peer configuration. See the `Server` block for a description of its fields:
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">Index</span>
|
||||
The `Index` value is the Raft index corresponding to this configuration. The
|
||||
latest configuration may not yet be committed if changes are in flight.
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
`Server` Fields:
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">ID</span>
|
||||
`ID` is the ID of the server. This is the same as the `Address` but may
|
||||
be upgraded to a GUID in a future version of Nomad.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">Node</span>
|
||||
`Node` is the node name of the server, as known to Nomad, or "(unknown)" if
|
||||
the node is stale and not known.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">Address</span>
|
||||
`Address` is the IP:port for the server.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">Leader</span>
|
||||
`Leader` is either "true" or "false" depending on the server's role in the
|
||||
Raft configuration.
|
||||
</li>
|
||||
<li>
|
||||
<span class="param">Voter</span>
|
||||
`Voter` is "true" or "false", indicating if the server has a vote in the Raft
|
||||
configuration. Future versions of Nomad may add support for non-voting servers.
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
|
||||
## DELETE
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Remove the Nomad server with given address from the Raft configuration. The
|
||||
return code signifies success or failure.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>DELETE</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/operator/raft/peer`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
<ul>
|
||||
<li>
|
||||
<span class="param">address</span>
|
||||
<span class="param-flags">required</span>
|
||||
The address specifies the server to remove and is given as an `IP:port`.
|
||||
The port number is usually 4647, unless configured otherwise. Nothing is
|
||||
required in the body of the request.
|
||||
</li>
|
||||
</ul>
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>None</dd>
|
||||
|
||||
</dl>
|
||||
@@ -1,38 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/regions"
|
||||
sidebar_current: "docs-http-regions"
|
||||
description: >
|
||||
The '/v1/regions' endpoint lists the known cluster regions.
|
||||
---
|
||||
|
||||
# /v1/regions
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Returns the known region names.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/regions`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
["region1","region2"]
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
@@ -1,77 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/status/"
|
||||
sidebar_current: "docs-http-status"
|
||||
description: |-
|
||||
The '/v1/status/' endpoints are used to query the system status.
|
||||
---
|
||||
|
||||
# /v1/status/leader
|
||||
|
||||
By default, the agent's local region is used; another region can
|
||||
be specified using the `?region=` query parameter.
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Returns the address of the current leader in the region.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/status/leader`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
"127.0.0.1:4647"
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
# /v1/status/peers
|
||||
|
||||
## GET
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Returns the set of raft peers in the region.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>GET</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/status/peers`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
[
|
||||
"127.0.0.1:4647",
|
||||
...
|
||||
]
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
|
||||
@@ -1,62 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/system/"
|
||||
sidebar_current: "docs-http-system"
|
||||
description: |-
|
||||
The '/v1/system/' endpoints are used for system maintenance.
|
||||
---
|
||||
|
||||
# /v1/system
|
||||
|
||||
The `system` endpoint is used for system maintenance and should not be
|
||||
necessary for most users. By default, the agent's local region is used; another
|
||||
region can be specified using the `?region=` query parameter.
|
||||
|
||||
## PUT
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Initiate garbage collection of jobs, evals, allocations and nodes.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>PUT</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/system/gc`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
</dl>
|
||||
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Reconcile the summaries of all the registered jobs.
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>PUT</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/system/reconcile/summaries`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
</dl>
|
||||
@@ -1,209 +0,0 @@
|
||||
---
|
||||
layout: "http"
|
||||
page_title: "HTTP API: /v1/validate/"
|
||||
sidebar_current: "docs-http-validate"
|
||||
description: |-
|
||||
The '/v1/validate/' endpoints are used for validation of objects.
|
||||
---
|
||||
|
||||
# /v1/validate/job
|
||||
|
||||
The `/validate/job` endpoint is used to validate a Nomad job file. The local Nomad
|
||||
agent forwards the request to a server. In the event a server can't be
|
||||
reached the agent verifies the job file locally but skips validating driver
|
||||
configurations.
|
||||
|
||||
## POST
|
||||
|
||||
<dl>
|
||||
<dt>Description</dt>
|
||||
<dd>
|
||||
Validates a Nomad job file
|
||||
</dd>
|
||||
|
||||
<dt>Method</dt>
|
||||
<dd>POST</dd>
|
||||
|
||||
<dt>URL</dt>
|
||||
<dd>`/v1/validate/job`</dd>
|
||||
|
||||
<dt>Parameters</dt>
|
||||
<dd>
|
||||
None
|
||||
</dd>
|
||||
<dt>Body</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"Job": {
|
||||
"Region": "global",
|
||||
"ID": "example",
|
||||
"ParentID": null,
|
||||
"Name": "example",
|
||||
"Type": "service",
|
||||
"Priority": 50,
|
||||
"AllAtOnce": null,
|
||||
"Datacenters": [
|
||||
"dc1"
|
||||
],
|
||||
"Constraints": null,
|
||||
"TaskGroups": [
|
||||
{
|
||||
"Name": "cache",
|
||||
"Count": 1,
|
||||
"Constraints": null,
|
||||
"Tasks": [
|
||||
{
|
||||
"Name": "mongo",
|
||||
"Driver": "exec",
|
||||
"User": "",
|
||||
"Config": {
|
||||
"args": [
|
||||
"-l",
|
||||
"127.0.0.1",
|
||||
"0"
|
||||
],
|
||||
"command": "/bin/nc"
|
||||
},
|
||||
"Constraints": null,
|
||||
"Env": null,
|
||||
"Services": null,
|
||||
"Resources": {
|
||||
"CPU": 1,
|
||||
"MemoryMB": 10,
|
||||
"DiskMB": null,
|
||||
"IOPS": 0,
|
||||
"Networks": [
|
||||
{
|
||||
"Public": false,
|
||||
"CIDR": "",
|
||||
"ReservedPorts": null,
|
||||
"DynamicPorts": [
|
||||
{
|
||||
"Label": "db111",
|
||||
"Value": 0
|
||||
},
|
||||
{
|
||||
"Label": "http111",
|
||||
"Value": 0
|
||||
}
|
||||
],
|
||||
"IP": "",
|
||||
"MBits": 10
|
||||
}
|
||||
]
|
||||
},
|
||||
"Meta": null,
|
||||
"KillTimeout": null,
|
||||
"LogConfig": {
|
||||
"MaxFiles": 10,
|
||||
"MaxFileSizeMB": 10
|
||||
},
|
||||
"Artifacts": null,
|
||||
"Vault": null,
|
||||
"Templates": null,
|
||||
"DispatchPayload": null
|
||||
},
|
||||
{
|
||||
"Name": "redis",
|
||||
"Driver": "raw_exec",
|
||||
"User": "",
|
||||
"Config": {
|
||||
"args": [
|
||||
"-l",
|
||||
"127.0.0.1",
|
||||
"0"
|
||||
],
|
||||
"command": "/usr/bin/nc"
|
||||
},
|
||||
"Constraints": null,
|
||||
"Env": null,
|
||||
"Services": null,
|
||||
"Resources": {
|
||||
"CPU": 1,
|
||||
"MemoryMB": 10,
|
||||
"DiskMB": null,
|
||||
"IOPS": 0,
|
||||
"Networks": [
|
||||
{
|
||||
"Public": false,
|
||||
"CIDR": "",
|
||||
"ReservedPorts": null,
|
||||
"DynamicPorts": [
|
||||
{
|
||||
"Label": "db",
|
||||
"Value": 0
|
||||
},
|
||||
{
|
||||
"Label": "http",
|
||||
"Value": 0
|
||||
}
|
||||
],
|
||||
"IP": "",
|
||||
"MBits": 10
|
||||
}
|
||||
]
|
||||
},
|
||||
"Meta": null,
|
||||
"KillTimeout": null,
|
||||
"LogConfig": {
|
||||
"MaxFiles": 10,
|
||||
"MaxFileSizeMB": 10
|
||||
},
|
||||
"Artifacts": null,
|
||||
"Vault": null,
|
||||
"Templates": null,
|
||||
"DispatchPayload": null
|
||||
}
|
||||
],
|
||||
"RestartPolicy": {
|
||||
"Interval": 300000000000,
|
||||
"Attempts": 10,
|
||||
"Delay": 25000000000,
|
||||
"Mode": "delay"
|
||||
},
|
||||
"EphemeralDisk": {
|
||||
"Sticky": null,
|
||||
"Migrate": null,
|
||||
"SizeMB": 300
|
||||
},
|
||||
"Meta": null
|
||||
}
|
||||
],
|
||||
"Update": {
|
||||
"Stagger": 10000000000,
|
||||
"MaxParallel": 0
|
||||
},
|
||||
"Periodic": null,
|
||||
"ParameterizedJob": null,
|
||||
"Payload": null,
|
||||
"Meta": null,
|
||||
"VaultToken": null,
|
||||
"Status": null,
|
||||
"StatusDescription": null,
|
||||
"CreateIndex": null,
|
||||
"ModifyIndex": null,
|
||||
"JobModifyIndex": null
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
|
||||
|
||||
<dt>Returns</dt>
|
||||
<dd>
|
||||
|
||||
```javascript
|
||||
{
|
||||
"DriverConfigValidated": true,
|
||||
"ValidationErrors": [
|
||||
"Task group cache validation failed: 1 error(s) occurred:\n\n* Task redis validation failed: 1 error(s) occurred:\n\n* 1 error(s) occurred:\n\n* minimum CPU value is 20; got 1"
|
||||
],
|
||||
"Error": "1 error(s) occurred:\n\n* Task group cache validation failed: 1 error(s) occurred:\n\n* Task redis validation failed: 1 error(s) occurred:\n\n* 1 error(s) occurred:\n\n* minimum CPU value is 20; got 1"
|
||||
}
|
||||
```
|
||||
|
||||
</dd>
|
||||
</dl>
|
||||
@@ -203,7 +203,7 @@ the key name, the second part is the key's value.
|
||||
## Client Configuration
|
||||
|
||||
The `template` block has the following [client configuration
|
||||
options](/docs/agent/config.html#options):
|
||||
options](/docs/agent/configuration/client.html#options):
|
||||
|
||||
* `template.allow_host_source` - Allows templates to specify their source
|
||||
template as an absolute path referencing host directories. Defaults to `true`.
|
||||
|
||||
@@ -16,7 +16,7 @@ as simple as possible, Nomad provides:
|
||||
|
||||
- Job specification for [log rotation](/docs/job-specification/logs.html)
|
||||
- CLI command for [log viewing](/docs/commands/logs.html)
|
||||
- API for programatic [log access](/docs/http/client-fs.html#logs)
|
||||
- API for programmatic [log access](/api/client.html#stream-logs)
|
||||
|
||||
This section will utilize the job named "docs" from the [previous
|
||||
sections](/docs/operating-a-job/submitting-jobs.html), but these operations
|
||||
|
||||
@@ -94,4 +94,4 @@ documentation](/docs/agent/telemetry.html).
|
||||
|
||||
For more advanced use cases, the resource usage data is also accessible via the
|
||||
client's HTTP API. See the documentation of the Client's [allocation HTTP
|
||||
API](/docs/http/client-allocation-stats.html).
|
||||
API](/api/client.html).
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
<li><a href="/intro/index.html">Intro</a></li>
|
||||
<li><a href="/guides/index.html">Guides</a></li>
|
||||
<li><a href="/docs/index.html">Docs</a></li>
|
||||
<li><a href="/api/index.html">API</a></li>
|
||||
<li><a href="/community.html">Community</a></li>
|
||||
<li><a href="/security.html">Security</a></li>
|
||||
<li><a href="/assets/files/press-kit.zip">Press Kit</a></li>
|
||||
|
||||
65
website/source/layouts/api.erb
Normal file
65
website/source/layouts/api.erb
Normal file
@@ -0,0 +1,65 @@
|
||||
<% wrap_layout :inner do %>
|
||||
<% content_for :sidebar do %>
|
||||
<ul class="nav docs-sidenav">
|
||||
<li<%= sidebar_current("api-overview") %>>
|
||||
<a href="/api/index.html">Overview</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("api-libraries-and-sdks") %>>
|
||||
<a href="/api/libraries-and-sdks.html">Libraries & SDKs</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("api-json-jobs") %>>
|
||||
<a href="/api/json-jobs.html">JSON Jobs</a>
|
||||
</li>
|
||||
|
||||
<hr>
|
||||
|
||||
<li<%= sidebar_current("api-agent") %>>
|
||||
<a href="/api/agent.html">Agent</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("api-allocations") %>>
|
||||
<a href="/api/allocations.html">Allocations</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("api-client") %>>
|
||||
<a href="/api/client.html">Client</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("api-evaluations") %>>
|
||||
<a href="/api/evaluations.html">Evaluations</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("api-jobs") %>>
|
||||
<a href="/api/jobs.html">Jobs</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("api-nodes") %>>
|
||||
<a href="/api/nodes.html">Nodes</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("api-operator") %>>
|
||||
<a href="/api/operator.html">Operator</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("api-regions") %>>
|
||||
<a href="/api/regions.html">Regions</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("api-status") %>>
|
||||
<a href="/api/status.html">Status</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("api-system") %>>
|
||||
<a href="/api/system.html">System</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("api-validate") %>>
|
||||
<a href="/api/validate.html">Validate</a>
|
||||
</li>
|
||||
</ul>
|
||||
<% end %>
|
||||
|
||||
<%= yield %>
|
||||
<% end %>
|
||||
@@ -291,10 +291,6 @@
|
||||
|
||||
<hr>
|
||||
|
||||
<li<%= sidebar_current("docs-http") %>>
|
||||
<a href="/docs/http/index.html">HTTP API</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-internal") %>>
|
||||
<a href="/docs/internals/index.html">Internals</a>
|
||||
<ul class="nav">
|
||||
|
||||
@@ -1,130 +0,0 @@
|
||||
<% wrap_layout :inner do %>
|
||||
<% content_for :sidebar do %>
|
||||
<ul class="nav docs-sidenav">
|
||||
<li<%= sidebar_current("docs-home") %>>
|
||||
<a href="/docs/index.html">Documentation Home</a>
|
||||
</li>
|
||||
|
||||
<hr>
|
||||
|
||||
<li<%= sidebar_current("docs-http-overview") %>>
|
||||
<a href="/docs/http/index.html">Overview</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-json-jobs") %>>
|
||||
<a href="/docs/http/json-jobs.html">JSON Jobs</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-job") %>>
|
||||
<a href="#">Jobs</a>
|
||||
<ul class="nav nav-visible">
|
||||
<li<%= sidebar_current("docs-http-jobs") %>>
|
||||
<a href="/docs/http/jobs.html">/v1/jobs</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-job-") %>>
|
||||
<a href="/docs/http/job.html">/v1/job</a>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-node") %>>
|
||||
<a href="#">Nodes</a>
|
||||
<ul class="nav nav-visible">
|
||||
<li<%= sidebar_current("docs-http-nodes") %>>
|
||||
<a href="/docs/http/nodes.html">/v1/nodes</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-node-") %>>
|
||||
<a href="/docs/http/node.html">/v1/node</a>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-alloc") %>>
|
||||
<a href="#">Allocations</a>
|
||||
<ul class="nav nav-visible">
|
||||
<li<%= sidebar_current("docs-http-allocs") %>>
|
||||
<a href="/docs/http/allocs.html">/v1/allocations</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-alloc-") %>>
|
||||
<a href="/docs/http/alloc.html">/v1/allocation</a>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-eval") %>>
|
||||
<a href="#">Evaluations</a>
|
||||
<ul class="nav nav-visible">
|
||||
<li<%= sidebar_current("docs-http-evals") %>>
|
||||
<a href="/docs/http/evals.html">/v1/evaluations</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-eval-") %>>
|
||||
<a href="/docs/http/eval.html">/v1/evaluation</a>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-agent") %>>
|
||||
<a href="#">Agent</a>
|
||||
<ul class="nav nav-visible">
|
||||
<li<%= sidebar_current("docs-http-agent-self") %>>
|
||||
<a href="/docs/http/agent-self.html">/v1/agent/self</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-agent-join") %>>
|
||||
<a href="/docs/http/agent-join.html">/v1/agent/join</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-agent-members") %>>
|
||||
<a href="/docs/http/agent-members.html">/v1/agent/members</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-agent-force-leave") %>>
|
||||
<a href="/docs/http/agent-force-leave.html">/v1/agent/force-leave</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-agent-servers") %>>
|
||||
<a href="/docs/http/agent-servers.html">/v1/agent/servers</a>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li<%= sidebar_current("docs-http-client") %>>
|
||||
<a href="#">Client</a>
|
||||
<ul class="nav nav-visible">
|
||||
<li<%= sidebar_current("docs-http-client-fs") %>>
|
||||
<a href="/docs/http/client-fs.html">/v1/client/fs</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-client-stats") %>>
|
||||
<a href="/docs/http/client-stats.html">/v1/client/stats</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-client-allocation-stats") %>>
|
||||
<a href="/docs/http/client-allocation-stats.html">/v1/client/allocation</a>
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-regions") %>>
|
||||
<a href="/docs/http/regions.html">Regions</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-status") %>>
|
||||
<a href="/docs/http/status.html">Status</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-operator") %>>
|
||||
<a href="/docs/http/operator.html">Operator</a>
|
||||
</li>
|
||||
|
||||
<li<%= sidebar_current("docs-http-system") %>>
|
||||
<a href="/docs/http/system.html">System</a>
|
||||
</li>
|
||||
</ul>
|
||||
<% end %>
|
||||
|
||||
<%= yield %>
|
||||
<% end %>
|
||||
@@ -1,11 +1,11 @@
|
||||
<% wrap_layout :layout do %>
|
||||
<div class="container">
|
||||
<div class="row">
|
||||
<div id="docs-sidebar" class="col-sm-3 col-md-3 col-xs-12 hidden-print" role="complementary">
|
||||
<div id="docs-sidebar" class="col-sm-4 col-md-3 col-xs-12 hidden-print" role="complementary">
|
||||
<%= yield_content :sidebar %>
|
||||
</div>
|
||||
|
||||
<div id="inner" class="col-sm-9 col-md-9 col-xs-12" role="main">
|
||||
<div id="inner" class="col-sm-8 col-md-9 col-xs-12" role="main">
|
||||
<%= yield %>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -75,6 +75,7 @@
|
||||
<li><a href="/intro/index.html">Intro</a></li>
|
||||
<li><a href="/guides/index.html">Guides</a></li>
|
||||
<li><a href="/docs/index.html">Docs</a></li>
|
||||
<li><a href="/api/index.html">API</a></li>
|
||||
<li><a href="/community.html">Community</a></li>
|
||||
<li>
|
||||
<a href="/downloads.html">
|
||||
@@ -106,6 +107,7 @@
|
||||
<li><a href="/intro/index.html">Intro</a></li>
|
||||
<li><a href="/guides/index.html">Guides</a></li>
|
||||
<li><a href="/docs/index.html">Docs</a></li>
|
||||
<li><a href="/api/index.html">API</a></li>
|
||||
<li><a href="/community.html">Community</a></li>
|
||||
<li><a href="/security.html">Security</a></li>
|
||||
<li><a href="/assets/files/press-kit.zip">Press Kit</a></li>
|
||||
|
||||
Reference in New Issue
Block a user