mirror of https://github.com/kemko/nomad.git
synced 2026-01-08 11:25:41 +03:00

Merge remote-tracking branch 'origin/master' into f-ui/alloc-fs

* origin/master: (32 commits)
  Added additional test cases and fixed go test case
  update changelog
  Add Mirage-toggling via environment variable (#5899)
  changelog: Add entries for windows fixes
  fifo: Safer access to Conn
  run post-run/post-stop task runner hooks
  Fail alloc if alloc runner prestart hooks fail
  address review comments
  changelog
  Missed one revert of backwards compatibility for node drain
  Improve test cases for detecting content type
  Undo removal of node drain compat changes
  Updated with suggestions.
  fifo: Close connections and cleanup lock handling
  logmon: Add windows compatibility test
  client: defensive against getting stale alloc updates
  Infer content type in alloc fs stat endpoint
  appveyor: Run logmon tests
  fifo: Require that fifos do not exist for create
  vendor: Use dani fork of go-winio
  ...
@@ -1,8 +1,9 @@
 ## 0.9.4 (Unreleased)

 IMPROVEMENTS:

 * core: removed deprecated upgrade path code pertaining to older versions of Nomad [[GH-5894](https://github.com/hashicorp/nomad/issues/5894)]
 * api: use region from job hcl when not provided as query parameter in job registration and plan endpoints [[GH-5664](https://github.com/hashicorp/nomad/pull/5664)]
+* api: infer content type of file in alloc filesystem stat endpoint [[GH-5907](https://github.com/hashicorp/nomad/issues/5907)]
 * metrics: add namespace label as appropriate to metrics [[GH-5847](https://github.com/hashicorp/nomad/issues/5847)]
 * ui: Moved client status, draining, and eligibility fields into single state column [[GH-5789](https://github.com/hashicorp/nomad/pull/5789)]

@@ -11,7 +12,9 @@ BUG FIXES:
 * core: Improved job spec parsing error messages for variable interpolation failures [[GH-5844](https://github.com/hashicorp/nomad/issues/5844)]
 * core: Handle error case when attempting to stop a non-existent allocation [[GH-5865](https://github.com/hashicorp/nomad/issues/5865)]
 * client: Fixed regression that prevented registering multiple services with the same name but different ports in Consul correctly [[GH-5829](https://github.com/hashicorp/nomad/issues/5829)]
+* client: Fixed a race condition when performing local task restarts that would result in incorrect task not found errors on Windows [[GH-5889](https://github.com/hashicorp/nomad/pull/5889)]
 * driver: Fixed an issue preventing external driver plugins from launching executor process [[GH-5726](https://github.com/hashicorp/nomad/issues/5726)]
+* driver: Fixed an issue preventing local task restarts on Windows [[GH-5864](https://github.com/hashicorp/nomad/pull/5864)]
 * driver/docker: Fixed a bug mounting relative paths on Windows [[GH-5811](https://github.com/hashicorp/nomad/issues/5811)]
 * driver/exec: Upgraded libcontainer dependency to avoid zombie `runc:[1:CHILD]]` processes [[GH-5851](https://github.com/hashicorp/nomad/issues/5851)]
 * metrics: Upgrade prometheus client to avoid label conflicts [[GH-5850](https://github.com/hashicorp/nomad/issues/5850)]
api/fs.go
@@ -20,11 +20,12 @@ const (

 // AllocFileInfo holds information about a file inside the AllocDir
 type AllocFileInfo struct {
-	Name     string
-	IsDir    bool
-	Size     int64
-	FileMode string
-	ModTime  time.Time
+	Name        string
+	IsDir       bool
+	Size        int64
+	FileMode    string
+	ModTime     time.Time
+	ContentType string
 }

 // StreamFrame is used to frame data of a file when streaming
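The new ContentType field flows straight through to API consumers. A minimal sketch of reading it with the Go client, assuming a reachable local agent and a placeholder allocation ID (the setup, the file path, and the error handling are illustrative assumptions, not part of this diff):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// Connect to a local Nomad agent (address/config are assumptions).
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// "alloc-id" is a placeholder for a real allocation ID.
	alloc, _, err := client.Allocations().Info("alloc-id", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Stat a file inside the allocation's directory; the response now
	// carries the inferred ContentType alongside the existing fields.
	info, _, err := client.AllocFS().Stat(alloc, "alloc/logs/app.log", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(info.Name, info.Size, info.ContentType)
}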
@@ -46,6 +46,7 @@ test_script:
     gotestsum --junitfile results.xml
       github.com/hashicorp/nomad/drivers/docker
      github.com/hashicorp/nomad/client/lib/fifo
+      github.com/hashicorp/nomad/client/logmon
 # on_finish:
 #   - ps: |
 #       Push-AppveyorArtifact (Resolve-Path .\results.xml)
@@ -11,6 +11,9 @@ import (
 	"sync"
 	"time"

+	"net/http"
+	"strings"
+
 	hclog "github.com/hashicorp/go-hclog"
 	multierror "github.com/hashicorp/go-multierror"
 	cstructs "github.com/hashicorp/nomad/client/structs"
@@ -392,15 +395,41 @@ func (d *AllocDir) Stat(path string) (*cstructs.AllocFileInfo, error) {
 		return nil, err
 	}

+	contentType := detectContentType(info, p)
+
 	return &cstructs.AllocFileInfo{
-		Size:     info.Size(),
-		Name:     info.Name(),
-		IsDir:    info.IsDir(),
-		FileMode: info.Mode().String(),
-		ModTime:  info.ModTime(),
+		Size:        info.Size(),
+		Name:        info.Name(),
+		IsDir:       info.IsDir(),
+		FileMode:    info.Mode().String(),
+		ModTime:     info.ModTime(),
+		ContentType: contentType,
 	}, nil
 }

+// detectContentType tries to infer the file type by reading the first
+// 512 bytes of the file. Json file extensions are special cased.
+func detectContentType(fileInfo os.FileInfo, path string) string {
+	contentType := "application/octet-stream"
+	if !fileInfo.IsDir() {
+		f, err := os.Open(path)
+		// Best effort content type detection
+		// We ignore errors because this is optional information
+		if err == nil {
+			fileBytes := make([]byte, 512)
+			_, err := f.Read(fileBytes)
+			if err == nil {
+				contentType = http.DetectContentType(fileBytes)
+			}
+		}
+	}
+	// Special case json files
+	if strings.HasSuffix(path, ".json") {
+		contentType = "application/json"
+	}
+	return contentType
+}
+
 // ReadAt returns a reader for a file at the path relative to the alloc dir
 func (d *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) {
 	if escapes, err := structs.PathEscapesAllocDir("", path); err != nil {
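The detection above leans on Go's standard-library sniffer: http.DetectContentType considers at most the first 512 bytes, always returns a valid MIME type, and falls back to application/octet-stream, which is why the 512-byte read plus the .json special case is all the function needs (JSON has no sniffable signature and reads as plain text). A self-contained illustration:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// PNG magic bytes are enough for the sniffer to identify the type.
	png := []byte("\x89PNG\r\n\x1a\n")
	fmt.Println(http.DetectContentType(png)) // image/png

	// JSON looks like plain text to the sniffer, hence the extension
	// special case in detectContentType above.
	fmt.Println(http.DetectContentType([]byte(`{"test":"test"}`))) // text/plain; charset=utf-8
}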
@@ -472,3 +472,32 @@ func TestPathFuncs(t *testing.T) {
 		t.Errorf("%q is not empty. empty=%v error=%v", dir, empty, err)
 	}
 }
+
+func TestAllocDir_DetectContentType(t *testing.T) {
+	require := require.New(t)
+
+	inputPath := "input/"
+	var testFiles []string
+	err := filepath.Walk(inputPath, func(path string, info os.FileInfo, err error) error {
+		if !info.IsDir() {
+			testFiles = append(testFiles, path)
+		}
+		return err
+	})
+	require.Nil(err)
+
+	expectedEncodings := map[string]string{
+		"input/happy.gif": "image/gif",
+		"input/image.png": "image/png",
+		"input/nomad.jpg": "image/jpeg",
+		"input/test.bin":  "application/octet-stream",
+		"input/test.json": "application/json",
+		"input/test.txt":  "text/plain; charset=utf-8",
+		"input/test.go":   "text/plain; charset=utf-8",
+	}
+	for _, file := range testFiles {
+		fileInfo, err := os.Stat(file)
+		require.Nil(err)
+		res := detectContentType(fileInfo, file)
+		require.Equal(expectedEncodings[file], res, "unexpected output for %v", file)
+	}
+}
New binary test fixtures (contents not shown):
  client/allocdir/input/happy.gif (475 KiB)
  client/allocdir/input/image.png (164 KiB)
  client/allocdir/input/nomad.jpg (72 KiB)
  client/allocdir/input/test.bin
client/allocdir/input/test.go (new file, 26 lines)
@@ -0,0 +1,26 @@
+package allocdir
+
+import (
+	"os"
+	"syscall"
+)
+
+// linkDir hardlinks src to dst. The src and dst must be on the same filesystem.
+func linkDir(src, dst string) error {
+	return syscall.Link(src, dst)
+}
+
+// unlinkDir removes a directory link.
+func unlinkDir(dir string) error {
+	return syscall.Unlink(dir)
+}
+
+// createSecretDir creates the secrets dir folder at the given path
+func createSecretDir(dir string) error {
+	return os.MkdirAll(dir, 0777)
+}
+
+// removeSecretDir removes the secrets dir folder
+func removeSecretDir(dir string) error {
+	return os.RemoveAll(dir)
+}
client/allocdir/input/test.json (new file, 3 lines)
@@ -0,0 +1,3 @@
+{
+  "test":"test"
+}

client/allocdir/input/test.txt (new file, 1 line)
@@ -0,0 +1 @@
+hello world
@@ -248,10 +248,18 @@ func (ar *allocRunner) Run() {
 	default:
 	}

+	// When handling (potentially restored) terminal alloc, ensure tasks and post-run hooks are run
+	// to perform any cleanup that's necessary, potentially not done prior to earlier termination
+
 	// Run the prestart hooks if non-terminal
 	if ar.shouldRun() {
 		if err := ar.prerun(); err != nil {
 			ar.logger.Error("prerun failed", "error", err)
+
+			for _, tr := range ar.tasks {
+				tr.MarkFailedDead(fmt.Sprintf("failed to setup runner: %v", err))
+			}
+
 			goto POST
 		}
 	}
@@ -131,3 +131,163 @@ func TestAllocRunner_Restore_RunningTerminal(t *testing.T) {
 	require.Equal(t, events[2].Type, structs.TaskStarted)
 	require.Equal(t, events[3].Type, structs.TaskTerminated)
 }
+
+// TestAllocRunner_Restore_CompletedBatch asserts that restoring a completed
+// batch alloc doesn't run it again
+func TestAllocRunner_Restore_CompletedBatch(t *testing.T) {
+	t.Parallel()
+
+	// 1. Run task and wait for it to complete
+	// 2. Start new alloc runner
+	// 3. Assert task didn't run again
+
+	alloc := mock.Alloc()
+	alloc.Job.Type = structs.JobTypeBatch
+	task := alloc.Job.TaskGroups[0].Tasks[0]
+	task.Driver = "mock_driver"
+	task.Config = map[string]interface{}{
+		"run_for": "2ms",
+	}
+
+	conf, cleanup := testAllocRunnerConfig(t, alloc.Copy())
+	defer cleanup()
+
+	// Maintain state for subsequent run
+	conf.StateDB = state.NewMemDB(conf.Logger)
+
+	// Start and wait for task to be running
+	ar, err := NewAllocRunner(conf)
+	require.NoError(t, err)
+	go ar.Run()
+	defer destroy(ar)
+
+	testutil.WaitForResult(func() (bool, error) {
+		s := ar.AllocState()
+		if s.ClientStatus != structs.AllocClientStatusComplete {
+			return false, fmt.Errorf("expected complete, got %s", s.ClientStatus)
+		}
+		return true, nil
+	}, func(err error) {
+		require.NoError(t, err)
+	})
+
+	// once job finishes, it shouldn't run again
+	require.False(t, ar.shouldRun())
+	initialRunEvents := ar.AllocState().TaskStates[task.Name].Events
+	require.Len(t, initialRunEvents, 4)
+
+	ls, ts, err := conf.StateDB.GetTaskRunnerState(alloc.ID, task.Name)
+	require.NoError(t, err)
+	require.NotNil(t, ls)
+	require.Equal(t, structs.TaskStateDead, ts.State)
+
+	// Start a new alloc runner and assert it gets stopped
+	conf2, cleanup2 := testAllocRunnerConfig(t, alloc)
+	defer cleanup2()
+
+	// Use original statedb to maintain hook state
+	conf2.StateDB = conf.StateDB
+
+	// Restore, start, and wait for task to be killed
+	ar2, err := NewAllocRunner(conf2)
+	require.NoError(t, err)
+
+	require.NoError(t, ar2.Restore())
+
+	go ar2.Run()
+	defer destroy(ar2)
+
+	// AR waitCh must be closed even when task doesn't run again
+	select {
+	case <-ar2.WaitCh():
+	case <-time.After(10 * time.Second):
+		require.Fail(t, "alloc.waitCh wasn't closed")
+	}
+
+	// TR waitCh must be closed too!
+	select {
+	case <-ar2.tasks[task.Name].WaitCh():
+	case <-time.After(10 * time.Second):
+		require.Fail(t, "tr.waitCh wasn't closed")
+	}
+
+	// Assert that events are unmodified; they would have changed if the task re-ran
+	events := ar2.AllocState().TaskStates[task.Name].Events
+	require.Equal(t, initialRunEvents, events)
+}
+
+// TestAllocRunner_PreStartFailuresLeadToFailed asserts that if an alloc
+// prestart hooks failed, then the alloc and subsequent tasks transition
+// to failed state
+func TestAllocRunner_PreStartFailuresLeadToFailed(t *testing.T) {
+	t.Parallel()
+
+	alloc := mock.Alloc()
+	alloc.Job.Type = structs.JobTypeBatch
+	task := alloc.Job.TaskGroups[0].Tasks[0]
+	task.Driver = "mock_driver"
+	task.Config = map[string]interface{}{
+		"run_for": "2ms",
+	}
+	alloc.Job.TaskGroups[0].RestartPolicy = &structs.RestartPolicy{
+		Attempts: 0,
+	}
+
+	conf, cleanup := testAllocRunnerConfig(t, alloc.Copy())
+	defer cleanup()
+
+	// Maintain state for subsequent run
+	conf.StateDB = state.NewMemDB(conf.Logger)
+
+	// Start and wait for task to be running
+	ar, err := NewAllocRunner(conf)
+	require.NoError(t, err)
+
+	ar.runnerHooks = append(ar.runnerHooks, &allocFailingPrestartHook{})
+
+	go ar.Run()
+	defer destroy(ar)
+
+	select {
+	case <-ar.WaitCh():
+	case <-time.After(10 * time.Second):
+		require.Fail(t, "alloc.waitCh wasn't closed")
+	}
+
+	testutil.WaitForResult(func() (bool, error) {
+		s := ar.AllocState()
+		if s.ClientStatus != structs.AllocClientStatusFailed {
+			return false, fmt.Errorf("expected failed, got %s", s.ClientStatus)
+		}
+		return true, nil
+	}, func(err error) {
+		require.NoError(t, err)
+	})
+
+	// once job finishes, it shouldn't run again
+	require.False(t, ar.shouldRun())
+	initialRunEvents := ar.AllocState().TaskStates[task.Name].Events
+	require.Len(t, initialRunEvents, 2)
+
+	ls, ts, err := conf.StateDB.GetTaskRunnerState(alloc.ID, task.Name)
+	require.NoError(t, err)
+	require.NotNil(t, ls)
+	require.NotNil(t, ts)
+	require.Equal(t, structs.TaskStateDead, ts.State)
+	require.True(t, ts.Failed)
+
+	// TR waitCh must be closed too!
+	select {
+	case <-ar.tasks[task.Name].WaitCh():
+	case <-time.After(10 * time.Second):
+		require.Fail(t, "tr.waitCh wasn't closed")
+	}
+}
+
+type allocFailingPrestartHook struct{}
+
+func (*allocFailingPrestartHook) Name() string { return "failing_prestart" }
+
+func (*allocFailingPrestartHook) Prerun() error {
+	return fmt.Errorf("failing prestart hooks")
+}
@@ -388,12 +388,57 @@ func (tr *TaskRunner) initLabels() {
 	}
 }

+// MarkFailedDead marks a task as failed and not to run. Aimed to be invoked
+// when alloc runner prestart hooks fail.
+// Should never be called with Run().
+func (tr *TaskRunner) MarkFailedDead(reason string) {
+	defer close(tr.waitCh)
+
+	tr.stateLock.Lock()
+	if err := tr.stateDB.PutTaskRunnerLocalState(tr.allocID, tr.taskName, tr.localState); err != nil {
+		//TODO Nomad will be unable to restore this task; try to kill
+		//     it now and fail? In general we prefer to leave running
+		//     tasks running even if the agent encounters an error.
+		tr.logger.Warn("error persisting local failed task state; may be unable to restore after a Nomad restart",
+			"error", err)
+	}
+	tr.stateLock.Unlock()
+
+	event := structs.NewTaskEvent(structs.TaskSetupFailure).
+		SetDisplayMessage(reason).
+		SetFailsTask()
+	tr.UpdateState(structs.TaskStateDead, event)
+
+	// Run the stop hooks in case task was a restored task that failed prestart
+	if err := tr.stop(); err != nil {
+		tr.logger.Error("stop failed while marking task dead", "error", err)
+	}
+}
+
 // Run the TaskRunner. Starts the user's task or reattaches to a restored task.
 // Run closes WaitCh when it exits. Should be started in a goroutine.
 func (tr *TaskRunner) Run() {
 	defer close(tr.waitCh)
 	var result *drivers.ExitResult

+	tr.stateLock.RLock()
+	dead := tr.state.State == structs.TaskStateDead
+	tr.stateLock.RUnlock()
+
+	// if restoring a dead task, ensure that task is cleared and all post hooks
+	// are called without additional state updates
+	if dead {
+		// do cleanup functions without emitting any additional events/work
+		// to handle cases where we restored a dead task where client terminated
+		// after task finished before completing post-run actions.
+		tr.clearDriverHandle()
+		tr.stateUpdater.TaskStateUpdated()
+		if err := tr.stop(); err != nil {
+			tr.logger.Error("stop failed on terminal task", "error", err)
+		}
+		return
+	}
+
 	// Updates are handled asynchronously with the other hooks but each
 	// triggered update - whether due to alloc updates or a new vault token
 	// - should be handled serially.
@@ -899,7 +944,7 @@ func (tr *TaskRunner) Restore() error {
 	}

 	alloc := tr.Alloc()
-	if alloc.TerminalStatus() || alloc.Job.Type == structs.JobTypeSystem {
+	if tr.state.State == structs.TaskStateDead || alloc.TerminalStatus() || alloc.Job.Type == structs.JobTypeSystem {
 		return nil
 	}
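Both MarkFailedDead and Run open with defer close(tr.waitCh); that shared contract is what lets the alloc runner (and the tests earlier in this diff) block on WaitCh() regardless of which exit path a task takes. The channel-close idiom in isolation, as a small hypothetical sketch (the runner type and names here are illustrative, not Nomad's):

package main

import "fmt"

type runner struct {
	waitCh chan struct{}
}

// WaitCh lets callers block until the runner is done, whichever exit
// path it took; closing the channel unblocks every waiter at once.
func (r *runner) WaitCh() <-chan struct{} { return r.waitCh }

func (r *runner) run() {
	defer close(r.waitCh) // closed on every return path
	// ... do work ...
}

func main() {
	r := &runner{waitCh: make(chan struct{})}
	go r.run()
	<-r.WaitCh()
	fmt.Println("runner finished")
}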
@@ -1767,6 +1767,9 @@ func (c *Client) allocSync() {
 // allocUpdates holds the results of receiving updated allocations from the
 // servers.
 type allocUpdates struct {
+	// index is the index of the server store snapshot used for fetching alloc status
+	index uint64
+
 	// pulled is the set of allocations that were downloaded from the servers.
 	pulled map[string]*structs.Allocation

@@ -1944,6 +1947,7 @@ OUTER:
 			filtered:      filtered,
 			pulled:        pulledAllocs,
 			migrateTokens: resp.MigrateTokens,
+			index:         resp.Index,
 		}

 		select {
@@ -98,7 +98,7 @@ func TestFS_Stat(t *testing.T) {
 	// Wait for alloc to be running
 	alloc := testutil.WaitForRunning(t, s.RPC, job)[0]

-	// Make the request with bad allocation id
+	// Make the request
 	req := &cstructs.FsStatRequest{
 		AllocID: alloc.ID,
 		Path:    "/",
@@ -11,21 +11,23 @@ import (
 )

 // CreateAndRead creates a fifo at the given path, and returns an open function for reading.
-// The fifo must not exist already, or that it's already a fifo file
+// For compatibility with windows, the fifo must not exist already.
 //
 // It returns a reader open function that may block until a writer opens
 // so it's advised to run it in a goroutine different from reader goroutine
 func CreateAndRead(path string) (func() (io.ReadCloser, error), error) {
 	// create first
-	if err := mkfifo(path, 0600); err != nil && !os.IsExist(err) {
+	if err := mkfifo(path, 0600); err != nil {
 		return nil, fmt.Errorf("error creating fifo %v: %v", path, err)
 	}

-	openFn := func() (io.ReadCloser, error) {
-		return os.OpenFile(path, unix.O_RDONLY, os.ModeNamedPipe)
-	}
+	return func() (io.ReadCloser, error) {
+		return OpenReader(path)
+	}, nil
+}

-	return openFn, nil
+func OpenReader(path string) (io.ReadCloser, error) {
+	return os.OpenFile(path, unix.O_RDONLY, os.ModeNamedPipe)
 }

 // OpenWriter opens a fifo file for writer, assuming it already exists, returns io.WriteCloser
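After this refactor the package exposes three entry points: CreateAndRead for the first open, OpenReader for reattaching to an existing fifo, and OpenWriter for the producing side. A minimal usage sketch under the unix build, assuming a throwaway path that does not exist yet (the copy loop and error handling are illustrative):

package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/hashicorp/nomad/client/lib/fifo"
)

func main() {
	path := "/tmp/example.fifo" // assumption: path does not exist yet

	// CreateAndRead makes the fifo and hands back an open function; the
	// open blocks until a writer connects, so it runs in its own goroutine.
	openFn, err := fifo.CreateAndRead(path)
	if err != nil {
		log.Fatal(err)
	}

	done := make(chan struct{})
	go func() {
		defer close(done)
		r, err := openFn()
		if err != nil {
			log.Print(err)
			return
		}
		defer r.Close()
		io.Copy(os.Stdout, r) // drain until the writer closes
	}()

	// OpenWriter assumes the fifo already exists (CreateAndRead made it).
	w, err := fifo.OpenWriter(path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Fprintln(w, "hello fifo")
	w.Close()
	<-done
}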
@@ -1,6 +1,7 @@
 package fifo

 import (
+	"fmt"
 	"io"
 	"net"
 	"os"
@@ -20,21 +21,29 @@ type winFIFO struct {
 	connLock sync.Mutex
 }

-func (f *winFIFO) Read(p []byte) (n int, err error) {
+func (f *winFIFO) ensureConn() (net.Conn, error) {
 	f.connLock.Lock()
 	defer f.connLock.Unlock()
 	if f.conn == nil {
 		c, err := f.listener.Accept()
 		if err != nil {
-			return 0, err
+			return nil, err
 		}

 		f.conn = c
 	}

+	return f.conn, nil
+}
+
+func (f *winFIFO) Read(p []byte) (n int, err error) {
+	conn, err := f.ensureConn()
+	if err != nil {
+		return 0, err
+	}
+
 	// If the connection is closed then we need to close the listener
 	// to emulate unix fifo behavior
-	n, err = f.conn.Read(p)
+	n, err = conn.Read(p)
 	if err == io.EOF {
 		f.listener.Close()
 	}
@@ -42,21 +51,16 @@ func (f *winFIFO) Read(p []byte) (n int, err error) {
 }

 func (f *winFIFO) Write(p []byte) (n int, err error) {
-	f.connLock.Lock()
-	defer f.connLock.Unlock()
-	if f.conn == nil {
-		c, err := f.listener.Accept()
-		if err != nil {
-			return 0, err
-		}
-
-		f.conn = c
+	conn, err := f.ensureConn()
+	if err != nil {
+		return 0, err
 	}

 	// If the connection is closed then we need to close the listener
 	// to emulate unix fifo behavior
-	n, err = f.conn.Write(p)
+	n, err = conn.Write(p)
 	if err == io.EOF {
+		conn.Close()
 		f.listener.Close()
 	}
 	return n, err
@@ -64,6 +68,11 @@ func (f *winFIFO) Write(p []byte) (n int, err error) {
 }

 func (f *winFIFO) Close() error {
+	f.connLock.Lock()
+	if f.conn != nil {
+		f.conn.Close()
+	}
+	f.connLock.Unlock()
 	return f.listener.Close()
 }

@@ -75,16 +84,23 @@ func CreateAndRead(path string) (func() (io.ReadCloser, error), error) {
 		OutputBufferSize: PipeBufferSize,
 	})
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to create fifo: %v", err)
 	}

-	openFn := func() (io.ReadCloser, error) {
+	return func() (io.ReadCloser, error) {
 		return &winFIFO{
 			listener: l,
 		}, nil
-	}
+	}, nil
+}

-	return openFn, nil
+func OpenReader(path string) (io.ReadCloser, error) {
+	l, err := winio.ListenOnlyPipe(path, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open fifo listener: %v", err)
+	}
+
+	return &winFIFO{listener: l}, nil
 }

 // OpenWriter opens a fifo that already exists and returns an io.WriteCloser for it
@@ -3,6 +3,7 @@ package logmon
 import (
 	"fmt"
 	"io"
+	"os"
 	"strings"
 	"sync"
 	"time"
@@ -199,7 +200,18 @@ func (l *logRotatorWrapper) isRunning() bool {
 // processOutWriter to attach to the stdout or stderr of a process.
 func newLogRotatorWrapper(path string, logger hclog.Logger, rotator *logging.FileRotator) (*logRotatorWrapper, error) {
 	logger.Info("opening fifo", "path", path)
-	fifoOpenFn, err := fifo.CreateAndRead(path)
+
+	var openFn func() (io.ReadCloser, error)
+	var err error
+
+	if _, ferr := os.Stat(path); os.IsNotExist(ferr) {
+		openFn, err = fifo.CreateAndRead(path)
+	} else {
+		openFn = func() (io.ReadCloser, error) {
+			return fifo.OpenReader(path)
+		}
+	}
+
 	if err != nil {
 		return nil, fmt.Errorf("failed to create fifo for extracting logs: %v", err)
 	}
@@ -211,20 +223,20 @@ func newLogRotatorWrapper(path string, logger hclog.Logger, rotator *logging.Fil
 		openCompleted:     make(chan struct{}),
 		logger:            logger,
 	}
-	wrap.start(fifoOpenFn)
+	wrap.start(openFn)
 	return wrap, nil
 }

 // start starts a goroutine that copies from the pipe into the rotator. This is
 // called by the constructor and not the user of the wrapper.
-func (l *logRotatorWrapper) start(readerOpenFn func() (io.ReadCloser, error)) {
+func (l *logRotatorWrapper) start(openFn func() (io.ReadCloser, error)) {
 	go func() {
 		defer close(l.hasFinishedCopied)

-		reader, err := readerOpenFn()
+		reader, err := openFn()
 		if err != nil {
 			close(l.openCompleted)
-			l.logger.Warn("failed to open log fifo", "error", err)
+			l.logger.Warn("failed to open fifo", "error", err)
 			return
 		}
 		l.processOutReader = reader
@@ -284,5 +296,4 @@ func (l *logRotatorWrapper) Close() {
 	}

 	l.rotatorWriter.Close()
-	return
 }
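The create-or-open decision above rests on os.Stat plus os.IsNotExist; note that any other stat error also lands in the open-existing branch, where the subsequent open surfaces the real problem. A standalone sketch of that branch logic, with a hypothetical path:

package main

import (
	"fmt"
	"os"
)

func main() {
	path := "/tmp/example.fifo" // hypothetical

	if _, err := os.Stat(path); os.IsNotExist(err) {
		fmt.Println("fifo missing: create it, then open for reading")
	} else {
		// Covers both "exists" and unexpected stat errors; the logmon
		// change above takes this branch in either case and lets the
		// later open report any real failure.
		fmt.Println("fifo already exists: just open it for reading")
	}
}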
@@ -6,6 +6,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"runtime"
 	"testing"

 	"github.com/hashicorp/nomad/client/lib/fifo"
@@ -16,19 +17,25 @@ import (

 func TestLogmon_Start_rotate(t *testing.T) {
 	require := require.New(t)
+	var stdoutFifoPath, stderrFifoPath string
+
 	dir, err := ioutil.TempDir("", "nomadtest")
 	require.NoError(err)
 	defer os.RemoveAll(dir)
-	stdoutLog := "stdout"
-	stdoutFifoPath := filepath.Join(dir, "stdout.fifo")
-	stderrLog := "stderr"
-	stderrFifoPath := filepath.Join(dir, "stderr.fifo")
+
+	if runtime.GOOS == "windows" {
+		stdoutFifoPath = "//./pipe/test-rotate.stdout"
+		stderrFifoPath = "//./pipe/test-rotate.stderr"
+	} else {
+		stdoutFifoPath = filepath.Join(dir, "stdout.fifo")
+		stderrFifoPath = filepath.Join(dir, "stderr.fifo")
+	}
+
 	cfg := &LogConfig{
 		LogDir:        dir,
-		StdoutLogFile: stdoutLog,
+		StdoutLogFile: "stdout",
 		StdoutFifo:    stdoutFifoPath,
-		StderrLogFile: stderrLog,
+		StderrLogFile: "stderr",
 		StderrFifo:    stderrFifoPath,
 		MaxFiles:      2,
 		MaxFileSizeMB: 1,
@@ -66,22 +73,33 @@ func TestLogmon_Start_rotate(t *testing.T) {
 	require.NoError(lm.Stop())
 }

-// asserts that calling Start twice restarts the log rotator
-func TestLogmon_Start_restart(t *testing.T) {
+// asserts that calling Start twice restarts the log rotator and that any logs
+// published while the listener was unavailable are received.
+func TestLogmon_Start_restart_flusheslogs(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("windows does not support pushing data to a pipe with no servers")
+	}
+
 	require := require.New(t)
+	var stdoutFifoPath, stderrFifoPath string
+
 	dir, err := ioutil.TempDir("", "nomadtest")
 	require.NoError(err)
 	defer os.RemoveAll(dir)
-	stdoutLog := "stdout"
-	stdoutFifoPath := filepath.Join(dir, "stdout.fifo")
-	stderrLog := "stderr"
-	stderrFifoPath := filepath.Join(dir, "stderr.fifo")
+
+	if runtime.GOOS == "windows" {
+		stdoutFifoPath = "//./pipe/test-restart.stdout"
+		stderrFifoPath = "//./pipe/test-restart.stderr"
+	} else {
+		stdoutFifoPath = filepath.Join(dir, "stdout.fifo")
+		stderrFifoPath = filepath.Join(dir, "stderr.fifo")
+	}
+
 	cfg := &LogConfig{
 		LogDir:        dir,
-		StdoutLogFile: stdoutLog,
+		StdoutLogFile: "stdout",
 		StdoutFifo:    stdoutFifoPath,
-		StderrLogFile: stderrLog,
+		StderrLogFile: "stderr",
 		StderrFifo:    stderrFifoPath,
 		MaxFiles:      2,
 		MaxFileSizeMB: 1,
@@ -162,3 +180,88 @@ func TestLogmon_Start_restart(t *testing.T) {
 		require.NoError(err)
 	})
 }
+
+// asserts that calling Start twice restarts the log rotator
+func TestLogmon_Start_restart(t *testing.T) {
+	require := require.New(t)
+	var stdoutFifoPath, stderrFifoPath string
+
+	dir, err := ioutil.TempDir("", "nomadtest")
+	require.NoError(err)
+	defer os.RemoveAll(dir)
+
+	if runtime.GOOS == "windows" {
+		stdoutFifoPath = "//./pipe/test-restart.stdout"
+		stderrFifoPath = "//./pipe/test-restart.stderr"
+	} else {
+		stdoutFifoPath = filepath.Join(dir, "stdout.fifo")
+		stderrFifoPath = filepath.Join(dir, "stderr.fifo")
+	}
+
+	cfg := &LogConfig{
+		LogDir:        dir,
+		StdoutLogFile: "stdout",
+		StdoutFifo:    stdoutFifoPath,
+		StderrLogFile: "stderr",
+		StderrFifo:    stderrFifoPath,
+		MaxFiles:      2,
+		MaxFileSizeMB: 1,
+	}
+
+	lm := NewLogMon(testlog.HCLogger(t))
+	impl, ok := lm.(*logmonImpl)
+	require.True(ok)
+	require.NoError(lm.Start(cfg))
+
+	stdout, err := fifo.OpenWriter(stdoutFifoPath)
+	require.NoError(err)
+	stderr, err := fifo.OpenWriter(stderrFifoPath)
+	require.NoError(err)
+
+	// Write a string and assert it was written to the file
+	_, err = stdout.Write([]byte("test\n"))
+	require.NoError(err)
+
+	testutil.WaitForResult(func() (bool, error) {
+		raw, err := ioutil.ReadFile(filepath.Join(dir, "stdout.0"))
+		if err != nil {
+			return false, err
+		}
+		return "test\n" == string(raw), fmt.Errorf("unexpected stdout %q", string(raw))
+	}, func(err error) {
+		require.NoError(err)
+	})
+	require.True(impl.tl.IsRunning())
+
+	// Close stdout and assert that logmon no longer writes to the file
+	require.NoError(stdout.Close())
+	require.NoError(stderr.Close())
+
+	testutil.WaitForResult(func() (bool, error) {
+		return !impl.tl.IsRunning(), fmt.Errorf("logmon is still running")
+	}, func(err error) {
+		require.NoError(err)
+	})
+
+	// Start logmon again and assert that it can receive logs again
+	require.NoError(lm.Start(cfg))
+
+	stdout, err = fifo.OpenWriter(stdoutFifoPath)
+	require.NoError(err)
+	stderr, err = fifo.OpenWriter(stderrFifoPath)
+	require.NoError(err)
+
+	_, err = stdout.Write([]byte("test\n"))
+	require.NoError(err)
+	testutil.WaitForResult(func() (bool, error) {
+		raw, err := ioutil.ReadFile(filepath.Join(dir, "stdout.0"))
+		if err != nil {
+			return false, err
+		}
+
+		expected := "test\ntest\n" == string(raw)
+		return expected, fmt.Errorf("unexpected stdout %q", string(raw))
+	}, func(err error) {
+		require.NoError(err)
+	})
+}
@@ -36,11 +36,12 @@ type ClientStatsResponse struct {

 // AllocFileInfo holds information about a file inside the AllocDir
 type AllocFileInfo struct {
-	Name     string
-	IsDir    bool
-	Size     int64
-	FileMode string
-	ModTime  time.Time
+	Name        string
+	IsDir       bool
+	Size        int64
+	FileMode    string
+	ModTime     time.Time
+	ContentType string `json:"contenttype,omitempty"`
 }

 // FsListRequest is used to list an allocation's directory.
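The omitempty tag keeps the wire format clean: entries whose ContentType stays at the empty-string default (directories, for instance) omit the key entirely instead of reporting a bogus type. A small illustration with a trimmed stand-in for the struct above:

package main

import (
	"encoding/json"
	"fmt"
)

type fileInfo struct {
	Name        string
	ContentType string `json:"contenttype,omitempty"`
}

func main() {
	withType, _ := json.Marshal(fileInfo{Name: "app.log", ContentType: "text/plain; charset=utf-8"})
	fmt.Println(string(withType)) // {"Name":"app.log","contenttype":"text/plain; charset=utf-8"}

	// Empty ContentType (e.g., a directory) omits the key entirely.
	noType, _ := json.Marshal(fileInfo{Name: "alloc"})
	fmt.Println(string(noType)) // {"Name":"alloc"}
}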
@@ -33,7 +33,7 @@ func diffAllocs(existing map[string]uint64, allocs *allocUpdates) *diffResult {
 		_, filtered := allocs.filtered[existID]

 		// If not updated or filtered, removed
-		if !pulled && !filtered {
+		if !pulled && !filtered && allocs.index > existIndex {
 			result.removed = append(result.removed, existID)
 			continue
 		}
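The new index field and this guard act together: a removal is honored only when the server snapshot behind the update is newer than the index at which the client last saw the allocation, so a stale read can no longer delete a live alloc. A toy version of the predicate (names and values here are hypothetical, not Nomad's types):

package main

import "fmt"

// shouldRemove mirrors the guard in diffAllocs: an alloc the server no
// longer reports is removed only if the update snapshot is newer than
// the client's known modify index for that alloc.
func shouldRemove(pulled, filtered bool, updateIndex, existIndex uint64) bool {
	return !pulled && !filtered && updateIndex > existIndex
}

func main() {
	// Stale snapshot (index 90) older than what the client knows (100):
	// keep the alloc rather than act on stale data.
	fmt.Println(shouldRemove(false, false, 90, 100)) // false

	// Fresh snapshot (index 110) that genuinely omits the alloc: remove it.
	fmt.Println(shouldRemove(false, false, 110, 100)) // true
}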
@@ -108,7 +108,7 @@ func (s *HTTPServer) nodeToggleDrain(resp http.ResponseWriter, req *http.Request
 	var drainRequest api.NodeUpdateDrainRequest

-	// COMPAT: Remove in 0.9. Allow the old style enable query param.
+	// COMPAT: Remove in 0.10. Allow the old style enable query param.
 	// Get the enable parameter
 	enableRaw := req.URL.Query().Get("enable")
 	var enable bool
@@ -213,7 +213,7 @@ func (f *AllocFSCommand) Run(args []string) int {
 	if stat {
 		// Display the file information
 		out := make([]string, 2)
-		out[0] = "Mode|Size|Modified Time|Name"
+		out[0] = "Mode|Size|Modified Time|Content Type|Name"
 		if file != nil {
 			fn := file.Name
 			if file.IsDir {
@@ -225,8 +225,8 @@ func (f *AllocFSCommand) Run(args []string) int {
 			} else {
 				size = humanize.IBytes(uint64(file.Size))
 			}
-			out[1] = fmt.Sprintf("%s|%s|%s|%s", file.FileMode, size,
-				formatTime(file.ModTime), fn)
+			out[1] = fmt.Sprintf("%s|%s|%s|%s|%s", file.FileMode, size,
+				formatTime(file.ModTime), file.ContentType, fn)
 		}
 		f.Ui.Output(formatList(out))
 		return 0
@@ -235,7 +235,7 @@ func formatAllocBasicInfo(alloc *api.Allocation, client *api.Client, uuidLength
 		fmt.Sprintf("Node ID|%s", limit(alloc.NodeID, uuidLength)),
 		fmt.Sprintf("Node Name|%s", alloc.NodeName),
 		fmt.Sprintf("Job ID|%s", alloc.JobID),
-		fmt.Sprintf("Job Version|%d", getVersion(alloc.Job)),
+		fmt.Sprintf("Job Version|%d", alloc.Job.Version),
 		fmt.Sprintf("Client Status|%s", alloc.ClientStatus),
 		fmt.Sprintf("Client Description|%s", alloc.ClientDescription),
 		fmt.Sprintf("Desired Status|%s", alloc.DesiredStatus),
@@ -438,32 +438,6 @@ func (j *JobGetter) ApiJob(jpath string) (*api.Job, error) {
 	return jobStruct, nil
 }

-// COMPAT: Remove in 0.7.0
-// Nomad 0.6.0 introduces the submit time field so CLI's interacting with
-// older versions of Nomad would SEGFAULT as reported here:
-// https://github.com/hashicorp/nomad/issues/2918
-// getSubmitTime returns a submit time of the job converting to time.Time
-func getSubmitTime(job *api.Job) time.Time {
-	if job.SubmitTime != nil {
-		return time.Unix(0, *job.SubmitTime)
-	}
-
-	return time.Time{}
-}
-
-// COMPAT: Remove in 0.7.0
-// Nomad 0.6.0 introduces job Versions so CLI's interacting with
-// older versions of Nomad would SEGFAULT as reported here:
-// https://github.com/hashicorp/nomad/issues/2918
-// getVersion returns a version of the job in safely.
-func getVersion(job *api.Job) uint64 {
-	if job.Version != nil {
-		return *job.Version
-	}
-
-	return 0
-}
-
 // mergeAutocompleteFlags is used to join multiple flag completion sets.
 func mergeAutocompleteFlags(flags ...complete.Flags) complete.Flags {
 	merged := make(map[string]complete.Predictor, len(flags))
@@ -169,7 +169,7 @@ func (c *JobStatusCommand) Run(args []string) int {
 	basic := []string{
 		fmt.Sprintf("ID|%s", *job.ID),
 		fmt.Sprintf("Name|%s", *job.Name),
-		fmt.Sprintf("Submit Date|%s", formatTime(getSubmitTime(job))),
+		fmt.Sprintf("Submit Date|%s", formatTime(time.Unix(0, *job.SubmitTime))),
 		fmt.Sprintf("Type|%s", *job.Type),
 		fmt.Sprintf("Priority|%d", *job.Priority),
 		fmt.Sprintf("Datacenters|%s", strings.Join(job.Datacenters, ",")),
@@ -462,7 +462,7 @@ func formatAllocList(allocations []*api.Allocation, verbose bool, uuidLength int
 			limit(alloc.EvalID, uuidLength),
 			limit(alloc.NodeID, uuidLength),
 			alloc.TaskGroup,
-			getVersion(alloc.Job),
+			alloc.Job.Version,
 			alloc.DesiredStatus,
 			alloc.ClientStatus,
 			formatUnixNanoTime(alloc.CreateTime),
@@ -478,7 +478,7 @@ func formatAllocList(allocations []*api.Allocation, verbose bool, uuidLength int
 			limit(alloc.ID, uuidLength),
 			limit(alloc.NodeID, uuidLength),
 			alloc.TaskGroup,
-			getVersion(alloc.Job),
+			alloc.Job.Version,
 			alloc.DesiredStatus,
 			alloc.ClientStatus,
 			createTimePretty,
nomad/fsm.go
@@ -1135,11 +1135,6 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
 				return err
 			}

-			// COMPAT: Handle upgrade to v0.7.0
-			if eval.Namespace == "" {
-				eval.Namespace = structs.DefaultNamespace
-			}
-
 			if err := restore.EvalRestore(eval); err != nil {
 				return err
 			}
@@ -1150,11 +1145,6 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
 				return err
 			}

-			// COMPAT: Handle upgrade to v0.7.0
-			if alloc.Namespace == "" {
-				alloc.Namespace = structs.DefaultNamespace
-			}
-
 			if err := restore.AllocRestore(alloc); err != nil {
 				return err
 			}
@@ -1174,11 +1164,6 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
 				return err
 			}

-			// COMPAT: Handle upgrade to v0.7.0
-			if launch.Namespace == "" {
-				launch.Namespace = structs.DefaultNamespace
-			}
-
 			if err := restore.PeriodicLaunchRestore(launch); err != nil {
 				return err
 			}
@@ -1189,11 +1174,6 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
 				return err
 			}

-			// COMPAT: Handle upgrade to v0.7.0
-			if summary.Namespace == "" {
-				summary.Namespace = structs.DefaultNamespace
-			}
-
 			if err := restore.JobSummaryRestore(summary); err != nil {
 				return err
 			}
@@ -1213,11 +1193,6 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
 				return err
 			}

-			// COMPAT: Handle upgrade to v0.7.0
-			if version.Namespace == "" {
-				version.Namespace = structs.DefaultNamespace
-			}
-
 			if err := restore.JobVersionRestore(version); err != nil {
 				return err
 			}
@@ -1228,11 +1203,6 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
 				return err
 			}

-			// COMPAT: Handle upgrade to v0.7.0
-			if deployment.Namespace == "" {
-				deployment.Namespace = structs.DefaultNamespace
-			}
-
 			if err := restore.DeploymentRestore(deployment); err != nil {
 				return err
 			}
@@ -1280,30 +1250,6 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {

 	restore.Commit()

-	// Create Job Summaries
-	// COMPAT 0.4 -> 0.4.1
-	// We can remove this in 0.5. This exists so that the server creates job
-	// summaries if they were not present previously. When users upgrade to 0.5
-	// from 0.4.1, the snapshot will contain job summaries so it will be safe to
-	// remove this block.
-	index, err := newState.Index("job_summary")
-	if err != nil {
-		return fmt.Errorf("couldn't fetch index of job summary table: %v", err)
-	}
-
-	// If the index is 0 that means there is no job summary in the snapshot so
-	// we will have to create them
-	if index == 0 {
-		// query the latest index
-		latestIndex, err := newState.LatestIndex()
-		if err != nil {
-			return fmt.Errorf("unable to query latest index: %v", index)
-		}
-		if err := newState.ReconcileJobSummaries(latestIndex); err != nil {
-			return fmt.Errorf("error reconciling summaries: %v", err)
-		}
-	}
-
 	// COMPAT Remove in 0.10
 	// Clean up active deployments that do not have a job
 	if err := n.failLeakedDeployments(newState); err != nil {
@@ -2701,47 +2701,6 @@ func TestFSM_SnapshotRestore_SchedulerConfiguration(t *testing.T) {

 }

-func TestFSM_SnapshotRestore_AddMissingSummary(t *testing.T) {
-	t.Parallel()
-	// Add some state
-	fsm := testFSM(t)
-	state := fsm.State()
-
-	// make an allocation
-	alloc := mock.Alloc()
-	state.UpsertJob(1010, alloc.Job)
-	state.UpsertAllocs(1011, []*structs.Allocation{alloc})
-
-	// Delete the summary
-	state.DeleteJobSummary(1040, alloc.Namespace, alloc.Job.ID)
-
-	// Delete the index
-	if err := state.RemoveIndex("job_summary"); err != nil {
-		t.Fatalf("err: %v", err)
-	}
-
-	fsm2 := testSnapshotRestore(t, fsm)
-	state2 := fsm2.State()
-	latestIndex, _ := state.LatestIndex()
-
-	ws := memdb.NewWatchSet()
-	out, _ := state2.JobSummaryByID(ws, alloc.Namespace, alloc.Job.ID)
-	expected := structs.JobSummary{
-		JobID:     alloc.Job.ID,
-		Namespace: alloc.Job.Namespace,
-		Summary: map[string]structs.TaskGroupSummary{
-			"web": {
-				Starting: 1,
-			},
-		},
-		CreateIndex: 1010,
-		ModifyIndex: latestIndex,
-	}
-	if !reflect.DeepEqual(&expected, out) {
-		t.Fatalf("expected: %#v, actual: %#v", &expected, out)
-	}
-}
-
 func TestFSM_ReconcileSummaries(t *testing.T) {
 	t.Parallel()
 	// Add some state
@@ -268,15 +268,6 @@ func (s *Server) establishLeadership(stopCh chan struct{}) error {
 		return err
 	}

-	// COMPAT 0.4 - 0.4.1
-	// Reconcile the summaries of the registered jobs. We reconcile summaries
-	// only if the server is 0.4.1 since summaries are not present in 0.4 they
-	// might be incorrect after upgrading to 0.4.1 the summaries might not be
-	// correct
-	if err := s.reconcileJobSummaries(); err != nil {
-		return fmt.Errorf("unable to reconcile job summaries: %v", err)
-	}
-
 	// Start replication of ACLs and Policies if they are enabled,
 	// and we are not the authoritative region.
 	if s.config.ACLEnabled && s.config.Region != s.config.AuthoritativeRegion {
@@ -798,25 +789,6 @@ func (s *Server) reconcileMember(member serf.Member) error {
 	return nil
 }

-// reconcileJobSummaries reconciles the summaries of all the jobs registered in
-// the system
-// COMPAT 0.4 -> 0.4.1
-func (s *Server) reconcileJobSummaries() error {
-	index, err := s.fsm.state.LatestIndex()
-	if err != nil {
-		return fmt.Errorf("unable to read latest index: %v", err)
-	}
-	s.logger.Debug("leader reconciling job summaries", "index", index)
-
-	args := &structs.GenericResponse{}
-	msg := structs.ReconcileJobSummariesRequestType | structs.IgnoreUnknownTypeFlag
-	if _, _, err = s.raftApply(msg, args); err != nil {
-		return fmt.Errorf("reconciliation of job summaries failed: %v", err)
-	}
-
-	return nil
-}
-
 // addRaftPeer is used to add a new Raft peer when a Nomad server joins
 func (s *Server) addRaftPeer(m serf.Member, parts *serverParts) error {
 	// Check for possibility of multiple bootstrap nodes
@@ -261,9 +261,6 @@ func (s *StateStore) UpsertPlanResults(index uint64, results *structs.ApplyPlanR
 		s.upsertDeploymentUpdates(index, results.DeploymentUpdates, txn)
 	}

-	// COMPAT: Nomad versions before 0.7.1 did not include the eval ID when
-	// applying the plan. Thus while we are upgrading, we ignore updating the
-	// modify index of evaluations from older plans.
 	if results.EvalID != "" {
 		// Update the modify index of the eval id
 		if err := s.updateEvalModifyIndex(txn, index, results.EvalID); err != nil {
@@ -353,11 +350,6 @@ func (s *StateStore) UpsertJobSummary(index uint64, jobSummary *structs.JobSumma
 	txn := s.db.Txn(true)
 	defer txn.Abort()

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if jobSummary.Namespace == "" {
-		jobSummary.Namespace = structs.DefaultNamespace
-	}
-
 	// Check if the job summary already exists
 	existing, err := txn.First("job_summary", "id", jobSummary.Namespace, jobSummary.JobID)
 	if err != nil {
@@ -393,11 +385,6 @@ func (s *StateStore) DeleteJobSummary(index uint64, namespace, id string) error
 	txn := s.db.Txn(true)
 	defer txn.Abort()

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if namespace == "" {
-		namespace = structs.DefaultNamespace
-	}
-
 	// Delete the job summary
 	if _, err := txn.DeleteAll("job_summary", "id", namespace, id); err != nil {
 		return fmt.Errorf("deleting job summary failed: %v", err)
@@ -428,11 +415,6 @@ func (s *StateStore) upsertDeploymentImpl(index uint64, deployment *structs.Depl
 		return fmt.Errorf("deployment lookup failed: %v", err)
 	}

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if deployment.Namespace == "" {
-		deployment.Namespace = structs.DefaultNamespace
-	}
-
 	// Setup the indexes correctly
 	if existing != nil {
 		deployment.CreateIndex = existing.(*structs.Deployment).CreateIndex
@@ -539,11 +521,6 @@ func (s *StateStore) deploymentByIDImpl(ws memdb.WatchSet, deploymentID string,
 func (s *StateStore) DeploymentsByJobID(ws memdb.WatchSet, namespace, jobID string, all bool) ([]*structs.Deployment, error) {
 	txn := s.db.Txn(false)

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if namespace == "" {
-		namespace = structs.DefaultNamespace
-	}
-
 	var job *structs.Job
 	// Read job from state store
 	_, existing, err := txn.FirstWatch("jobs", "id", namespace, jobID)
@@ -587,11 +564,6 @@ func (s *StateStore) DeploymentsByJobID(ws memdb.WatchSet, namespace, jobID stri
 func (s *StateStore) LatestDeploymentByJobID(ws memdb.WatchSet, namespace, jobID string) (*structs.Deployment, error) {
 	txn := s.db.Txn(false)

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if namespace == "" {
-		namespace = structs.DefaultNamespace
-	}
-
 	// Get an iterator over the deployments
 	iter, err := txn.Get("deployment", "job", namespace, jobID)
 	if err != nil {
@@ -820,7 +792,7 @@ func (s *StateStore) updateNodeDrainImpl(txn *memdb.Txn, index uint64, nodeID st
 	}

 	// Update the drain in the copy
-	copyNode.Drain = drain != nil // COMPAT: Remove in Nomad 0.9
+	copyNode.Drain = drain != nil // COMPAT: Remove in Nomad 0.10
 	copyNode.DrainStrategy = drain
 	if drain != nil {
 		copyNode.SchedulingEligibility = structs.NodeSchedulingIneligible
@@ -1025,11 +997,6 @@ func (s *StateStore) UpsertJobTxn(index uint64, job *structs.Job, txn Txn) error

 // upsertJobImpl is the implementation for registering a job or updating a job definition
 func (s *StateStore) upsertJobImpl(index uint64, job *structs.Job, keepVersion bool, txn *memdb.Txn) error {
-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if job.Namespace == "" {
-		job.Namespace = structs.DefaultNamespace
-	}
-
 	// Assert the namespace exists
 	if exists, err := s.namespaceExists(txn, job.Namespace); err != nil {
 		return err
@@ -1116,11 +1083,6 @@ func (s *StateStore) DeleteJob(index uint64, namespace, jobID string) error {
 // DeleteJobTxn is used to deregister a job, like DeleteJob,
 // but in a transaction. Useful for when making multiple modifications atomically
 func (s *StateStore) DeleteJobTxn(index uint64, namespace, jobID string, txn Txn) error {
-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if namespace == "" {
-		namespace = structs.DefaultNamespace
-	}
-
 	// Lookup the node
 	existing, err := txn.First("jobs", "id", namespace, jobID)
 	if err != nil {
@@ -1164,11 +1126,6 @@ func (s *StateStore) DeleteJobTxn(index uint64, namespace, jobID string, txn Txn
 	// Update the modify index
 	pSummary.ModifyIndex = index

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if pSummary.Namespace == "" {
-		pSummary.Namespace = structs.DefaultNamespace
-	}
-
 	// Insert the summary
 	if err := txn.Insert("job_summary", pSummary); err != nil {
 		return fmt.Errorf("job summary insert failed: %v", err)
@@ -1207,11 +1164,6 @@ func (s *StateStore) DeleteJobTxn(index uint64, namespace, jobID string, txn Txn

 // deleteJobVersions deletes all versions of the given job.
 func (s *StateStore) deleteJobVersions(index uint64, job *structs.Job, txn *memdb.Txn) error {
-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if job.Namespace == "" {
-		job.Namespace = structs.DefaultNamespace
-	}
-
 	iter, err := txn.Get("job_version", "id_prefix", job.Namespace, job.ID)
 	if err != nil {
 		return err
@@ -1252,11 +1204,6 @@ func (s *StateStore) deleteJobVersions(index uint64, job *structs.Job, txn *memd
 // upsertJobVersion inserts a job into its historic version table and limits the
 // number of job versions that are tracked.
 func (s *StateStore) upsertJobVersion(index uint64, job *structs.Job, txn *memdb.Txn) error {
-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if job.Namespace == "" {
-		job.Namespace = structs.DefaultNamespace
-	}
-
 	// Insert the job
 	if err := txn.Insert("job_version", job); err != nil {
 		return fmt.Errorf("failed to insert job into job_version table: %v", err)
@@ -1313,11 +1260,6 @@ func (s *StateStore) JobByID(ws memdb.WatchSet, namespace, id string) (*structs.
 // JobByIDTxn is used to lookup a job by its ID, like JobByID. JobByID returns the job version
 // accessible through in the transaction
 func (s *StateStore) JobByIDTxn(ws memdb.WatchSet, namespace, id string, txn Txn) (*structs.Job, error) {
-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if namespace == "" {
-		namespace = structs.DefaultNamespace
-	}
-
 	watchCh, existing, err := txn.FirstWatch("jobs", "id", namespace, id)
 	if err != nil {
 		return nil, fmt.Errorf("job lookup failed: %v", err)
@@ -1334,11 +1276,6 @@ func (s *StateStore) JobByIDTxn(ws memdb.WatchSet, namespace, id string, txn Txn
 func (s *StateStore) JobsByIDPrefix(ws memdb.WatchSet, namespace, id string) (memdb.ResultIterator, error) {
 	txn := s.db.Txn(false)

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if namespace == "" {
-		namespace = structs.DefaultNamespace
-	}
-
 	iter, err := txn.Get("jobs", "id_prefix", namespace, id)
 	if err != nil {
 		return nil, fmt.Errorf("job lookup failed: %v", err)
@@ -1353,11 +1290,6 @@ func (s *StateStore) JobsByIDPrefix(ws memdb.WatchSet, namespace, id string) (me
 func (s *StateStore) JobVersionsByID(ws memdb.WatchSet, namespace, id string) ([]*structs.Job, error) {
 	txn := s.db.Txn(false)

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if namespace == "" {
-		namespace = structs.DefaultNamespace
-	}
-
 	return s.jobVersionByID(txn, &ws, namespace, id)
 }

@@ -1365,11 +1297,6 @@ func (s *StateStore) JobVersionsByID(ws memdb.WatchSet, namespace, id string) ([
 // versions of a job and is called under an existing transaction. A watch set
 // can optionally be passed in to add the job histories to the watch set.
 func (s *StateStore) jobVersionByID(txn *memdb.Txn, ws *memdb.WatchSet, namespace, id string) ([]*structs.Job, error) {
-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if namespace == "" {
-		namespace = structs.DefaultNamespace
-	}
-
 	// Get all the historic jobs for this ID
 	iter, err := txn.Get("job_version", "id_prefix", namespace, id)
 	if err != nil {
@@ -1407,10 +1334,6 @@ func (s *StateStore) jobVersionByID(txn *memdb.Txn, ws *memdb.WatchSet, namespac
 // JobByIDAndVersion returns the job identified by its ID and Version. The
 // passed watchset may be nil.
 func (s *StateStore) JobByIDAndVersion(ws memdb.WatchSet, namespace, id string, version uint64) (*structs.Job, error) {
-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if namespace == "" {
-		namespace = structs.DefaultNamespace
-	}
 	txn := s.db.Txn(false)
 	return s.jobByIDAndVersionImpl(ws, namespace, id, version, txn)
 }
@@ -1419,10 +1342,6 @@ func (s *StateStore) JobByIDAndVersion(ws memdb.WatchSet, namespace, id string,
 // passed watchset may be nil.
 func (s *StateStore) jobByIDAndVersionImpl(ws memdb.WatchSet, namespace, id string,
 	version uint64, txn *memdb.Txn) (*structs.Job, error) {
-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if namespace == "" {
-		namespace = structs.DefaultNamespace
-	}

 	watchCh, existing, err := txn.FirstWatch("job_version", "id", namespace, id, version)
 	if err != nil {
@@ -1537,11 +1456,6 @@ func (s *StateStore) JobsByGC(ws memdb.WatchSet, gc bool) (memdb.ResultIterator,
 func (s *StateStore) JobSummaryByID(ws memdb.WatchSet, namespace, jobID string) (*structs.JobSummary, error) {
 	txn := s.db.Txn(false)

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if namespace == "" {
-		namespace = structs.DefaultNamespace
-	}
-
 	watchCh, existing, err := txn.FirstWatch("job_summary", "id", namespace, jobID)
 	if err != nil {
 		return nil, err
@@ -1576,11 +1490,6 @@ func (s *StateStore) JobSummaries(ws memdb.WatchSet) (memdb.ResultIterator, erro
 func (s *StateStore) JobSummaryByPrefix(ws memdb.WatchSet, namespace, id string) (memdb.ResultIterator, error) {
 	txn := s.db.Txn(false)

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if namespace == "" {
-		namespace = structs.DefaultNamespace
-	}
-
 	iter, err := txn.Get("job_summary", "id_prefix", namespace, id)
 	if err != nil {
 		return nil, fmt.Errorf("eval lookup failed: %v", err)
@@ -1596,11 +1505,6 @@ func (s *StateStore) UpsertPeriodicLaunch(index uint64, launch *structs.Periodic
 	txn := s.db.Txn(true)
 	defer txn.Abort()

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if launch.Namespace == "" {
-		launch.Namespace = structs.DefaultNamespace
-	}
-
 	// Check if the job already exists
 	existing, err := txn.First("periodic_launch", "id", launch.Namespace, launch.ID)
 	if err != nil {
@@ -1643,11 +1547,6 @@ func (s *StateStore) DeletePeriodicLaunch(index uint64, namespace, jobID string)
 // DeletePeriodicLaunchTxn is used to delete the periodic launch, like DeletePeriodicLaunch
 // but in a transaction. Useful for when making multiple modifications atomically
 func (s *StateStore) DeletePeriodicLaunchTxn(index uint64, namespace, jobID string, txn Txn) error {
-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if namespace == "" {
-		namespace = structs.DefaultNamespace
-	}
-
 	// Lookup the launch
 	existing, err := txn.First("periodic_launch", "id", namespace, jobID)
 	if err != nil {
@@ -1673,11 +1572,6 @@ func (s *StateStore) DeletePeriodicLaunchTxn(index uint64, namespace, jobID stri
 func (s *StateStore) PeriodicLaunchByID(ws memdb.WatchSet, namespace, id string) (*structs.PeriodicLaunch, error) {
 	txn := s.db.Txn(false)

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if namespace == "" {
-		namespace = structs.DefaultNamespace
-	}
-
 	watchCh, existing, err := txn.FirstWatch("periodic_launch", "id", namespace, id)
 	if err != nil {
 		return nil, fmt.Errorf("periodic launch lookup failed: %v", err)
@@ -1751,11 +1645,6 @@ func (s *StateStore) nestedUpsertEval(txn *memdb.Txn, index uint64, eval *struct
 		return fmt.Errorf("eval lookup failed: %v", err)
 	}

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if eval.Namespace == "" {
-		eval.Namespace = structs.DefaultNamespace
-	}
-
 	// Update the indexes
 	if existing != nil {
 		eval.CreateIndex = existing.(*structs.Evaluation).CreateIndex
@@ -1787,11 +1676,6 @@ func (s *StateStore) nestedUpsertEval(txn *memdb.Txn, index uint64, eval *struct

 	// Insert the job summary
 	if hasSummaryChanged {
-		// COMPAT 0.7: Upgrade old objects that do not have namespaces
-		if js.Namespace == "" {
-			js.Namespace = structs.DefaultNamespace
-		}
-
 		js.ModifyIndex = index
 		if err := txn.Insert("job_summary", js); err != nil {
 			return fmt.Errorf("job summary insert failed: %v", err)
@@ -1826,11 +1710,6 @@ func (s *StateStore) nestedUpsertEval(txn *memdb.Txn, index uint64, eval *struct
 	newEval.StatusDescription = fmt.Sprintf("evaluation %q successful", newEval.ID)
 	newEval.ModifyIndex = index

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if newEval.Namespace == "" {
-		newEval.Namespace = structs.DefaultNamespace
-	}
-
 	if err := txn.Insert("evals", newEval); err != nil {
 		return fmt.Errorf("eval insert failed: %v", err)
 	}
@@ -1960,11 +1839,6 @@ func (s *StateStore) EvalsByIDPrefix(ws memdb.WatchSet, namespace, id string) (m

 	ws.Add(iter.WatchCh())

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if namespace == "" {
-		namespace = structs.DefaultNamespace
-	}
-
 	// Wrap the iterator in a filter
 	wrap := memdb.NewFilterIterator(iter, evalNamespaceFilter(namespace))
 	return wrap, nil
@@ -1987,11 +1861,6 @@ func evalNamespaceFilter(namespace string) func(interface{}) bool {
 func (s *StateStore) EvalsByJob(ws memdb.WatchSet, namespace, jobID string) ([]*structs.Evaluation, error) {
 	txn := s.db.Txn(false)

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if namespace == "" {
-		namespace = structs.DefaultNamespace
-	}
-
 	// Get an iterator over the node allocations
 	iter, err := txn.Get("evals", "job_prefix", namespace, jobID)
 	if err != nil {
@@ -2092,11 +1961,6 @@ func (s *StateStore) nestedUpdateAllocFromClient(txn *memdb.Txn, index uint64, a
 	// Copy everything from the existing allocation
 	copyAlloc := exist.Copy()

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if copyAlloc.Namespace == "" {
-		copyAlloc.Namespace = structs.DefaultNamespace
-	}
-
 	// Pull in anything the client is the authority on
 	copyAlloc.ClientStatus = alloc.ClientStatus
 	copyAlloc.ClientDescription = alloc.ClientDescription
@@ -2230,11 +2094,6 @@ func (s *StateStore) upsertAllocsImpl(index uint64, allocs []*structs.Allocation
 		}
 	}

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if alloc.Namespace == "" {
-		alloc.Namespace = structs.DefaultNamespace
-	}
-
 	// OPTIMIZATION:
 	// These should be given a map of new to old allocation and the updates
 	// should be one on all changes. The current implementation causes O(n)
@@ -2459,11 +2318,6 @@ func (s *StateStore) AllocsByNodeTerminal(ws memdb.WatchSet, node string, termin
 func (s *StateStore) AllocsByJob(ws memdb.WatchSet, namespace, jobID string, all bool) ([]*structs.Allocation, error) {
 	txn := s.db.Txn(false)

-	// COMPAT 0.7: Upgrade old objects that do not have namespaces
-	if namespace == "" {
-		namespace = structs.DefaultNamespace
-	}
-
 	// Get the job
 	var job *structs.Job
 	rawJob, err := txn.First("jobs", "id", namespace, jobID)
@@ -2752,11 +2606,6 @@ func (s *StateStore) updateDeploymentStatusImpl(index uint64, u *structs.Deploym
|
||||
copy.StatusDescription = u.StatusDescription
|
||||
copy.ModifyIndex = index
|
||||
|
||||
// COMPAT 0.7: Upgrade old objects that do not have namespaces
|
||||
if copy.Namespace == "" {
|
||||
copy.Namespace = structs.DefaultNamespace
|
||||
}
|
||||
|
||||
// Insert the deployment
|
||||
if err := txn.Insert("deployment", copy); err != nil {
|
||||
return err
|
||||
@@ -2783,11 +2632,6 @@ func (s *StateStore) UpdateJobStability(index uint64, namespace, jobID string, j
|
||||
txn := s.db.Txn(true)
|
||||
defer txn.Abort()
|
||||
|
||||
// COMPAT 0.7: Upgrade old objects that do not have namespaces
|
||||
if namespace == "" {
|
||||
namespace = structs.DefaultNamespace
|
||||
}
|
||||
|
||||
if err := s.updateJobStabilityImpl(index, namespace, jobID, jobVersion, stable, txn); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -2798,11 +2642,6 @@ func (s *StateStore) UpdateJobStability(index uint64, namespace, jobID string, j
|
||||
|
||||
// updateJobStabilityImpl updates the stability of the given job and version
|
||||
func (s *StateStore) updateJobStabilityImpl(index uint64, namespace, jobID string, jobVersion uint64, stable bool, txn *memdb.Txn) error {
|
||||
// COMPAT 0.7: Upgrade old objects that do not have namespaces
|
||||
if namespace == "" {
|
||||
namespace = structs.DefaultNamespace
|
||||
}
|
||||
|
||||
// Get the job that is referenced
|
||||
job, err := s.jobByIDAndVersionImpl(nil, namespace, jobID, jobVersion, txn)
|
||||
if err != nil {
|
||||
@@ -3231,11 +3070,6 @@ func (s *StateStore) ReconcileJobSummaries(index uint64) error {
|
||||
summary.Summary[tg.Name] = structs.TaskGroupSummary{}
|
||||
}
|
||||
|
||||
// COMPAT 0.7: Upgrade old objects that do not have namespaces
|
||||
if job.Namespace == "" {
|
||||
job.Namespace = structs.DefaultNamespace
|
||||
}
|
||||
|
||||
// Find all the allocations for the jobs
|
||||
iterAllocs, err := txn.Get("allocs", "job", job.Namespace, job.ID)
|
||||
if err != nil {
|
||||
@@ -3299,10 +3133,6 @@ func (s *StateStore) ReconcileJobSummaries(index uint64) error {
|
||||
func (s *StateStore) setJobStatuses(index uint64, txn *memdb.Txn,
|
||||
jobs map[structs.NamespacedID]string, evalDelete bool) error {
|
||||
for tuple, forceStatus := range jobs {
|
||||
// COMPAT 0.7: Upgrade old objects that do not have namespaces
|
||||
if tuple.Namespace == "" {
|
||||
tuple.Namespace = structs.DefaultNamespace
|
||||
}
|
||||
|
||||
existing, err := txn.First("jobs", "id", tuple.Namespace, tuple.ID)
|
||||
if err != nil {
|
||||
@@ -3355,11 +3185,6 @@ func (s *StateStore) setJobStatus(index uint64, txn *memdb.Txn,
|
||||
updated.Status = newStatus
|
||||
updated.ModifyIndex = index
|
||||
|
||||
// COMPAT 0.7: Upgrade old objects that do not have namespaces
|
||||
if updated.Namespace == "" {
|
||||
updated.Namespace = structs.DefaultNamespace
|
||||
}
|
||||
|
||||
// Insert the job
|
||||
if err := txn.Insert("jobs", updated); err != nil {
|
||||
return fmt.Errorf("job insert failed: %v", err)
|
||||
@@ -3385,11 +3210,6 @@ func (s *StateStore) setJobStatus(index uint64, txn *memdb.Txn,
|
||||
pSummary.Children = new(structs.JobChildrenSummary)
|
||||
}
|
||||
|
||||
// COMPAT 0.7: Upgrade old objects that do not have namespaces
|
||||
if pSummary.Namespace == "" {
|
||||
pSummary.Namespace = structs.DefaultNamespace
|
||||
}
|
||||
|
||||
// Determine the transition and update the correct fields
|
||||
children := pSummary.Children
|
||||
|
||||
@@ -3436,11 +3256,6 @@ func (s *StateStore) setJobStatus(index uint64, txn *memdb.Txn,
|
||||
}
|
||||
|
||||
func (s *StateStore) getJobStatus(txn *memdb.Txn, job *structs.Job, evalDelete bool) (string, error) {
|
||||
// COMPAT 0.7: Upgrade old objects that do not have namespaces
|
||||
if job.Namespace == "" {
|
||||
job.Namespace = structs.DefaultNamespace
|
||||
}
|
||||
|
||||
// System, Periodic and Parameterized jobs are running until explicitly
|
||||
// stopped
|
||||
if job.Type == structs.JobTypeSystem || job.IsParameterized() || job.IsPeriodic() {
|
||||
@@ -3499,11 +3314,6 @@ func (s *StateStore) getJobStatus(txn *memdb.Txn, job *structs.Job, evalDelete b
|
||||
func (s *StateStore) updateSummaryWithJob(index uint64, job *structs.Job,
|
||||
txn *memdb.Txn) error {
|
||||
|
||||
// COMPAT 0.7: Upgrade old objects that do not have namespaces
|
||||
if job.Namespace == "" {
|
||||
job.Namespace = structs.DefaultNamespace
|
||||
}
|
||||
|
||||
// Update the job summary
|
||||
summaryRaw, err := txn.First("job_summary", "id", job.Namespace, job.ID)
|
||||
if err != nil {
|
||||
@@ -3543,11 +3353,6 @@ func (s *StateStore) updateSummaryWithJob(index uint64, job *structs.Job,
|
||||
if hasSummaryChanged {
|
||||
summary.ModifyIndex = index
|
||||
|
||||
// COMPAT 0.7: Upgrade old objects that do not have namespaces
|
||||
if summary.Namespace == "" {
|
||||
summary.Namespace = structs.DefaultNamespace
|
||||
}
|
||||
|
||||
// Update the indexes table for job summary
|
||||
if err := txn.Insert("index", &IndexEntry{"job_summary", index}); err != nil {
|
||||
return fmt.Errorf("index update failed: %v", err)
|
||||
@@ -3662,10 +3467,6 @@ func (s *StateStore) updateSummaryWithAlloc(index uint64, alloc *structs.Allocat
|
||||
if alloc.Job == nil {
|
||||
return nil
|
||||
}
|
||||
// COMPAT 0.7: Upgrade old objects that do not have namespaces
|
||||
if alloc.Namespace == "" {
|
||||
alloc.Namespace = structs.DefaultNamespace
|
||||
}
|
||||
|
||||
summaryRaw, err := txn.First("job_summary", "id", alloc.Namespace, alloc.JobID)
|
||||
if err != nil {
|
||||
@@ -3761,11 +3562,6 @@ func (s *StateStore) updateSummaryWithAlloc(index uint64, alloc *structs.Allocat
|
||||
if summaryChanged {
|
||||
jobSummary.ModifyIndex = index
|
||||
|
||||
// COMPAT 0.7: Upgrade old objects that do not have namespaces
|
||||
if jobSummary.Namespace == "" {
|
||||
jobSummary.Namespace = structs.DefaultNamespace
|
||||
}
|
||||
|
||||
// Update the indexes table for job summary
|
||||
if err := txn.Insert("index", &IndexEntry{"job_summary", index}); err != nil {
|
||||
return fmt.Errorf("index update failed: %v", err)
|
||||
|
||||
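The state-store hunks above all drive the same go-memdb machinery: open a read or write transaction, look an object up by index, and (for blocking queries) feed a watch channel into a WatchSet. The sketch below is illustrative only -- a toy schema and a stand-in summary type, not Nomad's real state schema -- showing the FirstWatch idiom these functions rely on.

    // Illustrative only: a toy go-memdb schema mirroring the FirstWatch
    // pattern used by JobSummaryByID above. "job_summary" is a stand-in
    // table, not Nomad's real schema.
    package main

    import (
        "fmt"

        memdb "github.com/hashicorp/go-memdb"
    )

    type summary struct {
        ID string
    }

    func main() {
        schema := &memdb.DBSchema{
            Tables: map[string]*memdb.TableSchema{
                "job_summary": {
                    Name: "job_summary",
                    Indexes: map[string]*memdb.IndexSchema{
                        "id": {
                            Name:    "id",
                            Unique:  true,
                            Indexer: &memdb.StringFieldIndex{Field: "ID"},
                        },
                    },
                },
            },
        }
        db, err := memdb.NewMemDB(schema)
        if err != nil {
            panic(err)
        }

        // Write transaction, as in the Upsert* helpers above.
        wtxn := db.Txn(true)
        if err := wtxn.Insert("job_summary", &summary{ID: "example"}); err != nil {
            panic(err)
        }
        wtxn.Commit()

        // Read transaction with a watch channel, as in JobSummaryByID above.
        txn := db.Txn(false)
        watchCh, raw, err := txn.FirstWatch("job_summary", "id", "example")
        if err != nil {
            panic(err)
        }
        ws := memdb.NewWatchSet()
        ws.Add(watchCh) // blocking queries park on the watch set for changes
        fmt.Println(raw.(*summary).ID)
    }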
@@ -282,10 +282,7 @@ func (e *EvalEligibility) GetClasses() map[string]bool {

// JobStatus returns the eligibility status of the job.
func (e *EvalEligibility) JobStatus(class string) ComputedClassFeasibility {
    // COMPAT: Computed node class was introduced in 0.3. Clients running < 0.3
    // will not have a computed class. The safest value to return is the escaped
    // case, since it disables any optimization.
    if e.jobEscaped || class == "" {
    if e.jobEscaped {
        return EvalComputedClassEscaped
    }

@@ -307,13 +304,6 @@ func (e *EvalEligibility) SetJobEligibility(eligible bool, class string) {

// TaskGroupStatus returns the eligibility status of the task group.
func (e *EvalEligibility) TaskGroupStatus(tg, class string) ComputedClassFeasibility {
    // COMPAT: Computed node class was introduced in 0.3. Clients running < 0.3
    // will not have a computed class. The safest value to return is the escaped
    // case, since it disables any optimization.
    if class == "" {
        return EvalComputedClassEscaped
    }

    if escaped, ok := e.tgEscapedConstraints[tg]; ok {
        if escaped {
            return EvalComputedClassEscaped

@@ -168,11 +168,6 @@ func TestEvalEligibility_JobStatus(t *testing.T) {
    if status := e.JobStatus(cc); status != EvalComputedClassEligible {
        t.Fatalf("JobStatus() returned %v; want %v", status, EvalComputedClassEligible)
    }

    // Check that if I pass an empty class it returns escaped
    if status := e.JobStatus(""); status != EvalComputedClassEscaped {
        t.Fatalf("JobStatus() returned %v; want %v", status, EvalComputedClassEscaped)
    }
}

func TestEvalEligibility_TaskGroupStatus(t *testing.T) {
@@ -195,11 +190,6 @@ func TestEvalEligibility_TaskGroupStatus(t *testing.T) {
    if status := e.TaskGroupStatus(tg, cc); status != EvalComputedClassEligible {
        t.Fatalf("TaskGroupStatus() returned %v; want %v", status, EvalComputedClassEligible)
    }

    // Check that if I pass an empty class it returns escaped
    if status := e.TaskGroupStatus(tg, ""); status != EvalComputedClassEscaped {
        t.Fatalf("TaskGroupStatus() returned %v; want %v", status, EvalComputedClassEscaped)
    }
}

func TestEvalEligibility_SetJob(t *testing.T) {
@@ -68,8 +68,13 @@
    width: 100%;
  }

  input {
  input,
  .input {
    width: 100%;
    padding: 0.4em 1.75em 0.4em 2.25em;

    &.is-compact {
      padding: 0.25em 0.75em 0.25em 2.25em;
    }
  }
}
@@ -1,6 +1,10 @@
/* eslint-env node */

const USE_MIRAGE = true;
let USE_MIRAGE = true;

if (process.env.USE_MIRAGE) {
  USE_MIRAGE = process.env.USE_MIRAGE == 'true';
}

module.exports = function(environment) {
  var ENV = {
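Presumably this hook lets CI or local runs flip the mock API off without editing the file, e.g. `USE_MIRAGE=false ember serve` to exercise the UI against a real Nomad agent; the exact invocation depends on the project's Ember tooling.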
vendor/github.com/Microsoft/go-winio/file.go (generated, vendored)
@@ -16,6 +16,7 @@ import (
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult

type atomicBool int32

@@ -79,6 +80,7 @@ type win32File struct {
    wg            sync.WaitGroup
    wgLock        sync.RWMutex
    closing       atomicBool
    socket        bool
    readDeadline  deadlineHandler
    writeDeadline deadlineHandler
}
@@ -190,6 +192,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
        if f.closing.isSet() {
            err = ErrFileClosed
        }
    } else if err != nil && f.socket {
        // err is from Win32. Query the overlapped structure to get the winsock error.
        var bytes, flags uint32
        err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
    }
case <-timeout:
    cancelIoEx(f.handle, &c.o)
@@ -265,6 +271,10 @@ func (f *win32File) Flush() error {
    return syscall.FlushFileBuffers(f.handle)
}

func (f *win32File) Fd() uintptr {
    return uintptr(f.handle)
}

func (d *deadlineHandler) set(deadline time.Time) error {
    d.setLock.Lock()
    defer d.setLock.Unlock()
vendor/github.com/Microsoft/go-winio/go.mod (generated, vendored, new file)
@@ -0,0 +1,9 @@
module github.com/Microsoft/go-winio

go 1.12

require (
    github.com/pkg/errors v0.8.1
    github.com/sirupsen/logrus v1.4.1
    golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b
)
vendor/github.com/Microsoft/go-winio/go.sum (generated, vendored, new file)
@@ -0,0 +1,16 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
vendor/github.com/Microsoft/go-winio/hvsock.go (generated, vendored, new file)
@@ -0,0 +1,305 @@
package winio

import (
    "fmt"
    "io"
    "net"
    "os"
    "syscall"
    "time"
    "unsafe"

    "github.com/Microsoft/go-winio/pkg/guid"
)

//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind

const (
    afHvSock = 34 // AF_HYPERV

    socketError = ^uintptr(0)
)

// An HvsockAddr is an address for a AF_HYPERV socket.
type HvsockAddr struct {
    VMID      guid.GUID
    ServiceID guid.GUID
}

type rawHvsockAddr struct {
    Family    uint16
    _         uint16
    VMID      guid.GUID
    ServiceID guid.GUID
}

// Network returns the address's network name, "hvsock".
func (addr *HvsockAddr) Network() string {
    return "hvsock"
}

func (addr *HvsockAddr) String() string {
    return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID)
}

// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
func VsockServiceID(port uint32) guid.GUID {
    g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
    g.Data1 = port
    return g
}

func (addr *HvsockAddr) raw() rawHvsockAddr {
    return rawHvsockAddr{
        Family:    afHvSock,
        VMID:      addr.VMID,
        ServiceID: addr.ServiceID,
    }
}

func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
    addr.VMID = raw.VMID
    addr.ServiceID = raw.ServiceID
}

// HvsockListener is a socket listener for the AF_HYPERV address family.
type HvsockListener struct {
    sock *win32File
    addr HvsockAddr
}

// HvsockConn is a connected socket of the AF_HYPERV address family.
type HvsockConn struct {
    sock          *win32File
    local, remote HvsockAddr
}

func newHvSocket() (*win32File, error) {
    fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1)
    if err != nil {
        return nil, os.NewSyscallError("socket", err)
    }
    f, err := makeWin32File(fd)
    if err != nil {
        syscall.Close(fd)
        return nil, err
    }
    f.socket = true
    return f, nil
}

// ListenHvsock listens for connections on the specified hvsock address.
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
    l := &HvsockListener{addr: *addr}
    sock, err := newHvSocket()
    if err != nil {
        return nil, l.opErr("listen", err)
    }
    sa := addr.raw()
    err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa)))
    if err != nil {
        return nil, l.opErr("listen", os.NewSyscallError("socket", err))
    }
    err = syscall.Listen(sock.handle, 16)
    if err != nil {
        return nil, l.opErr("listen", os.NewSyscallError("listen", err))
    }
    return &HvsockListener{sock: sock, addr: *addr}, nil
}

func (l *HvsockListener) opErr(op string, err error) error {
    return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err}
}

// Addr returns the listener's network address.
func (l *HvsockListener) Addr() net.Addr {
    return &l.addr
}

// Accept waits for the next connection and returns it.
func (l *HvsockListener) Accept() (_ net.Conn, err error) {
    sock, err := newHvSocket()
    if err != nil {
        return nil, l.opErr("accept", err)
    }
    defer func() {
        if sock != nil {
            sock.Close()
        }
    }()
    c, err := l.sock.prepareIo()
    if err != nil {
        return nil, l.opErr("accept", err)
    }
    defer l.sock.wg.Done()

    // AcceptEx, per documentation, requires an extra 16 bytes per address.
    const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
    var addrbuf [addrlen * 2]byte

    var bytes uint32
    err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o)
    _, err = l.sock.asyncIo(c, nil, bytes, err)
    if err != nil {
        return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
    }
    conn := &HvsockConn{
        sock: sock,
    }
    conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
    conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
    sock = nil
    return conn, nil
}

// Close closes the listener, causing any pending Accept calls to fail.
func (l *HvsockListener) Close() error {
    return l.sock.Close()
}

/* Need to finish ConnectEx handling
func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) {
    sock, err := newHvSocket()
    if err != nil {
        return nil, err
    }
    defer func() {
        if sock != nil {
            sock.Close()
        }
    }()
    c, err := sock.prepareIo()
    if err != nil {
        return nil, err
    }
    defer sock.wg.Done()
    var bytes uint32
    err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o)
    _, err = sock.asyncIo(ctx, c, nil, bytes, err)
    if err != nil {
        return nil, err
    }
    conn := &HvsockConn{
        sock:   sock,
        remote: *addr,
    }
    sock = nil
    return conn, nil
}
*/

func (conn *HvsockConn) opErr(op string, err error) error {
    return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
}

func (conn *HvsockConn) Read(b []byte) (int, error) {
    c, err := conn.sock.prepareIo()
    if err != nil {
        return 0, conn.opErr("read", err)
    }
    defer conn.sock.wg.Done()
    buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
    var flags, bytes uint32
    err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
    n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err)
    if err != nil {
        if _, ok := err.(syscall.Errno); ok {
            err = os.NewSyscallError("wsarecv", err)
        }
        return 0, conn.opErr("read", err)
    } else if n == 0 {
        err = io.EOF
    }
    return n, err
}

func (conn *HvsockConn) Write(b []byte) (int, error) {
    t := 0
    for len(b) != 0 {
        n, err := conn.write(b)
        if err != nil {
            return t + n, err
        }
        t += n
        b = b[n:]
    }
    return t, nil
}

func (conn *HvsockConn) write(b []byte) (int, error) {
    c, err := conn.sock.prepareIo()
    if err != nil {
        return 0, conn.opErr("write", err)
    }
    defer conn.sock.wg.Done()
    buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
    var bytes uint32
    err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
    n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err)
    if err != nil {
        if _, ok := err.(syscall.Errno); ok {
            err = os.NewSyscallError("wsasend", err)
        }
        return 0, conn.opErr("write", err)
    }
    return n, err
}

// Close closes the socket connection, failing any pending read or write calls.
func (conn *HvsockConn) Close() error {
    return conn.sock.Close()
}

func (conn *HvsockConn) shutdown(how int) error {
    err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD)
    if err != nil {
        return os.NewSyscallError("shutdown", err)
    }
    return nil
}

// CloseRead shuts down the read end of the socket.
func (conn *HvsockConn) CloseRead() error {
    err := conn.shutdown(syscall.SHUT_RD)
    if err != nil {
        return conn.opErr("close", err)
    }
    return nil
}

// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
// no more data will be written.
func (conn *HvsockConn) CloseWrite() error {
    err := conn.shutdown(syscall.SHUT_WR)
    if err != nil {
        return conn.opErr("close", err)
    }
    return nil
}

// LocalAddr returns the local address of the connection.
func (conn *HvsockConn) LocalAddr() net.Addr {
    return &conn.local
}

// RemoteAddr returns the remote address of the connection.
func (conn *HvsockConn) RemoteAddr() net.Addr {
    return &conn.remote
}

// SetDeadline implements the net.Conn SetDeadline method.
func (conn *HvsockConn) SetDeadline(t time.Time) error {
    conn.SetReadDeadline(t)
    conn.SetWriteDeadline(t)
    return nil
}

// SetReadDeadline implements the net.Conn SetReadDeadline method.
func (conn *HvsockConn) SetReadDeadline(t time.Time) error {
    return conn.sock.SetReadDeadline(t)
}

// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
func (conn *HvsockConn) SetWriteDeadline(t time.Time) error {
    return conn.sock.SetWriteDeadline(t)
}
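For orientation, here is a rough usage sketch of the hvsock API above. It is untested and Windows-only; the zero VMID is a placeholder (a real hypervisor partition GUID would come from elsewhere), and the port number is arbitrary.

    // Rough usage sketch of the API added above (Windows-only, untested).
    package main

    import (
        "log"

        winio "github.com/Microsoft/go-winio"
    )

    func main() {
        addr := &winio.HvsockAddr{
            // VMID left as the zero GUID purely for illustration.
            ServiceID: winio.VsockServiceID(8080), // derive a service GUID from an AF_VSOCK-style port
        }
        l, err := winio.ListenHvsock(addr)
        if err != nil {
            log.Fatal(err)
        }
        defer l.Close()

        for {
            conn, err := l.Accept()
            if err != nil {
                log.Fatal(err)
            }
            // A real server would hand conn off to a handler goroutine.
            conn.Close()
        }
    }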
vendor/github.com/Microsoft/go-winio/pipe.go (generated, vendored)
@@ -3,10 +3,13 @@
package winio

import (
    "context"
    "errors"
    "fmt"
    "io"
    "net"
    "os"
    "runtime"
    "syscall"
    "time"
    "unsafe"
@@ -18,6 +21,48 @@ import (
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile
//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U
//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl

type ioStatusBlock struct {
    Status, Information uintptr
}

type objectAttributes struct {
    Length             uintptr
    RootDirectory      uintptr
    ObjectName         *unicodeString
    Attributes         uintptr
    SecurityDescriptor *securityDescriptor
    SecurityQoS        uintptr
}

type unicodeString struct {
    Length        uint16
    MaximumLength uint16
    Buffer        uintptr
}

type securityDescriptor struct {
    Revision byte
    Sbz1     byte
    Control  uint16
    Owner    uintptr
    Group    uintptr
    Sacl     uintptr
    Dacl     uintptr
}

type ntstatus int32

func (status ntstatus) Err() error {
    if status >= 0 {
        return nil
    }
    return rtlNtStatusToDosError(status)
}

const (
    cERROR_PIPE_BUSY = syscall.Errno(231)
@@ -25,21 +70,20 @@ const (
    cERROR_PIPE_CONNECTED = syscall.Errno(535)
    cERROR_SEM_TIMEOUT    = syscall.Errno(121)

    cPIPE_ACCESS_DUPLEX            = 0x3
    cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000
    cSECURITY_SQOS_PRESENT         = 0x100000
    cSECURITY_ANONYMOUS            = 0

    cPIPE_REJECT_REMOTE_CLIENTS = 0x8

    cPIPE_UNLIMITED_INSTANCES = 255

    cNMPWAIT_USE_DEFAULT_WAIT = 0
    cNMPWAIT_NOWAIT           = 1
    cSECURITY_SQOS_PRESENT = 0x100000
    cSECURITY_ANONYMOUS    = 0

    cPIPE_TYPE_MESSAGE = 4

    cPIPE_READMODE_MESSAGE = 2

    cFILE_OPEN   = 1
    cFILE_CREATE = 2

    cFILE_PIPE_MESSAGE_TYPE          = 1
    cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2

    cSE_DACL_PRESENT = 4
)

var (
@@ -137,9 +181,30 @@ func (s pipeAddress) String() string {
    return string(s)
}

// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
    for {
        select {
        case <-ctx.Done():
            return syscall.Handle(0), ctx.Err()
        default:
            h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
            if err == nil {
                return h, nil
            }
            if err != cERROR_PIPE_BUSY {
                return h, &os.PathError{Err: err, Op: "open", Path: *path}
            }
            // Wait 10 msec and try again. This is a rather simplistic
            // view, as we always try each 10 milliseconds.
            time.Sleep(time.Millisecond * 10)
        }
    }
}

// DialPipe connects to a named pipe by path, timing out if the connection
// takes longer than the specified duration. If timeout is nil, then we use
// a default timeout of 5 seconds. (We do not use WaitNamedPipe.)
// a default timeout of 2 seconds. (We do not use WaitNamedPipe.)
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
    var absTimeout time.Time
    if timeout != nil {
@@ -147,23 +212,22 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
    } else {
        absTimeout = time.Now().Add(time.Second * 2)
    }
    ctx, _ := context.WithDeadline(context.Background(), absTimeout)
    conn, err := DialPipeContext(ctx, path)
    if err == context.DeadlineExceeded {
        return nil, ErrTimeout
    }
    return conn, err
}

// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
// cancellation or timeout.
func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
    var err error
    var h syscall.Handle
    for {
        h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
        if err != cERROR_PIPE_BUSY {
            break
        }
        if time.Now().After(absTimeout) {
            return nil, ErrTimeout
        }

        // Wait 10 msec and try again. This is a rather simplistic
        // view, as we always try each 10 milliseconds.
        time.Sleep(time.Millisecond * 10)
    }
    h, err = tryDialPipe(ctx, &path)
    if err != nil {
        return nil, &os.PathError{Op: "open", Path: path, Err: err}
        return nil, err
    }

    var flags uint32
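The dial path now funnels through tryDialPipe under a context. Below is a minimal client sketch of the two entry points shown above; it is untested, Windows-only, and the pipe name is illustrative.

    // Minimal client sketch for the dial API above (Windows-only, untested).
    package main

    import (
        "context"
        "log"
        "time"

        winio "github.com/Microsoft/go-winio"
    )

    func main() {
        // Context-based form added in this change: give up after one second.
        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()

        conn, err := winio.DialPipeContext(ctx, `\\.\pipe\mypipe`)
        if err != nil {
            log.Fatal(err) // fails once the deadline passes
        }
        defer conn.Close()

        // The older DialPipe(path, nil) form now wraps the same code path
        // with a default two-second deadline.
        if _, err := conn.Write([]byte("ping\n")); err != nil {
            log.Fatal(err)
        }
    }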
@@ -194,43 +258,87 @@ type acceptResponse struct {
}

type win32PipeListener struct {
    firstHandle        syscall.Handle
    path               string
    securityDescriptor []byte
    config             PipeConfig
    acceptCh           chan (chan acceptResponse)
    closeCh            chan int
    doneCh             chan int
    firstHandle syscall.Handle
    path        string
    config      PipeConfig
    acceptCh    chan (chan acceptResponse)
    closeCh     chan int
    doneCh      chan int
}

func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
    var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED
    if first {
        flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE
    }

    var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS
    if c.MessageMode {
        mode |= cPIPE_TYPE_MESSAGE
    }

    sa := &syscall.SecurityAttributes{}
    sa.Length = uint32(unsafe.Sizeof(*sa))
    if securityDescriptor != nil {
        len := uint32(len(securityDescriptor))
        sa.SecurityDescriptor = localAlloc(0, len)
        defer localFree(sa.SecurityDescriptor)
        copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor)
    }
    h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
    path16, err := syscall.UTF16FromString(path)
    if err != nil {
        return 0, &os.PathError{Op: "open", Path: path, Err: err}
    }

    var oa objectAttributes
    oa.Length = unsafe.Sizeof(oa)

    var ntPath unicodeString
    if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil {
        return 0, &os.PathError{Op: "open", Path: path, Err: err}
    }
    defer localFree(ntPath.Buffer)
    oa.ObjectName = &ntPath

    // The security descriptor is only needed for the first pipe.
    if first {
        if sd != nil {
            len := uint32(len(sd))
            sdb := localAlloc(0, len)
            defer localFree(sdb)
            copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
            oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
        } else {
            // Construct the default named pipe security descriptor.
            var dacl uintptr
            if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
                return 0, fmt.Errorf("getting default named pipe ACL: %s", err)
            }
            defer localFree(dacl)

            sdb := &securityDescriptor{
                Revision: 1,
                Control:  cSE_DACL_PRESENT,
                Dacl:     dacl,
            }
            oa.SecurityDescriptor = sdb
        }
    }

    typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS)
    if c.MessageMode {
        typ |= cFILE_PIPE_MESSAGE_TYPE
    }

    disposition := uint32(cFILE_OPEN)
    access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE)
    if first {
        disposition = cFILE_CREATE
        // By not asking for read or write access, the named pipe file system
        // will put this pipe into an initially disconnected state, blocking
        // client connections until the next call with first == false.
        access = syscall.SYNCHRONIZE
    }

    timeout := int64(-50 * 10000) // 50ms

    var (
        h    syscall.Handle
        iosb ioStatusBlock
    )
    err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err()
    if err != nil {
        return 0, &os.PathError{Op: "open", Path: path, Err: err}
    }

    runtime.KeepAlive(ntPath)
    return h, nil
}

func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
    h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false)
    h, err := makeServerPipeHandle(l.path, nil, &l.config, false)
    if err != nil {
        return nil, err
    }
@@ -321,6 +429,28 @@ type PipeConfig struct {
    OutputBufferSize int32
}

// ListenOnlyPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
// The pipe must already exist.
func ListenOnlyPipe(path string, c *PipeConfig) (net.Listener, error) {
    if c == nil {
        c = &PipeConfig{}
    }
    h, err := makeServerPipeHandle(path, nil, c, false)
    if err != nil {
        return nil, err
    }
    l := &win32PipeListener{
        firstHandle: h,
        path:        path,
        config:      *c,
        acceptCh:    make(chan (chan acceptResponse)),
        closeCh:     make(chan int),
        doneCh:      make(chan int),
    }
    go l.listenerRoutine()
    return l, nil
}

// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
// The pipe must not already exist.
func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
@@ -341,32 +471,13 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
    if err != nil {
        return nil, err
    }
    // Create a client handle and connect it. This results in the pipe
    // instance always existing, so that clients see ERROR_PIPE_BUSY
    // rather than ERROR_FILE_NOT_FOUND. This ties the first instance
    // up so that no other instances can be used. This would have been
    // cleaner if the Win32 API matched CreateFile with ConnectNamedPipe
    // instead of CreateNamedPipe. (Apparently created named pipes are
    // considered to be in listening state regardless of whether any
    // active calls to ConnectNamedPipe are outstanding.)
    h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
    if err != nil {
        syscall.Close(h)
        return nil, err
    }
    // Close the client handle. The server side of the instance will
    // still be busy, leading to ERROR_PIPE_BUSY instead of
    // ERROR_NOT_FOUND, as long as we don't close the server handle,
    // or disconnect the client with DisconnectNamedPipe.
    syscall.Close(h2)
    l := &win32PipeListener{
        firstHandle:        h,
        path:               path,
        securityDescriptor: sd,
        config:             *c,
        acceptCh:           make(chan (chan acceptResponse)),
        closeCh:            make(chan int),
        doneCh:             make(chan int),
        firstHandle: h,
        path:        path,
        config:      *c,
        acceptCh:    make(chan (chan acceptResponse)),
        closeCh:     make(chan int),
        doneCh:      make(chan int),
    }
    go l.listenerRoutine()
    return l, nil
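The server side, sketched under the same assumptions: ListenPipe requires that the pipe not yet exist, while the new ListenOnlyPipe attaches to one that does. The nil-config fallback mirrors the guard shown in ListenOnlyPipe above; the pipe name remains illustrative.

    // Server-side counterpart to the dial sketch (Windows-only, untested).
    package main

    import (
        "log"

        winio "github.com/Microsoft/go-winio"
    )

    func main() {
        l, err := winio.ListenPipe(`\\.\pipe\mypipe`, nil) // nil PipeConfig falls back to defaults
        if err != nil {
            log.Fatal(err)
        }
        defer l.Close()

        for {
            conn, err := l.Accept()
            if err != nil {
                log.Fatal(err)
            }
            go func() {
                defer conn.Close()
                buf := make([]byte, 64)
                if n, err := conn.Read(buf); err == nil {
                    log.Printf("got %q", buf[:n])
                }
            }()
        }
    }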
vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go (generated, vendored, new file)
@@ -0,0 +1,187 @@
// Package guid provides a GUID type. The backing structure for a GUID is
// identical to that used by the golang.org/x/sys/windows GUID type.
// There are two main binary encodings used for a GUID, the big-endian encoding,
// and the Windows (mixed-endian) encoding. See here for details:
// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding
package guid

import (
    "crypto/rand"
    "encoding"
    "encoding/binary"
    "fmt"
    "strconv"

    "golang.org/x/sys/windows"
)

// Variant specifies which GUID variant (or "type") of the GUID. It determines
// how the entirety of the rest of the GUID is interpreted.
type Variant uint8

// The variants specified by RFC 4122.
const (
    // VariantUnknown specifies a GUID variant which does not conform to one of
    // the variant encodings specified in RFC 4122.
    VariantUnknown Variant = iota
    VariantNCS
    VariantRFC4122
    VariantMicrosoft
    VariantFuture
)

// Version specifies how the bits in the GUID were generated. For instance, a
// version 4 GUID is randomly generated, and a version 5 is generated from the
// hash of an input string.
type Version uint8

var _ = (encoding.TextMarshaler)(GUID{})
var _ = (encoding.TextUnmarshaler)(&GUID{})

// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type so that stringification and
// marshaling can be supported. The representation matches that used by native
// Windows code.
type GUID windows.GUID

// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
func NewV4() (GUID, error) {
    var b [16]byte
    if _, err := rand.Read(b[:]); err != nil {
        return GUID{}, err
    }

    b[6] = (b[6] & 0x0f) | 0x40 // Version 4 (randomly generated)
    b[8] = (b[8] & 0x3f) | 0x80 // RFC4122 variant

    return FromArray(b), nil
}

func fromArray(b [16]byte, order binary.ByteOrder) GUID {
    var g GUID
    g.Data1 = order.Uint32(b[0:4])
    g.Data2 = order.Uint16(b[4:6])
    g.Data3 = order.Uint16(b[6:8])
    copy(g.Data4[:], b[8:16])
    return g
}

func (g GUID) toArray(order binary.ByteOrder) [16]byte {
    b := [16]byte{}
    order.PutUint32(b[0:4], g.Data1)
    order.PutUint16(b[4:6], g.Data2)
    order.PutUint16(b[6:8], g.Data3)
    copy(b[8:16], g.Data4[:])
    return b
}

// FromArray constructs a GUID from a big-endian encoding array of 16 bytes.
func FromArray(b [16]byte) GUID {
    return fromArray(b, binary.BigEndian)
}

// ToArray returns an array of 16 bytes representing the GUID in big-endian
// encoding.
func (g GUID) ToArray() [16]byte {
    return g.toArray(binary.BigEndian)
}

// FromWindowsArray constructs a GUID from a Windows encoding array of bytes.
func FromWindowsArray(b [16]byte) GUID {
    return fromArray(b, binary.LittleEndian)
}

// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows
// encoding.
func (g GUID) ToWindowsArray() [16]byte {
    return g.toArray(binary.LittleEndian)
}

func (g GUID) String() string {
    return fmt.Sprintf(
        "%08x-%04x-%04x-%04x-%012x",
        g.Data1,
        g.Data2,
        g.Data3,
        g.Data4[:2],
        g.Data4[2:])
}

// FromString parses a string containing a GUID and returns the GUID. The only
// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
// format.
func FromString(s string) (GUID, error) {
    if len(s) != 36 {
        return GUID{}, fmt.Errorf("invalid GUID %q", s)
    }
    if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
        return GUID{}, fmt.Errorf("invalid GUID %q", s)
    }

    var g GUID

    data1, err := strconv.ParseUint(s[0:8], 16, 32)
    if err != nil {
        return GUID{}, fmt.Errorf("invalid GUID %q", s)
    }
    g.Data1 = uint32(data1)

    data2, err := strconv.ParseUint(s[9:13], 16, 16)
    if err != nil {
        return GUID{}, fmt.Errorf("invalid GUID %q", s)
    }
    g.Data2 = uint16(data2)

    data3, err := strconv.ParseUint(s[14:18], 16, 16)
    if err != nil {
        return GUID{}, fmt.Errorf("invalid GUID %q", s)
    }
    g.Data3 = uint16(data3)

    for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} {
        v, err := strconv.ParseUint(s[x:x+2], 16, 8)
        if err != nil {
            return GUID{}, fmt.Errorf("invalid GUID %q", s)
        }
        g.Data4[i] = uint8(v)
    }

    return g, nil
}

// Variant returns the GUID variant, as defined in RFC 4122.
func (g GUID) Variant() Variant {
    b := g.Data4[0]
    if b&0x80 == 0 {
        return VariantNCS
    } else if b&0xc0 == 0x80 {
        return VariantRFC4122
    } else if b&0xe0 == 0xc0 {
        return VariantMicrosoft
    } else if b&0xe0 == 0xe0 {
        return VariantFuture
    }
    return VariantUnknown
}

// Version returns the GUID version, as defined in RFC 4122.
func (g GUID) Version() Version {
    return Version((g.Data3 & 0xF000) >> 12)
}

// MarshalText returns the textual representation of the GUID.
func (g GUID) MarshalText() ([]byte, error) {
    return []byte(g.String()), nil
}

// UnmarshalText takes the textual representation of a GUID, and unmarhals it
// into this GUID.
func (g *GUID) UnmarshalText(text []byte) error {
    g2, err := FromString(string(text))
    if err != nil {
        return err
    }
    *g = g2
    return nil
}
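A quick, hypothetical round-trip through the new guid package (Windows-only, since it links against golang.org/x/sys/windows); the literal GUID reuses the vsock template value from hvsock.go, chosen arbitrarily here.

    // Round-trip sketch of the guid package API above (Windows-only).
    package main

    import (
        "fmt"

        "github.com/Microsoft/go-winio/pkg/guid"
    )

    func main() {
        g, err := guid.NewV4() // random, version 4, RFC 4122 variant
        if err != nil {
            panic(err)
        }
        fmt.Println(g.Version(), g.Variant()) // prints 4 and 2 (VariantRFC4122) as integers

        parsed, err := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
        if err != nil {
            panic(err)
        }
        // The two byte encodings differ: big-endian vs Windows mixed-endian.
        fmt.Printf("%x\n%x\n", parsed.ToArray(), parsed.ToWindowsArray())
        fmt.Println(parsed) // String() restores the canonical form
    }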
vendor/github.com/Microsoft/go-winio/syscall.go (generated, vendored)
@@ -1,3 +1,3 @@
package winio

//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
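Adding hvsock.go to the generate directive means a later `go generate` run over this package would presumably re-emit zsyscall_windows.go (shown next) with the new ws2_32 and ntdll bindings; that regeneration step is implied by the mksyscall workflow rather than shown in this commit.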
vendor/github.com/Microsoft/go-winio/zsyscall_windows.go (generated, vendored)
@@ -1,4 +1,4 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
// Code generated by 'go generate'; DO NOT EDIT.

package winio

@@ -38,19 +38,25 @@ func errnoErr(e syscall.Errno) error {

var (
    modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
    modws2_32   = windows.NewLazySystemDLL("ws2_32.dll")
    modntdll    = windows.NewLazySystemDLL("ntdll.dll")
    modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")

    procCancelIoEx = modkernel32.NewProc("CancelIoEx")
    procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
    procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
    procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
    procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
    procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
    procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
    procCreateFileW = modkernel32.NewProc("CreateFileW")
    procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW")
    procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
    procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
    procLocalAlloc = modkernel32.NewProc("LocalAlloc")
    procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
    procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
    procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
    procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
    procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
    procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
    procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
@@ -69,6 +75,7 @@ var (
    procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
    procBackupRead = modkernel32.NewProc("BackupRead")
    procBackupWrite = modkernel32.NewProc("BackupWrite")
    procbind = modws2_32.NewProc("bind")
)

func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
@@ -120,6 +127,24 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro
    return
}

func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
    var _p0 uint32
    if wait {
        _p0 = 1
    } else {
        _p0 = 0
    }
    r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
    if r1 == 0 {
        if e1 != 0 {
            err = errnoErr(e1)
        } else {
            err = syscall.EINVAL
        }
    }
    return
}

func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
    r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
    if r1 == 0 {
@@ -176,27 +201,6 @@ func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityA
    return
}

func waitNamedPipe(name string, timeout uint32) (err error) {
    var _p0 *uint16
    _p0, err = syscall.UTF16PtrFromString(name)
    if err != nil {
        return
    }
    return _waitNamedPipe(_p0, timeout)
}

func _waitNamedPipe(name *uint16, timeout uint32) (err error) {
    r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0)
    if r1 == 0 {
        if e1 != 0 {
            err = errnoErr(e1)
        } else {
            err = syscall.EINVAL
        }
    }
    return
}

func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
    r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
    if r1 == 0 {
@@ -227,6 +231,32 @@ func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
    return
}

func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
    r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
    status = ntstatus(r0)
    return
}

func rtlNtStatusToDosError(status ntstatus) (winerr error) {
    r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
    if r0 != 0 {
        winerr = syscall.Errno(r0)
    }
    return
}

func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
    r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
    status = ntstatus(r0)
    return
}

func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
    r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
    status = ntstatus(r0)
    return
}

func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
    var _p0 *uint16
    _p0, err = syscall.UTF16PtrFromString(accountName)
@@ -518,3 +548,15 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p
    }
    return
}

func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
    r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
    if r1 == socketError {
        if e1 != 0 {
            err = errnoErr(e1)
        } else {
            err = syscall.EINVAL
        }
    }
    return
}
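Note how the `[failretval==socketError]` annotation on the //sys declaration in hvsock.go surfaces here: the generated bind wrapper compares r1 against socketError instead of zero before consulting the errno.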
vendor/vendor.json (vendored)
@@ -8,7 +8,8 @@
{"path":"github.com/Azure/go-ansiterm/winterm","checksumSHA1":"3/UphB+6Hbx5otA4PjFjvObT+L4=","revision":"d6e3b3328b783f23731bc4d058875b0371ff8109","revisionTime":"2017-09-29T23:40:23Z","version":"master","versionExact":"master"},
{"path":"github.com/DataDog/datadog-go/statsd","checksumSHA1":"WvApwvvSe3i/3KO8300dyeFmkbI=","revision":"b10af4b12965a1ad08d164f57d14195b4140d8de","revisionTime":"2017-08-09T10:47:06Z"},
{"path":"github.com/LK4D4/joincontext","checksumSHA1":"Jmf4AnrptgBdQ5TPBJ2M89nooIQ=","revision":"1724345da6d5bcc8b66fefb843b607ab918e175c","revisionTime":"2017-10-26T17:01:39Z"},
{"path":"github.com/Microsoft/go-winio","checksumSHA1":"PbR6ZKoLeSZl8aXxDQqXih0wSgE=","revision":"97e4973ce50b2ff5f09635a57e2b88a037aae829","revisionTime":"2018-08-23T22:24:21Z"},
{"path":"github.com/Microsoft/go-winio","checksumSHA1":"nEVw+80Junfo7iEY7ThP7Ci9Pyk=","origin":"github.com/endocrimes/go-winio","revision":"fb47a8b419480a700368c176bc1d5d7e3393b98d","revisionTime":"2019-06-20T17:03:19Z","version":"dani/safe-relisten","versionExact":"dani/safe-relisten"},
{"path":"github.com/Microsoft/go-winio/pkg/guid","checksumSHA1":"/ykkyb7gmtZC68n7T24xwbmlCBc=","origin":"github.com/endocrimes/go-winio/pkg/guid","revision":"fb47a8b419480a700368c176bc1d5d7e3393b98d","revisionTime":"2019-06-20T17:03:19Z","version":"dani/safe-relisten","versionExact":"dani/safe-relisten"},
{"path":"github.com/NVIDIA/gpu-monitoring-tools","checksumSHA1":"kF1vk+8Xvb3nGBiw9+qbUc0SZ4M=","revision":"86f2a9fac6c5b597dc494420005144b8ef7ec9fb","revisionTime":"2018-08-29T22:20:09Z"},
{"path":"github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml","checksumSHA1":"P8FATSSgpe5A17FyPrGpsX95Xw8=","revision":"86f2a9fac6c5b597dc494420005144b8ef7ec9fb","revisionTime":"2018-08-29T22:20:09Z"},
{"path":"github.com/NYTimes/gziphandler","checksumSHA1":"jktW57+vJsziNVPeXMCoujTzdW4=","revision":"97ae7fbaf81620fe97840685304a78a306a39c64","revisionTime":"2017-09-16T00:36:49Z"},
@@ -217,3 +217,22 @@
/guides/operations/upgrade/ /guides/upgrade/index.html
/guides/operations/upgrade/index.html /guides/upgrade/index.html
/guides/operations/upgrade/upgrade-specific.html /guides/upgrade/upgrade-specific.html

# Enterprise

# Reorganized Enterprise into single pager
/docs/enterprise/namespaces /docs/enterprise/index.html#namespaces
/docs/enterprise/namespaces/ /docs/enterprise/index.html#namespaces
/docs/enterprise/namespaces/index.html /docs/enterprise/index.html#namespaces
/docs/enterprise/quotas /docs/enterprise/index.html#resource-quotas
/docs/enterprise/quotas/ /docs/enterprise/index.html#resource-quotas
/docs/enterprise/quotas/index.html /docs/enterprise/index.html#resource-quotas
/docs/enterprise/preemption /docs/enterprise/index.html#preemption
/docs/enterprise/preemption/ /docs/enterprise/index.html#preemption
/docs/enterprise/preemption/index.html /docs/enterprise/index.html#preemption
/docs/enterprise/sentinel /docs/enterprise/index.html#sentinel-policies
/docs/enterprise/sentinel/ /docs/enterprise/index.html#sentinel-policies
/docs/enterprise/sentinel/index.html /docs/enterprise/index.html#sentinel-policies
/docs/enterprise/autopilot /docs/enterprise/index.html#nomad-enterprise-platform
/docs/enterprise/autopilot/ /docs/enterprise/index.html#nomad-enterprise-platform
/docs/enterprise/autopilot/index.html /docs/enterprise/index.html#nomad-enterprise-platform
@@ -1,45 +0,0 @@
---
layout: "docs"
page_title: "Nomad Enterprise Advanced Autopilot"
sidebar_current: "docs-enterprise-autopilot"
description: |-
  Nomad Enterprise supports Advanced Autopilot capabilities which enable fully
  automated server upgrades, higher throughput for reads and scheduling, and hot
  server failover on a per availability zone basis.
---

# Nomad Enterprise Advanced Autopilot

[Nomad Enterprise](https://www.hashicorp.com/go/nomad-enterprise) supports Advanced Autopilot capabilities which enable fully
automated server upgrades, higher throughput for reads and scheduling, and hot
server failover on a per availability zone basis. See the sections below for
additional details on each of these capabilities.

* **Automated Upgrades:** Advanced Autopilot enables an upgrade pattern that
  allows operators to deploy a complete cluster of new servers and then simply wait
  for the upgrade to complete. As the new servers join the cluster, server
  introduction logic checks the version of each Nomad server. If the version is
  higher than the version on the current set of voters, it will avoid promoting
  the new servers to voters until the number of new servers matches the number of
  existing servers at the previous version. Once the numbers match, Autopilot will
  begin to promote new servers and demote old ones.

* **Enhanced Read Scalability:** With Advanced Autopilot, servers can be
  explicitly marked as non-voters. Non-voters will receive the replication stream
  but will not take part in quorum (required by the leader before log entries can
  be committed). Adding explicit non-voters will scale reads and scheduling without
  impacting write latency.

* **Redundancy Zones:** Advanced Autopilot redundancy zones make it possible to
  have more servers than availability zones. For example, in an environment with
  three availability zones it's now possible to run one voter and one non-voter in
  each availability zone, for a total of six servers. If an availability zone is
  completely lost, only one voter will be lost, so the cluster remains available.
  If a voter is lost in an availability zone, Autopilot will promote the non-voter
  to voter automatically, putting the hot standby server into service quickly.

See the [Nomad Autopilot Guide](/guides/operations/autopilot.html)
for a comprehensive overview of Nomad's open source and enterprise Autopilot features.

Click [here](https://www.hashicorp.com/go/nomad-enterprise) to set up a demo or
request a trial of Nomad Enterprise.

@@ -9,24 +9,55 @@ description: |-

# Nomad Enterprise

[Nomad Enterprise](https://www.hashicorp.com/go/nomad-enterprise) adds collaboration,
operational, and governance capabilities to Nomad. Namespaces allow multiple
teams to safely use a shared multi-region deployment. With Resource Quotas,
operators can limit resource consumption across teams or projects. Sentinel
policies enable enforcement of arbitrary fine-grained policies on job submission.
Preemption capabilities enable the scheduler to temporarily evict lower priority
allocations for service and batch jobs so that higher priority allocations can be placed.
Advanced Autopilot capabilities enable automated server upgrades, enhanced scalability
for reads and scheduling, and hot server failover on a per availability zone basis. See the
links below for a detailed overview of each feature.
Nomad Enterprise adds collaboration, operational, and governance capabilities to Nomad. Nomad Enterprise is available as a base Platform package with an optional Governance & Policy add-on module.

- [Namespaces](/docs/enterprise/namespaces/index.html)
- [Resource Quotas](/docs/enterprise/quotas/index.html)
- [Sentinel Policies](/docs/enterprise/sentinel/index.html)
- [Preemption](/docs/enterprise/preemption/index.html)
- [Advanced Autopilot](/docs/enterprise/autopilot/index.html)
Please navigate the sub-sections for more information about each package and its features in detail.

Click [here](https://www.hashicorp.com/go/nomad-enterprise) to set up a demo or request a trial
## Nomad Enterprise Platform
Nomad Enterprise Platform enables operators to upgrade Nomad easily and enhances performance and availability through Advanced Autopilot features such as Automated Upgrades, Enhanced Read Scalability, and Redundancy Zones.

### Automated Upgrades
Automated Upgrades allows operators to deploy a complete cluster of new servers and then simply wait for the upgrade to complete. As the new servers join the cluster, server logic checks the version of each Nomad server node. If the version is higher than the version on the current set of voters, it will avoid promoting the new servers to voters until the number of new servers matches the number of existing servers at the previous version. Once the numbers match, Nomad will begin to promote new servers and demote old ones.

See the [Autopilot - Upgrade Migrations](https://www.nomadproject.io/guides/operations/autopilot.html#upgrade-migrations) documentation for a thorough overview.

### Enhanced Read Scalability
This feature enables an operator to introduce non-voting server nodes to a Nomad cluster. Non-voting servers will receive the replication stream but will not take part in quorum (required by the leader before log entries can be committed). Adding explicit non-voters will scale reads and scheduling without impacting write latency.

See the [Autopilot - Read Scalability](https://www.nomadproject.io/guides/operations/autopilot.html#server-read-and-scheduling-scaling) documentation for a thorough overview.

### Redundancy Zones
Redundancy Zones enables an operator to deploy a non-voting server as a hot standby server on a per availability zone basis. For example, in an environment with three availability zones an operator can run one voter and one non-voter in each availability zone, for a total of six servers. If an availability zone is completely lost, only one voter will be lost, so the cluster remains available. If a voter is lost in an availability zone, Nomad will promote the non-voter to a voter automatically, putting the hot standby server into service quickly.

See the [Autopilot - Redundancy Zones](https://www.nomadproject.io/guides/operations/autopilot.html#redundancy-zones) documentation for a thorough overview.
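
These Platform features are surfaced through the Operator HTTP API. As a rough illustration, the hedged Go sketch below fetches the cluster's Autopilot configuration; the `/v1/operator/autopilot/configuration` endpoint is part of the public API, while the agent address is a placeholder and the exact set of Enterprise fields in the response (for example, redundancy-zone and upgrade-migration flags) should be verified against your Nomad version.

```
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Assumed local agent address; adjust for your cluster.
	resp, err := http.Get("http://127.0.0.1:4646/v1/operator/autopilot/configuration")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The JSON body contains the Autopilot settings; on Enterprise servers
	// this is where zone- and upgrade-related flags are expected to surface.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}
```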

## Governance & Policy
Governance & Policy features are part of an add-on module that enables an organization to securely operate Nomad at scale across multiple teams through features such as Namespaces, Resource Quotas, Sentinel Policies, and Preemption.

### Namespaces
Namespaces enable multiple teams to safely use a shared multi-region Nomad environment and reduce cluster fleet size. In Nomad Enterprise, a shared cluster can be partitioned into multiple namespaces which allow jobs and their associated objects to be isolated from each other and other users of the cluster.

Namespaces enhance the usability of a shared cluster by isolating teams from the jobs of others, by providing fine-grained access control to jobs when coupled with ACLs, and by preventing bad actors from negatively impacting the whole cluster.

See the [Namespaces Guide](https://www.nomadproject.io/docs/enterprise/namespaces/index.html) for a thorough overview.
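
To make the isolation concrete, here is a minimal, hedged sketch that lists only the jobs registered in a single namespace by passing the `namespace` query parameter to the jobs API; the agent address and the `team-a` namespace name are illustrative.

```
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Hypothetical namespace name; the namespace must already exist.
	url := "http://127.0.0.1:4646/v1/jobs?namespace=team-a"

	resp, err := http.Get(url)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Only jobs registered in the "team-a" namespace are returned;
	// jobs in other namespaces stay invisible to this query.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}
```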

### Resource Quotas
Resource Quotas enable an operator to limit resource consumption across teams or projects to reduce waste and align budgets. In Nomad Enterprise, operators can define quota specifications and apply them to namespaces. When a quota is attached to a namespace, the jobs within the namespace may not consume more resources than the quota specification allows.

This allows operators to partition a shared cluster and ensure that no single actor can consume all of the cluster's resources.

See the [Resource Quotas Guide](https://www.nomadproject.io/docs/enterprise/quotas/index.html) for a thorough overview.
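
For a sense of how quota specifications look from the API side, here is a hedged sketch that lists them; the `/v1/quotas` endpoint and the `Name`/`Description` field names are taken from the Quotas HTTP API as we understand it and should be confirmed against your Nomad version.

```
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// Partial view of a quota specification; field names assumed from the
// Quotas HTTP API and worth checking against your Nomad version.
type quotaSpec struct {
	Name        string
	Description string
}

func main() {
	resp, err := http.Get("http://127.0.0.1:4646/v1/quotas")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var quotas []quotaSpec
	if err := json.NewDecoder(resp.Body).Decode(&quotas); err != nil {
		log.Fatal(err)
	}
	for _, q := range quotas {
		fmt.Printf("%s: %s\n", q.Name, q.Description)
	}
}
```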

### Sentinel Policies
In Nomad Enterprise, operators can create Sentinel policies for fine-grained policy enforcement. Sentinel policies build on top of the ACL system and allow operators to define policies such as disallowing jobs to be submitted to production on Fridays or only allowing users to run jobs that use pre-authorized Docker images. Sentinel policies are defined as code, giving operators considerable flexibility to meet compliance requirements.

See the [Sentinel Policies Guide](https://www.nomadproject.io/guides/governance-and-policy/sentinel/sentinel-policy.html) for a thorough overview.

### Preemption
When a Nomad cluster is at capacity for a given set of placement constraints, any allocations that result from a newly scheduled service or batch job will remain in the pending state until sufficient resources become available - regardless of the defined priority.

Preemption enables Nomad's scheduler to automatically evict lower priority allocations of service and batch jobs so that allocations from higher priority jobs can be placed. This behavior ensures that critical workloads can run when resources are limited or when partial outages require workloads to be rescheduled across a smaller set of client nodes.

## Try Nomad Enterprise
Click [here](https://www.hashicorp.com/go/nomad-enterprise) to set up a demo or request a trial
of Nomad Enterprise.

@@ -1,26 +0,0 @@
---
layout: "docs"
page_title: "Nomad Enterprise Namespaces"
sidebar_current: "docs-enterprise-namespaces"
description: |-
  Nomad Enterprise provides support for namespaces, which allows jobs and their
  associated objects to be segmented from each other and other users of the
  cluster.
---

# Nomad Enterprise Namespaces

In [Nomad Enterprise](https://www.hashicorp.com/go/nomad-enterprise), a shared
cluster can be partitioned into [namespaces](/guides/governance-and-policy/namespaces.html) which allows
jobs and their associated objects to be isolated from each other and other users
of the cluster.

Namespaces enhance the usability of a shared cluster by isolating teams from the
jobs of others, by providing fine-grained access control to jobs when coupled with
[ACLs](/guides/security/acl.html), and by preventing bad actors from negatively impacting
the whole cluster when used in conjunction with
[resource quotas](/guides/governance-and-policy/quotas.html). See the
[Namespaces Guide](/guides/governance-and-policy/namespaces.html) for a thorough overview.

Click [here](https://www.hashicorp.com/go/nomad-enterprise) to set up a demo or
request a trial of Nomad Enterprise.

@@ -1,28 +0,0 @@
---
layout: "docs"
page_title: "Nomad Enterprise Preemption"
sidebar_current: "docs-enterprise-preemption"
description: |-
  Nomad Enterprise preemption capabilities enable the scheduler to temporarily
  evict lower priority allocations for service and batch jobs so that
  higher priority allocations can be placed.
---

# Nomad Enterprise Preemption

When a Nomad cluster is at capacity for a given set of placement constraints, any allocations
that result from a newly scheduled service or batch job will remain in the pending state until
sufficient resources become available - regardless of the defined priority.

[Preemption](/docs/internals/scheduling/preemption.html) capabilities in
[Nomad Enterprise](https://www.hashicorp.com/go/nomad-enterprise) enable the scheduler to temporarily
evict lower [priority](/docs/job-specification/job.html#priority) allocations from service and
batch jobs so that the allocations from higher priority jobs can be placed. This behavior
ensures that critical workloads can run when resources are limited or when partial outages require
workloads to be rescheduled across a smaller set of client nodes.

See the [Preemption internals documentation](/docs/internals/scheduling/preemption.html) for a
more detailed overview. Preemption for service and batch jobs can be enabled using the [scheduler config API endpoint](/api/operator.html#update-scheduler-configuration).
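
As an illustration, the following hedged Go sketch drives that endpoint directly; the `/v1/operator/scheduler/configuration` path is documented, while `BatchSchedulerEnabled` and `ServiceSchedulerEnabled` are assumed Enterprise field names and the agent address is a placeholder.

```
package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Enable preemption for service and batch jobs alongside system jobs.
	// SystemSchedulerEnabled is the open source flag; the Batch/Service
	// flags are assumed Enterprise fields, so confirm against your version.
	payload := []byte(`{
	  "PreemptionConfig": {
	    "SystemSchedulerEnabled": true,
	    "BatchSchedulerEnabled": true,
	    "ServiceSchedulerEnabled": true
	  }
	}`)

	url := "http://127.0.0.1:4646/v1/operator/scheduler/configuration"
	req, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("scheduler configuration update:", resp.Status)
}
```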

Click [here](https://www.hashicorp.com/go/nomad-enterprise) to set up a demo or
request a trial of Nomad Enterprise.

@@ -1,23 +0,0 @@
---
layout: "docs"
page_title: "Nomad Enterprise Resource Quotas"
sidebar_current: "docs-enterprise-quotas"
description: |-
  Nomad Enterprise provides support for applying resource quotas to namespaces
  which restricts the overall resources that jobs within the namespace are
  allowed to consume.
---

# Nomad Enterprise Resource Quotas

In [Nomad Enterprise](https://www.hashicorp.com/go/nomad-enterprise), operators can
define [quota specifications](/guides/governance-and-policy/quotas.html) and apply them to namespaces.
When a quota is attached to a namespace, the jobs within the namespace may not
consume more resources than the quota specification allows.

This allows operators to partition a shared cluster and ensure that no single
actor can consume all of the cluster's resources. See the
[Resource Quotas Guide](/guides/governance-and-policy/quotas.html) for more details.

Click [here](https://www.hashicorp.com/go/nomad-enterprise) to set up a demo or
request a trial of Nomad Enterprise.

@@ -1,36 +0,0 @@
---
layout: "docs"
page_title: "Nomad Enterprise Sentinel Policy Enforcement"
sidebar_current: "docs-enterprise-sentinel"
description: |-
  Nomad Enterprise provides support for policy enforcement using Sentinel.
---

# Nomad Enterprise Sentinel Policy Enforcement

In [Nomad Enterprise](https://www.hashicorp.com/go/nomad-enterprise), operators can
create [Sentinel policies](/guides/governance-and-policy/sentinel/sentinel-policy.html) for fine-grained policy
enforcement. Sentinel policies build on top of the ACL system and allow operators to define
policies such as disallowing jobs to be submitted to production on
Fridays. These extremely rich policies are defined as code. For example, to
restrict jobs to only using the Docker driver, the operator would define and apply
the following policy:

```
# Only allows Docker based tasks
main = rule { all_drivers_docker }

# all_drivers_docker checks that all the drivers in use are Docker
all_drivers_docker = rule {
    all job.task_groups as tg {
        all tg.tasks as task {
            task.driver is "docker"
        }
    }
}
```
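
Policies like this are managed through the Sentinel HTTP API. Below is a hedged Go sketch that uploads a policy, assuming a local agent, an Enterprise cluster with ACLs bootstrapped, and request field names (`Scope`, `EnforcementLevel`, `Policy`) as we understand the Sentinel policies API; verify both against your Nomad version.

```
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Assumed request shape for upserting a Sentinel policy (Enterprise).
	// The Policy body here is abbreviated; in practice include the full
	// rule set shown above.
	policy := map[string]string{
		"Name":             "all-drivers-docker",
		"Description":      "Only allow Docker based tasks",
		"Scope":            "submit-job",
		"EnforcementLevel": "soft-mandatory",
		"Policy":           "main = rule { all_drivers_docker }\n",
	}
	body, err := json.Marshal(policy)
	if err != nil {
		log.Fatal(err)
	}

	url := "http://127.0.0.1:4646/v1/sentinel/policy/all-drivers-docker"
	req, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	// A management ACL token is normally required; placeholder value here.
	req.Header.Set("X-Nomad-Token", "REPLACE_WITH_MANAGEMENT_TOKEN")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("sentinel policy upsert:", resp.Status)
}
```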

See the [Sentinel Policies Guide](/guides/governance-and-policy/sentinel/sentinel-policy.html) for additional details and examples.

Click [here](https://www.hashicorp.com/go/nomad-enterprise) to set up a demo or
request a trial of Nomad Enterprise.

@@ -8,6 +8,7 @@ description: |-

# Nomad Operations

The Nomad Operations guides section provides best practices and guidance for
operating Nomad in a real-world production setting. Please navigate the
appropriate sub-sections for more information.
The Nomad Operations guides section provides best practices and guidance for
operating Nomad in a real-world production setting.

Please navigate the appropriate sub-sections for more information.

@@ -529,20 +529,26 @@
<li<%= sidebar_current("docs-enterprise") %>>
  <a href="/docs/enterprise/index.html">Nomad Enterprise</a>
  <ul class="nav">
    <li<%= sidebar_current("docs-enterprise-upgrades") %>>
      <a href="/docs/enterprise/index.html#automated-upgrades">Automated Upgrades</a>
    </li>
    <li<%= sidebar_current("docs-enterprise-scalability") %>>
      <a href="/docs/enterprise/index.html#enhanced-read-scalability">Enhanced Read Scalability</a>
    </li>
    <li<%= sidebar_current("docs-enterprise-redundancy") %>>
      <a href="/docs/enterprise/index.html#redundancy-zones">Redundancy Zones</a>
    </li>
    <li<%= sidebar_current("docs-enterprise-namespaces") %>>
      <a href="/docs/enterprise/namespaces/index.html">Namespaces</a>
      <a href="/docs/enterprise/index.html#namespaces">Namespaces</a>
    </li>
    <li<%= sidebar_current("docs-enterprise-quotas") %>>
      <a href="/docs/enterprise/quotas/index.html">Resource Quotas</a>
    </li>
    <li<%= sidebar_current("docs-enterprise-sentinel") %>>
      <a href="/docs/enterprise/sentinel/index.html">Sentinel Policies</a>
      <a href="/docs/enterprise/index.html#resource-quotas">Resource Quotas</a>
    </li>
    <li<%= sidebar_current("docs-enterprise-preemption") %>>
      <a href="/docs/enterprise/preemption/index.html">Preemption</a>
      <a href="/docs/enterprise/index.html#preemption">Preemption</a>
    </li>
    <li<%= sidebar_current("docs-enterprise-autopilot") %>>
      <a href="/docs/enterprise/autopilot/index.html">Advanced Autopilot</a>
    <li<%= sidebar_current("docs-enterprise-sentinel") %>>
      <a href="/docs/enterprise/index.html#sentinel-policies">Sentinel Policies</a>
    </li>
  </ul>
</li>