Merge branch 'master' into b-drain-batch
@@ -48,7 +48,7 @@ IMPROVEMENTS:
* driver/lxc: Add volumes config to LXC driver [[GH-3687](https://github.com/hashicorp/nomad/issues/3687)]
* driver/rkt: Allow overriding group [[GH-3990](https://github.com/hashicorp/nomad/issues/3990)]
* telemetry: Support DataDog tags [[GH-3839](https://github.com/hashicorp/nomad/issues/3839)]
* vault: Allow Nomad to create orphaned tokens for allocations [[GH-3922](https://github.com/hashicorp/nomad/issues/3922)]
* vault: Allow Nomad to create orphaned tokens for allocations [[GH-3992](https://github.com/hashicorp/nomad/issues/3992)]

BUG FIXES:
* core: Fix search endpoint forwarding for multi-region clusters [[GH-3680](https://github.com/hashicorp/nomad/issues/3680)]
@@ -39,7 +39,7 @@ func (r *AllocRunner) watchHealth(ctx context.Context) {
}

// No need to watch health as it's already set
if alloc.DeploymentStatus.IsHealthy() || alloc.DeploymentStatus.IsUnhealthy() {
if alloc.DeploymentStatus.HasHealth() {
return
}
@@ -136,7 +136,7 @@ func TestAllocRunner_DeploymentHealth_Unhealthy_BadStart(t *testing.T) {
if last == nil {
return false, fmt.Errorf("No updates")
}
if last.DeploymentStatus == nil || last.DeploymentStatus.Healthy == nil {
if !last.DeploymentStatus.HasHealth() {
return false, fmt.Errorf("want deployment status unhealthy; got unset")
} else if *last.DeploymentStatus.Healthy {
return false, fmt.Errorf("want deployment status unhealthy; got healthy")

@@ -186,7 +186,7 @@ func TestAllocRunner_DeploymentHealth_Unhealthy_Deadline(t *testing.T) {
if last == nil {
return false, fmt.Errorf("No updates")
}
if last.DeploymentStatus == nil || last.DeploymentStatus.Healthy == nil {
if !last.DeploymentStatus.HasHealth() {
return false, fmt.Errorf("want deployment status unhealthy; got unset")
} else if *last.DeploymentStatus.Healthy {
return false, fmt.Errorf("want deployment status unhealthy; got healthy")

@@ -240,7 +240,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_NoChecks(t *testing.T) {
if last == nil {
return false, fmt.Errorf("No updates")
}
if last.DeploymentStatus == nil || last.DeploymentStatus.Healthy == nil {
if !last.DeploymentStatus.HasHealth() {
return false, fmt.Errorf("want deployment status unhealthy; got unset")
} else if !*last.DeploymentStatus.Healthy {
return false, fmt.Errorf("want deployment status healthy; got unhealthy")

@@ -330,7 +330,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_Checks(t *testing.T) {
if last == nil {
return false, fmt.Errorf("No updates")
}
if last.DeploymentStatus == nil || last.DeploymentStatus.Healthy == nil {
if !last.DeploymentStatus.HasHealth() {
return false, fmt.Errorf("want deployment status unhealthy; got unset")
} else if !*last.DeploymentStatus.Healthy {
return false, fmt.Errorf("want deployment status healthy; got unhealthy")

@@ -396,7 +396,7 @@ func TestAllocRunner_DeploymentHealth_Unhealthy_Checks(t *testing.T) {
if last == nil {
return false, fmt.Errorf("No updates")
}
if last.DeploymentStatus == nil || last.DeploymentStatus.Healthy == nil {
if !last.DeploymentStatus.HasHealth() {
return false, fmt.Errorf("want deployment status unhealthy; got unset")
} else if *last.DeploymentStatus.Healthy {
return false, fmt.Errorf("want deployment status unhealthy; got healthy")

@@ -443,7 +443,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_UpdatedDeployment(t *testing.T) {
if last == nil {
return false, fmt.Errorf("No updates")
}
if last.DeploymentStatus == nil || last.DeploymentStatus.Healthy == nil {
if !last.DeploymentStatus.HasHealth() {
return false, fmt.Errorf("want deployment status unhealthy; got unset")
} else if !*last.DeploymentStatus.Healthy {
return false, fmt.Errorf("want deployment status healthy; got unhealthy")

@@ -464,7 +464,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_UpdatedDeployment(t *testing.T) {
if newCount <= oldCount {
return false, fmt.Errorf("No new updates")
}
if last.DeploymentStatus == nil || last.DeploymentStatus.Healthy == nil {
if !last.DeploymentStatus.HasHealth() {
return false, fmt.Errorf("want deployment status unhealthy; got unset")
} else if !*last.DeploymentStatus.Healthy {
return false, fmt.Errorf("want deployment status healthy; got unhealthy")

@@ -505,7 +505,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_Migration(t *testing.T) {
if last == nil {
return false, fmt.Errorf("No updates")
}
if last.DeploymentStatus == nil || last.DeploymentStatus.Healthy == nil {
if !last.DeploymentStatus.HasHealth() {
return false, fmt.Errorf("want deployment status unhealthy; got unset")
} else if !*last.DeploymentStatus.Healthy {
return false, fmt.Errorf("want deployment status healthy; got unhealthy")
@@ -430,8 +430,8 @@ func (c *NodeStatusCommand) outputNodeEvent(events []*api.NodeEvent) {

for i, event := range events {
timestamp := formatTime(event.Timestamp)
subsystem := event.Subsystem
msg := formatEventMessage(event.Message, event.Details["driver"])
subsystem := formatEventSubsystem(event.Subsystem, event.Details["driver"])
msg := event.Message
if c.verbose {
details := formatEventDetails(event.Details)
nodeEvents[size-i] = fmt.Sprintf("%s|%s|%s|%s", timestamp, subsystem, msg, details)

@@ -442,12 +442,14 @@ func (c *NodeStatusCommand) outputNodeEvent(events []*api.NodeEvent) {
c.Ui.Output(formatList(nodeEvents))
}

func formatEventMessage(message, driverName string) string {
func formatEventSubsystem(subsystem, driverName string) string {
if driverName == "" {
return message
return subsystem
}

return fmt.Sprintf("Driver: %s, %s", driverName, message)
// If this event is for a driver, append the driver name to make the message
// clearer
return fmt.Sprintf("Driver: %s", driverName)
}

func formatEventDetails(details map[string]string) string {
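As a quick illustration of how the renamed helper changes the subsystem column for driver events, here is a self-contained sketch. The helper body is copied from the hunk above; the sample values passed in `main` are made up for demonstration only.

```go
package main

import "fmt"

// formatEventSubsystem mirrors the helper introduced above: for driver
// events it replaces the subsystem column with "Driver: <name>".
func formatEventSubsystem(subsystem, driverName string) string {
	if driverName == "" {
		return subsystem
	}

	// If this event is for a driver, append the driver name to make the
	// message clearer
	return fmt.Sprintf("Driver: %s", driverName)
}

func main() {
	fmt.Println(formatEventSubsystem("Cluster", ""))      // Cluster
	fmt.Println(formatEventSubsystem("Driver", "docker")) // Driver: docker
}
```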
@@ -10,14 +10,14 @@ job "demo2" {

config {
command = "bash"
args = ["-c", "sleep 5000"]
args = ["-c", "sleep 7000"]
}
}

update {
max_parallel = 1
min_healthy_time = "5s"
healthy_deadline = "10m"
min_healthy_time = "1s"
healthy_deadline = "1m"
auto_revert = false
}

@@ -28,8 +28,6 @@ job "demo2" {

reschedule {
unlimited = "true"

# attempts = 0
}
}
}
@@ -10,14 +10,14 @@ job "demo3" {

config {
command = "bash"
args = ["-c", "ssleep 5000"]
args = ["-c", "sleep 5000"]
}
}

update {
max_parallel = 1
min_healthy_time = "5s"
healthy_deadline = "10m"
min_healthy_time = "1s"
healthy_deadline = "1m"
auto_revert = true
}
@@ -239,6 +239,39 @@ var _ = Describe("Server Side Restart Tests", func() {

})

Context("Reschedule with max parallel/auto_revert false", func() {
BeforeEach(func() {
specFile = "input/rescheduling_maxp.hcl"
})
It("Should have running allocs and successful deployment", func() {
Eventually(allocStatuses, 3*time.Second, time.Second).Should(
ConsistOf([]string{"running", "running", "running"}))

time.Sleep(2 * time.Second)
Eventually(deploymentStatus(), 2*time.Second, time.Second).Should(
ContainElement(structs.DeploymentStatusSuccessful))
})

Context("Updating job to make allocs fail", func() {
It("Should have no rescheduled allocs", func() {
job.TaskGroups[0].Tasks[0].Config["args"] = []string{"-c", "lol"}
_, _, err := jobs.Register(job, nil)
Expect(err).ShouldNot(HaveOccurred())
Eventually(allocStatusesRescheduled, 2*time.Second, time.Second).Should(BeEmpty())

// Should have 1 failed from max_parallel
Eventually(allocStatuses, 3*time.Second, time.Second).Should(
ConsistOf([]string{"complete", "failed", "running", "running"}))

// Verify new deployment and its status
time.Sleep(2 * time.Second)
Eventually(deploymentStatus(), 2*time.Second, time.Second).Should(
ContainElement(structs.DeploymentStatusFailed))
})
})

})

})

})
@@ -369,10 +369,7 @@ func handleTaskGroup(snap *state.StateSnapshot, batch bool, tg *structs.TaskGrou

// If the service alloc is running and has its deployment status set, it
// is considered healthy from a migration standpoint.
if !batch &&
!alloc.TerminalStatus() &&
alloc.DeploymentStatus != nil &&
alloc.DeploymentStatus.Healthy != nil {
if !batch && !alloc.TerminalStatus() && alloc.DeploymentStatus.HasHealth() {
healthy++
}
@@ -42,7 +42,7 @@ func allocPromoter(t *testing.T, ctx context.Context,
continue
}

if alloc.DeploymentStatus != nil && alloc.DeploymentStatus.Healthy != nil {
if alloc.DeploymentStatus.HasHealth() {
continue
}
newAlloc := alloc.Copy()
@@ -269,6 +269,9 @@ func (n *nomadFSM) applyUpsertNode(buf []byte, index uint64) interface{} {
panic(fmt.Errorf("failed to decode request: %v", err))
}

// Handle upgrade paths
req.Node.Canonicalize()

if err := n.state.UpsertNode(index, req.Node); err != nil {
n.logger.Printf("[ERR] nomad.fsm: UpsertNode failed: %v", err)
return err

@@ -1048,6 +1051,10 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
if err := dec.Decode(node); err != nil {
return err
}

// Handle upgrade paths
node.Canonicalize()

if err := restore.NodeRestore(node); err != nil {
return err
}
@@ -176,6 +176,35 @@ func TestFSM_UpsertNode(t *testing.T) {

}

func TestFSM_UpsertNode_Canonicalize(t *testing.T) {
t.Parallel()
require := require.New(t)

fsm := testFSM(t)
fsm.blockedEvals.SetEnabled(true)

// Setup a node without eligiblity
node := mock.Node()
node.SchedulingEligibility = ""

req := structs.NodeRegisterRequest{
Node: node,
}
buf, err := structs.Encode(structs.NodeRegisterRequestType, req)
require.Nil(err)

resp := fsm.Apply(makeLog(buf))
require.Nil(resp)

// Verify we are registered
ws := memdb.NewWatchSet()
n, err := fsm.State().NodeByID(ws, req.Node.ID)
require.Nil(err)
require.NotNil(n)
require.EqualValues(1, n.CreateIndex)
require.Equal(structs.NodeSchedulingEligible, n.SchedulingEligibility)
}

func TestFSM_DeregisterNode(t *testing.T) {
t.Parallel()
fsm := testFSM(t)
@@ -2196,15 +2225,18 @@ func TestFSM_SnapshotRestore_Nodes(t *testing.T) {
state := fsm.State()
node1 := mock.Node()
state.UpsertNode(1000, node1)

// Upgrade this node
node2 := mock.Node()
node2.SchedulingEligibility = ""
state.UpsertNode(1001, node2)

// Verify the contents
fsm2 := testSnapshotRestore(t, fsm)
state2 := fsm2.State()
ws := memdb.NewWatchSet()
out1, _ := state2.NodeByID(ws, node1.ID)
out2, _ := state2.NodeByID(ws, node2.ID)
out1, _ := state2.NodeByID(nil, node1.ID)
out2, _ := state2.NodeByID(nil, node2.ID)
node2.SchedulingEligibility = structs.NodeSchedulingEligible
if !reflect.DeepEqual(node1, out1) {
t.Fatalf("bad: \n%#v\n%#v", out1, node1)
}
@@ -3246,8 +3246,8 @@ func (s *StateStore) updateDeploymentWithAlloc(index uint64, alloc, existing *st

// If there was no existing allocation, this is a placement and we increment
// the placement
existingHealthSet := existing != nil && existing.DeploymentStatus != nil && existing.DeploymentStatus.Healthy != nil
allocHealthSet := alloc.DeploymentStatus != nil && alloc.DeploymentStatus.Healthy != nil
existingHealthSet := existing != nil && existing.DeploymentStatus.HasHealth()
allocHealthSet := alloc.DeploymentStatus.HasHealth()
if existing == nil || existing.DeploymentID != alloc.DeploymentID {
placed++
} else if !existingHealthSet && allocHealthSet {
@@ -1426,6 +1426,23 @@ func (n *Node) Ready() bool {
return n.Status == NodeStatusReady && !n.Drain && n.SchedulingEligibility == NodeSchedulingEligible
}

func (n *Node) Canonicalize() {
if n == nil {
return
}

// COMPAT Remove in 0.10
// In v0.8.0 we introduced scheduling eligibility, so we need to set it for
// upgrading nodes
if n.SchedulingEligibility == "" {
if n.Drain {
n.SchedulingEligibility = NodeSchedulingIneligible
} else {
n.SchedulingEligibility = NodeSchedulingEligible
}
}
}

func (n *Node) Copy() *Node {
if n == nil {
return nil
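The upgrade-path behavior of `Canonicalize` (an empty `SchedulingEligibility` is derived from `Drain`) can be shown with a small standalone sketch. The struct and constants below are trimmed stand-ins for the real `structs` package, reproduced here only for illustration.

```go
package main

import "fmt"

const (
	NodeSchedulingEligible   = "eligible"
	NodeSchedulingIneligible = "ineligible"
)

// Node is a trimmed stand-in for structs.Node with only the fields
// Canonicalize touches.
type Node struct {
	Drain                 bool
	SchedulingEligibility string
}

// Canonicalize fills in SchedulingEligibility for nodes registered by
// pre-0.8 clients, mirroring the logic added in this diff.
func (n *Node) Canonicalize() {
	if n == nil {
		return
	}
	if n.SchedulingEligibility == "" {
		if n.Drain {
			n.SchedulingEligibility = NodeSchedulingIneligible
		} else {
			n.SchedulingEligibility = NodeSchedulingEligible
		}
	}
}

func main() {
	upgraded := &Node{} // node from an older client, eligibility unset
	upgraded.Canonicalize()
	fmt.Println(upgraded.SchedulingEligibility) // eligible

	draining := &Node{Drain: true}
	draining.Canonicalize()
	fmt.Println(draining.SchedulingEligibility) // ineligible
}
```

The FSM hunks earlier apply this on both the Raft apply path and snapshot restore, so pre-upgrade nodes always end up with an eligibility value.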
@@ -5730,16 +5747,30 @@ func (a *Allocation) RescheduleEligible(reschedulePolicy *ReschedulePolicy, fail
}

// LastEventTime is the time of the last task event in the allocation.
// It is used to determine allocation failure time.
// It is used to determine allocation failure time. If the FinishedAt field
// is not set, the alloc's modify time is used
func (a *Allocation) LastEventTime() time.Time {
var lastEventTime time.Time
if a.TaskStates != nil {
for _, e := range a.TaskStates {
if lastEventTime.IsZero() || e.FinishedAt.After(lastEventTime) {
lastEventTime = e.FinishedAt
for _, s := range a.TaskStates {
if lastEventTime.IsZero() || s.FinishedAt.After(lastEventTime) {
lastEventTime = s.FinishedAt
}
}
}
// If no tasks have FinsihedAt set, examine task events
if lastEventTime.IsZero() {
for _, s := range a.TaskStates {
for _, e := range s.Events {
if lastEventTime.IsZero() || e.Time > lastEventTime.UnixNano() {
lastEventTime = time.Unix(0, e.Time).UTC()
}
}
}
}
if lastEventTime.IsZero() {
return time.Unix(0, a.ModifyTime).UTC()
}
return lastEventTime
}
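A small worked example of the new fallback order in `LastEventTime` (latest `FinishedAt`, then the latest raw task event, then `ModifyTime`). The types below are simplified stand-ins, not the real `structs` package, and exist only to make the ordering concrete.

```go
package main

import (
	"fmt"
	"time"
)

type TaskEvent struct {
	Time int64 // unix nanoseconds
}

type TaskState struct {
	FinishedAt time.Time
	Events     []*TaskEvent
}

type Allocation struct {
	ModifyTime int64 // unix nanoseconds
	TaskStates map[string]*TaskState
}

// LastEventTime mirrors the fallback order in the diff: FinishedAt first,
// then task events, then the allocation's modify time.
func (a *Allocation) LastEventTime() time.Time {
	var last time.Time
	for _, s := range a.TaskStates {
		if last.IsZero() || s.FinishedAt.After(last) {
			last = s.FinishedAt
		}
	}
	if last.IsZero() {
		for _, s := range a.TaskStates {
			for _, e := range s.Events {
				if last.IsZero() || e.Time > last.UnixNano() {
					last = time.Unix(0, e.Time).UTC()
				}
			}
		}
	}
	if last.IsZero() {
		return time.Unix(0, a.ModifyTime).UTC()
	}
	return last
}

func main() {
	now := time.Now().UTC()
	alloc := &Allocation{
		ModifyTime: now.UnixNano(),
		TaskStates: map[string]*TaskState{
			"web": {Events: []*TaskEvent{{Time: now.Add(-10 * time.Minute).UnixNano()}}},
		},
	}
	// No FinishedAt is set, so the latest task event time wins.
	fmt.Println(alloc.LastEventTime().Equal(now.Add(-10 * time.Minute))) // true
}
```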
@@ -6079,6 +6110,11 @@ type AllocDeploymentStatus struct {
ModifyIndex uint64
}

// HasHealth returns true if the allocation has its health set.
func (a *AllocDeploymentStatus) HasHealth() bool {
	return a != nil && a.Healthy != nil
}

// IsHealthy returns if the allocation is marked as healthy as part of a
// deployment
func (a *AllocDeploymentStatus) IsHealthy() bool {
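This nil-safe `HasHealth` helper is what lets the call sites earlier in the diff collapse their two-part `!= nil` checks. A minimal sketch of the pattern, using a standalone struct for illustration rather than the real `structs` package:

```go
package main

import "fmt"

// AllocDeploymentStatus reproduces only the field HasHealth needs.
type AllocDeploymentStatus struct {
	Healthy *bool
}

// HasHealth is safe to call on a nil receiver, so callers no longer need
// `status != nil && status.Healthy != nil`.
func (a *AllocDeploymentStatus) HasHealth() bool {
	return a != nil && a.Healthy != nil
}

func main() {
	var unset *AllocDeploymentStatus
	healthy := true

	fmt.Println(unset.HasHealth())                                       // false, no nil panic
	fmt.Println((&AllocDeploymentStatus{}).HasHealth())                  // false, health not decided yet
	fmt.Println((&AllocDeploymentStatus{Healthy: &healthy}).HasHealth()) // true
}
```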
@@ -2655,35 +2655,34 @@ func TestAllocation_LastEventTime(t *testing.T) {
taskState map[string]*TaskState
expectedLastEventTime time.Time
}
var timeZero time.Time

t1 := time.Now()
t1 := time.Now().UTC()

testCases := []testCase{
{
desc: "nil task state",
expectedLastEventTime: timeZero,
expectedLastEventTime: t1,
},
{
desc: "empty task state",
taskState: make(map[string]*TaskState),
expectedLastEventTime: timeZero,
expectedLastEventTime: t1,
},
{
desc: "Finished At not set",
taskState: map[string]*TaskState{"foo": {State: "start",
StartedAt: t1.Add(-2 * time.Hour)}},
expectedLastEventTime: timeZero,
expectedLastEventTime: t1,
},
{
desc: "One finished event",
desc: "One finished ",
taskState: map[string]*TaskState{"foo": {State: "start",
StartedAt: t1.Add(-2 * time.Hour),
FinishedAt: t1.Add(-1 * time.Hour)}},
expectedLastEventTime: t1.Add(-1 * time.Hour),
},
{
desc: "Multiple events",
desc: "Multiple task groups",
taskState: map[string]*TaskState{"foo": {State: "start",
StartedAt: t1.Add(-2 * time.Hour),
FinishedAt: t1.Add(-1 * time.Hour)},
@@ -2692,10 +2691,33 @@ func TestAllocation_LastEventTime(t *testing.T) {
FinishedAt: t1.Add(-40 * time.Minute)}},
expectedLastEventTime: t1.Add(-40 * time.Minute),
},
{
desc: "No finishedAt set, one task event",
taskState: map[string]*TaskState{"foo": {
State: "run",
StartedAt: t1.Add(-2 * time.Hour),
Events: []*TaskEvent{
{Type: "start", Time: t1.Add(-20 * time.Minute).UnixNano()},
}},
},
expectedLastEventTime: t1.Add(-20 * time.Minute),
},
{
desc: "No finishedAt set, many task events",
taskState: map[string]*TaskState{"foo": {
State: "run",
StartedAt: t1.Add(-2 * time.Hour),
Events: []*TaskEvent{
{Type: "start", Time: t1.Add(-20 * time.Minute).UnixNano()},
{Type: "status change", Time: t1.Add(-10 * time.Minute).UnixNano()},
}},
},
expectedLastEventTime: t1.Add(-10 * time.Minute),
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
alloc := &Allocation{}
alloc := &Allocation{CreateTime: t1.UnixNano(), ModifyTime: t1.UnixNano()}
alloc.TaskStates = tc.taskState
require.Equal(t, tc.expectedLastEventTime, alloc.LastEventTime())
})
@@ -2727,10 +2749,11 @@ func TestAllocation_NextDelay(t *testing.T) {
reschedulePolicy: &ReschedulePolicy{
DelayFunction: "constant",
Delay: 5 * time.Second,
Unlimited: true,
},
alloc: &Allocation{ClientStatus: AllocClientStatusFailed},
expectedRescheduleTime: time.Time{},
expectedRescheduleEligible: false,
alloc: &Allocation{ClientStatus: AllocClientStatusFailed, ModifyTime: now.UnixNano()},
expectedRescheduleTime: now.UTC().Add(5 * time.Second),
expectedRescheduleEligible: true,
},
{
desc: "linear delay, unlimited restarts, no reschedule tracker",
@@ -3656,3 +3679,19 @@ func TestBatchFuture(t *testing.T) {
t.Fatalf("bad: %d", bf.Index())
}
}

func TestNode_Canonicalize(t *testing.T) {
t.Parallel()
require := require.New(t)

// Make sure the eligiblity is set properly
node := &Node{}
node.Canonicalize()
require.Equal(NodeSchedulingEligible, node.SchedulingEligibility)

node = &Node{
Drain: true,
}
node.Canonicalize()
require.Equal(NodeSchedulingIneligible, node.SchedulingEligibility)
}
@@ -10,7 +10,27 @@ export default ApplicationAdapter.extend({
store: service(),

xhrs: computed(function() {
return {};
return {
list: {},
track(key, xhr) {
if (this.list[key]) {
this.list[key].push(xhr);
} else {
this.list[key] = [xhr];
}
},
cancel(key) {
while (this.list[key] && this.list[key].length) {
this.remove(key, this.list[key][0]);
}
},
remove(key, xhr) {
if (this.list[key]) {
xhr.abort();
this.list[key].removeObject(xhr);
}
},
};
}),

ajaxOptions() {

@@ -22,9 +42,9 @@ export default ApplicationAdapter.extend({
if (previousBeforeSend) {
previousBeforeSend(...arguments);
}
this.get('xhrs')[key] = jqXHR;
this.get('xhrs').track(key, jqXHR);
jqXHR.always(() => {
delete this.get('xhrs')[key];
this.get('xhrs').remove(key, jqXHR);
});
};

@@ -129,10 +149,7 @@ export default ApplicationAdapter.extend({
return;
}
const url = this.urlForFindRecord(id, modelName);
const xhr = this.get('xhrs')[url];
if (xhr) {
xhr.abort();
}
this.get('xhrs').cancel(url);
},

cancelFindAll(modelName) {

@@ -144,10 +161,7 @@ export default ApplicationAdapter.extend({
if (params) {
url = `${url}?${params}`;
}
const xhr = this.get('xhrs')[url];
if (xhr) {
xhr.abort();
}
this.get('xhrs').cancel(url);
},

cancelReloadRelationship(model, relationshipName) {

@@ -161,10 +175,7 @@ export default ApplicationAdapter.extend({
);
} else {
const url = model[relationship.kind](relationship.key).link();
const xhr = this.get('xhrs')[url];
if (xhr) {
xhr.abort();
}
this.get('xhrs').cancel(url);
}
},
});
ui/app/components/job-page/system.js (new file, 3 lines)
@@ -0,0 +1,3 @@
import AbstractJobPage from './abstract';

export default AbstractJobPage.extend();
@@ -257,6 +257,41 @@ test('relationship reloads can be canceled', function(assert) {
});
});

test('requests can be canceled even if multiple requests for the same URL were made', function(assert) {
const { pretender } = this.server;
const jobId = JSON.stringify(['job-1', 'default']);

pretender.get('/v1/job/:id', () => [200, {}, '{}'], true);

this.subject().findRecord(null, { modelName: 'job' }, jobId, {
reload: true,
adapterOptions: { watch: true },
});

this.subject().findRecord(null, { modelName: 'job' }, jobId, {
reload: true,
adapterOptions: { watch: true },
});

const { request: xhr } = pretender.requestReferences[0];
assert.equal(xhr.status, 0, 'Request is still pending');
assert.equal(pretender.requestReferences.length, 2, 'Two findRecord requests were made');
assert.equal(
pretender.requestReferences.mapBy('url').uniq().length,
1,
'The two requests have the same URL'
);

// Schedule the cancelation before waiting
run.next(() => {
this.subject().cancelFindRecord('job', jobId);
});

return wait().then(() => {
assert.ok(xhr.aborted, 'Request was aborted');
});
});

function makeMockModel(id, options) {
return assign(
{
@@ -153,6 +153,26 @@ $ curl \
"NodeClass": "",
"ComputedClass": "v1:10952212473894849978",
"Drain": false,
"Events": [
{
"CreateIndex": 0,
"Details": null,
"Message": "Node Registered",
"Subsystem": "Cluster",
"Timestamp": "2018-03-29T16:26:48Z"
},
{
"CreateIndex": 11,
"Details":
{
"driver": "docker"
},
"Message": "Driver docker is not detected",
"Subsystem": "Driver",
"Timestamp": "2018-03-29T16:27:48.556094143Z"
}
],

"Status": "ready",
"StatusDescription": "",
"StatusUpdatedAt": 1495748907,
@@ -660,3 +680,30 @@ $ curl \
"KnownLeader": false
}
```

#### Field Reference

- Events - A list of the last 10 node events for this node. A node event is a
high level concept of noteworthy events for a node.

Each node event has the following fields:

- `Message` - The specific message for the event, detailing what occurred.

- `Subsystem` - The subsystem where the node event took place. Subsysystems
include:

- `Drain` - The Nomad server draining subsystem.

- `Driver` - The Nomad client driver subsystem.

- `Heartbeat` - Either Nomad client or server heartbeating subsystem.

- `Cluster` - Nomad server cluster management subsystem.

- `Details` - Any further details about the event, formatted as a key/value
pair.

- `Timestamp` - Each node event has an ISO 8601 timestamp.

- `CreateIndex` - The Raft index at which the event was committed.
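For readers consuming this endpoint directly, here is a minimal sketch of decoding the new `Events` field with the Go standard library. The field names are taken from the sample response above; the structs are hand-rolled for illustration and are not the official `api` package types.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// nodeEvent lists only the event fields documented above.
type nodeEvent struct {
	Message     string
	Subsystem   string
	Details     map[string]string
	Timestamp   time.Time
	CreateIndex uint64
}

// nodeResponse captures just the Events portion of the node read response.
type nodeResponse struct {
	Events []nodeEvent
}

func main() {
	// Trimmed sample payload matching the response shown above.
	payload := []byte(`{"Events":[{"CreateIndex":11,"Details":{"driver":"docker"},
		"Message":"Driver docker is not detected","Subsystem":"Driver",
		"Timestamp":"2018-03-29T16:27:48.556094143Z"}]}`)

	var node nodeResponse
	if err := json.Unmarshal(payload, &node); err != nil {
		panic(err)
	}
	for _, e := range node.Events {
		fmt.Printf("%s [%s] %s %v\n", e.Timestamp.Format(time.RFC3339), e.Subsystem, e.Message, e.Details)
	}
}
```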
@@ -99,6 +99,20 @@ Drain = false
Status = ready
Uptime = 17h42m50s

Drivers
Driver    Detected  Healthy
docker    false     false
exec      true      true
java      true      true
qemu      true      true
raw_exec  true      true
rkt       true      true

Node Events
Time                  Subsystem       Message
2018-03-29T17:24:42Z  Driver: docker  Driver docker is not detected
2018-03-29T17:23:42Z  Cluster         Node Registered

Allocated Resources
CPU           Memory           Disk            IOPS
500/2600 MHz  256 MiB/2.0 GiB  300 MiB/32 GiB  0/0

@@ -128,6 +142,20 @@ Drain = false
Status = ready
Uptime = 17h7m41s

Drivers
Driver    Detected  Healthy
docker    false     false
exec      true      true
java      true      true
qemu      true      true
raw_exec  true      true
rkt       true      true

Node Events
Time                  Subsystem       Message
2018-03-29T17:24:42Z  Driver: docker  Driver docker is not detected
2018-03-29T17:23:42Z  Cluster         Node Registered

Allocated Resources
CPU            Memory           Disk            IOPS
2500/2600 MHz  1.3 GiB/2.0 GiB  1.5 GiB/32 GiB  0/0

@@ -179,6 +207,20 @@ Drain = false
Status = ready
Uptime = 17h7m41s

Drivers
Driver    Detected  Healthy
docker    false     false
exec      true      true
java      true      true
qemu      true      true
raw_exec  true      true
rkt       true      true

Node Events
Time                  Subsystem       Message
2018-03-29T17:24:42Z  Driver: docker  Driver docker is not detected
2018-03-29T17:23:42Z  Cluster         Node Registered

Allocated Resources
CPU            Memory           Disk            IOPS
2500/2600 MHz  1.3 GiB/2.0 GiB  1.5 GiB/32 GiB  0/0

@@ -246,6 +288,20 @@ Drain = false
Status = ready
Uptime = 17h7m41s

Drivers
Driver    Detected  Healthy  Message                        Time
docker    false     false    Driver docker is not detected  2018-03-29T17:24:42Z
exec      true      true     <none>                         2018-03-29T17:23:42Z
java      true      true     <none>                         2018-03-29T17:23:41Z
qemu      true      true     <none>                         2018-03-29T17:23:41Z
raw_exec  true      true     <none>                         2018-03-29T17:23:42Z
rkt       true      true     <none>                         2018-03-29T17:23:42Z

Node Events
Time                  Subsystem       Message                        Details
2018-03-29T17:24:42Z  Driver: docker  Driver docker is not detected  driver: docker,
2018-03-29T17:23:42Z  Cluster         Node Registered                <none>

Allocated Resources
CPU            Memory           Disk            IOPS
2500/2600 MHz  1.3 GiB/2.0 GiB  1.5 GiB/32 GiB  0/0
@@ -97,12 +97,16 @@ does not automatically enable service discovery.
interpolated and revalidated. This can cause certain service names to pass validation at submit time but fail
at runtime.

- `port` `(string: <optional>)` - Specifies the label of the port on which this
service is running. Note this is the _label_ of the port and not the port
number unless `address_mode = driver`. The port label must match one defined
in the [`network`][network] stanza unless you're also using
`address_mode="driver"`. Numeric ports may be used when in driver addressing
mode.
- `port` `(string: <optional>)` - Specifies the port to advertise for this
service. The value of `port` depends on which [`address_mode`](#address_mode)
is being used:

- `driver` - Advertise the port determined by the driver (eg Docker or rkt).
The `port` may be a numeric port or a port label specified in the driver's
`port_map`.

- `host` - Advertise the host port for this service. `port` must match a port
_label_ specified in the [`network`][network] stanza.

- `tags` `(array<string>: [])` - Specifies the list of tags to associate with
this service. If this is not supplied, no tags will be assigned to the service