mirror of
https://github.com/kemko/nomad.git
synced 2026-01-01 16:05:42 +03:00
* [ui] Service job status panel (#16134) * it begins * Hacky demo enabled * Still very hacky but seems deece * Floor of at least 3 must be shown * Width from on-high * Other statuses considered * More sensible allocTypes listing * Beginnings of a legend * Total number of allocs running now maps over job.groups * Lintfix * base the number of slots to hold open on actual tallies, which should never exceed totalAllocs * Versions get yer versions here * Versions lookin like versions * Mirage fixup * Adds Remaining as an alloc chart status and adds historical status option * Get tests passing again by making job status static for a sec * Historical status panel click actions moved into their own component class * job detail tests plz chill * Testing if percy is fickle * Hyper-specfic on summary distribution bar identifier * Perhaps the 2nd allocSummary item no longer exists with the more accurate afterCreate data * UI Test eschewing the page pattern * Bones of a new acceptance test * Track width changes explicitly with window-resize * testlintfix * Alloc counting tests * Alloc grouping test * Alloc grouping with complex resizing * Refined the list of showable statuses * PR feedback addressed * renamed allocation-row to allocation-status-row * [ui, job status] Make panel status mode a queryParam (#16345) * queryParam changing * Test for QP in panel * Adding @tracked to legacy controller * Move the job of switching to Historical out to larger context * integration test mock passed func * [ui] Service job deployment status panel (#16383) * A very fast and loose deployment panel * Removing Unknown status from the panel * Set up oldAllocs list in constructor, rather than as a getter/tracked var * Small amount of template cleanup * Refactored latest-deployment new logic back into panel.js * Revert now-unused latest-deployment component * margin bottom when ungrouped also * Basic integration tests for job deployment status panel * Updates complete alloc colour to green for new 
visualizations only (#16618) * Updates complete alloc colour to green for new visualizations only * Pale green instead of dark green for viz in general * [ui] Job Deployment Status: History and Update Props (#16518) * Deployment history wooooooo * Styled deployment history * Update Params * lintfix * Types and groups for updateParams * Live-updating history * Harden with types, error states, and pending states * Refactor updateParams to use trigger component * [ui] Deployment History search (#16608) * Functioning searchbox * Some nice animations for history items * History search test * Fixing up some old mirage conventions * some a11y rule override to account for scss keyframes * Split panel into deploying and steady components * HandleError passed from job index * gridified panel elements * TotalAllocs added to deploying.js * Width perc to px * [ui] Splitting deployment allocs by status, health, and canary status (#16766) * Initial attempt with lots of scratchpad work * Style mods per UI discussion * Fix canary overflow bug * Dont show canary or health for steady/prev-alloc blocks * Steady state * Thanks Julie * Fixes steady-state versions * Legen, wait for it... 
* Test fixes now that we have a minimum block size * PR prep * Shimmer effect on pending and unplaced allocs (#16801) * Shimmer effect on pending and unplaced * Dont show animation in the legend * [ui, deployments] Linking allocblocks and legends to allocation / allocations index routes (#16821) * Conditional link-to component and basic linking to allocations and allocation routes * Job versions filter added to allocations index page * Steady state legends link * Legend links * Badge count links for versions * Fix: faded class on steady-state legend items * version link now wont show completed ones * Fix a11y violations with link labels * Combining some template conditional logic * [ui, deployments] Conversions on long nanosecond update params (#16882) * Conversions on long nanosecond nums * Early return in updateParamGroups comp prop * [ui, deployments] Mirage Actively Deploying Job and Deployment Integration Tests (#16888) * Start of deployment alloc test scaffolding * Bit of test cleanup and canary for ungrouped allocs * Flakey but more robust integrations for deployment panel * De-flake acceptance tests and add an actively deploying job to mirage * Jitter-less alloc status distribution removes my bad math * bugfix caused by summary.desiredTotal non-null * More interesting mirage active deployment alloc breakdown * Further tests for previous-allocs row * Previous alloc legend tests * Percy snapshots added to integration test * changelog
488 lines
14 KiB
JavaScript
488 lines
14 KiB
JavaScript
// @ts-check
|
|
import { module, test } from 'qunit';
|
|
import { setupApplicationTest } from 'ember-qunit';
|
|
|
|
import {
|
|
click,
|
|
visit,
|
|
find,
|
|
findAll,
|
|
fillIn,
|
|
triggerEvent,
|
|
} from '@ember/test-helpers';
|
|
|
|
import { setupMirage } from 'ember-cli-mirage/test-support';
|
|
import faker from 'nomad-ui/mirage/faker';
|
|
import percySnapshot from '@percy/ember';
|
|
import a11yAudit from 'nomad-ui/tests/helpers/a11y-audit';
|
|
// TODO: Mirage is not type-friendly / assigns "server" as a global. Try to work around this shortcoming.
|
|
|
|
module('Acceptance | job status panel', function (hooks) {
|
|
setupApplicationTest(hooks);
|
|
setupMirage(hooks);
|
|
|
|
hooks.beforeEach(async function () {
|
|
server.create('node');
|
|
});
|
|
|
|
test('Status panel lets you switch between Current and Historical', async function (assert) {
  // 3 explicit dom asserts + a11yAudit + percySnapshot bookkeeping.
  assert.expect(5);

  const job = server.create('job', {
    status: 'running',
    datacenters: ['*'],
    type: 'service',
    createAllocations: true,
  });

  await visit(`/jobs/${job.id}`);
  assert.dom('.job-status-panel').exists();
  await a11yAudit(assert);
  await percySnapshot(assert);

  // The panel starts out in "Current" mode.
  assert
    .dom('[data-test-status-mode="current"]')
    .exists('Current mode by default');

  // Clicking the mode that is already active leaves the panel unchanged.
  await click('[data-test-status-mode-current]');
  assert
    .dom('[data-test-status-mode="current"]')
    .exists('Clicking active mode makes no change');

  // Clicking the other toggle flips the panel into "Historical" mode.
  await click('[data-test-status-mode-historical]');
  assert
    .dom('[data-test-status-mode="historical"]')
    .exists('Lets you switch to historical mode');
});
|
|
|
|
test('Status panel observes query parameters for current/historical', async function (assert) {
  assert.expect(2);

  const job = server.create('job', {
    status: 'running',
    datacenters: ['*'],
    type: 'service',
    createAllocations: true,
  });

  // Visiting with ?statusMode=historical should render the panel
  // directly in Historical mode, bypassing the Current default.
  await visit(`/jobs/${job.id}?statusMode=historical`);
  assert.dom('.job-status-panel').exists();

  assert
    .dom('[data-test-status-mode="historical"]')
    .exists('Historical mode when rendered with queryParams');
});
|
|
|
|
test('Status Panel shows accurate number and types of ungrouped allocation blocks', async function (assert) {
  assert.expect(7);

  faker.seed(1); // deterministic mirage data so alloc counts are stable

  let groupTaskCount = 10;

  // First job: a single group whose allocations are all running.
  let job = server.create('job', {
    status: 'running',
    datacenters: ['*'],
    type: 'service',
    resourceSpec: ['M: 256, C: 500'], // a single group
    createAllocations: true,
    allocStatusDistribution: {
      running: 1,
      failed: 0,
      unknown: 0,
      lost: 0,
    },
    groupTaskCount,
    shallow: true,
  });

  await visit(`/jobs/${job.id}`);
  assert.dom('.job-status-panel').exists();

  let jobAllocCount = server.db.allocations.where({
    jobId: job.id,
  }).length;

  // Sanity-check that mirage produced the expected number of allocations.
  assert.equal(
    jobAllocCount,
    groupTaskCount * job.taskGroups.length,
    'Correct number of allocs generated (metatest)' // fixed typo: was "Correect"
  );
  assert
    .dom('.ungrouped-allocs .represented-allocation.running')
    .exists(
      { count: jobAllocCount },
      `All ${jobAllocCount} allocations are represented in the status panel`
    );

  // Second job: twice the allocations, split evenly between running and failed.
  groupTaskCount = 20;

  job = server.create('job', {
    status: 'running',
    datacenters: ['*'],
    type: 'service',
    resourceSpec: ['M: 256, C: 500'], // a single group
    createAllocations: true,
    allocStatusDistribution: {
      running: 0.5,
      failed: 0.5,
      unknown: 0,
      lost: 0,
    },
    groupTaskCount,
    noActiveDeployment: true,
    shallow: true,
  });

  await visit(`/jobs/${job.id}`);
  assert.dom('.job-status-panel').exists();

  let runningAllocCount = server.db.allocations.where({
    jobId: job.id,
    clientStatus: 'running',
  }).length;

  let failedAllocCount = server.db.allocations.where({
    jobId: job.id,
    clientStatus: 'failed',
  }).length;

  assert.equal(
    runningAllocCount + failedAllocCount,
    groupTaskCount * job.taskGroups.length,
    'Correct number of allocs generated (metatest)' // fixed typo: was "Correect"
  );
  // Each client status gets its own run of represented-allocation blocks.
  assert
    .dom('.ungrouped-allocs .represented-allocation.running')
    .exists(
      { count: runningAllocCount },
      `All ${runningAllocCount} running allocations are represented in the status panel`
    );
  assert
    .dom('.ungrouped-allocs .represented-allocation.failed')
    .exists(
      { count: failedAllocCount },
      `All ${failedAllocCount} failed allocations are represented in the status panel`
    );
  await percySnapshot(assert);
});
|
|
|
|
test('Status Panel groups allocations when they get past a threshold', async function (assert) {
  assert.expect(6);

  faker.seed(1);

  let groupTaskCount = 20;

  // 20 running allocations: few enough to render every one individually.
  let job = server.create('job', {
    status: 'running',
    datacenters: ['*'],
    type: 'service',
    resourceSpec: ['M: 256, C: 500'], // a single group
    createAllocations: true,
    allocStatusDistribution: {
      running: 1,
      failed: 0,
      unknown: 0,
      lost: 0,
    },
    groupTaskCount,
    shallow: true,
  });

  await visit(`/jobs/${job.id}`);
  assert.dom('.job-status-panel').exists();

  let jobAllocCount = server.db.allocations.where({ jobId: job.id }).length;

  assert
    .dom('.ungrouped-allocs .represented-allocation.running')
    .exists(
      { count: jobAllocCount },
      `All ${jobAllocCount} allocations are represented in the status panel, ungrouped`
    );

  // Double the allocation count to push past the grouping threshold.
  groupTaskCount = 40;

  job = server.create('job', {
    status: 'running',
    datacenters: ['*'],
    type: 'service',
    resourceSpec: ['M: 256, C: 500'], // a single group
    createAllocations: true,
    allocStatusDistribution: {
      running: 1,
      failed: 0,
      unknown: 0,
      lost: 0,
    },
    groupTaskCount,
    shallow: true,
  });

  await visit(`/jobs/${job.id}`);
  assert.dom('.job-status-panel').exists();

  jobAllocCount = server.db.allocations.where({ jobId: job.id }).length;

  // At standard test resolution, 40 allocations will attempt to display 20 ungrouped, and 20 grouped.
  const desiredUngroupedAllocCount = 20;
  assert
    .dom('.ungrouped-allocs .represented-allocation.running')
    .exists(
      { count: desiredUngroupedAllocCount },
      `${desiredUngroupedAllocCount} allocations are represented ungrouped`
    );

  // The overflow beyond the ungrouped cap collapses into a "+N" summary block.
  assert
    .dom('.represented-allocation.rest')
    .exists('Allocations are numerous enough that a summary block exists');
  assert
    .dom('.represented-allocation.rest')
    .hasText(
      `+${groupTaskCount - desiredUngroupedAllocCount}`,
      'Summary block has the correct number of grouped allocs'
    );

  await percySnapshot(assert);
});
|
|
|
|
test('Status Panel groups allocations when they get past a threshold, multiple statuses', async function (assert) {
  // 13 assertions at full width + 6 at 1100px + 6 at 800px.
  // (added for consistency with the other tests in this module)
  assert.expect(25);

  const groupTaskCount = 50;

  const job = server.create('job', {
    status: 'running',
    datacenters: ['*'],
    type: 'service',
    resourceSpec: ['M: 256, C: 500'], // a single group
    createAllocations: true,
    allocStatusDistribution: {
      running: 0.5,
      failed: 0.3,
      pending: 0.1,
      lost: 0.1,
    },
    groupTaskCount,
    shallow: true,
  });

  await visit(`/jobs/${job.id}`);
  assert.dom('.job-status-panel').exists();

  // With 50 allocs split across 4 statuses distributed as above, we can expect
  // 25 running, 15 failed, 5 pending, and 5 lost.
  // (corrected: the old comment claimed 16 failed / 6 pending / 4 remaining,
  // which contradicts both the 0.5/0.3/0.1/0.1 split and the assertions below.)
  // At standard test resolution, each status will be ungrouped/grouped as follows:
  // 25 running: 9 ungrouped, 16 grouped
  // 15 failed: 5 ungrouped, 10 grouped
  // 5 pending: 0 ungrouped, 5 grouped
  // 5 lost: 0 ungrouped, 5 grouped. Represented as "Unplaced"

  assert
    .dom('.ungrouped-allocs .represented-allocation.running')
    .exists({ count: 9 }, '9 running allocations are represented ungrouped');
  assert
    .dom('.represented-allocation.rest.running')
    .exists(
      'Running allocations are numerous enough that a summary block exists'
    );
  assert
    .dom('.represented-allocation.rest.running')
    .hasText(
      '+16',
      'Summary block has the correct number of grouped running allocs'
    );

  assert
    .dom('.ungrouped-allocs .represented-allocation.failed')
    .exists({ count: 5 }, '5 failed allocations are represented ungrouped');
  assert
    .dom('.represented-allocation.rest.failed')
    .exists(
      'Failed allocations are numerous enough that a summary block exists'
    );
  assert
    .dom('.represented-allocation.rest.failed')
    .hasText(
      '+10',
      'Summary block has the correct number of grouped failed allocs'
    );

  assert
    .dom('.ungrouped-allocs .represented-allocation.pending')
    .exists({ count: 0 }, '0 pending allocations are represented ungrouped');
  assert
    .dom('.represented-allocation.rest.pending')
    .exists(
      'pending allocations are numerous enough that a summary block exists'
    );
  assert
    .dom('.represented-allocation.rest.pending')
    .hasText(
      '5',
      'Summary block has the correct number of grouped pending allocs'
    );

  assert
    .dom('.ungrouped-allocs .represented-allocation.unplaced')
    .exists({ count: 0 }, '0 unplaced allocations are represented ungrouped');
  assert
    .dom('.represented-allocation.rest.unplaced')
    .exists(
      'Unplaced allocations are numerous enough that a summary block exists'
    );
  assert
    .dom('.represented-allocation.rest.unplaced')
    .hasText(
      '5',
      'Summary block has the correct number of grouped unplaced allocs'
    );
  await percySnapshot(
    'Status Panel groups allocations when they get past a threshold, multiple statuses (full width)'
  );

  // Simulate a window resize event; will recompute how many of each ought to be grouped.

  // At 1100px, only running and failed allocations have some ungrouped allocs
  find('.page-body').style.width = '1100px';
  await triggerEvent(window, 'resize');

  await percySnapshot(
    'Status Panel groups allocations when they get past a threshold, multiple statuses (1100px)'
  );

  assert
    .dom('.ungrouped-allocs .represented-allocation.running')
    .exists({ count: 7 }, '7 running allocations are represented ungrouped');
  assert
    .dom('.represented-allocation.rest.running')
    .exists(
      'Running allocations are numerous enough that a summary block exists'
    );
  assert
    .dom('.represented-allocation.rest.running')
    .hasText(
      '+18',
      'Summary block has the correct number of grouped running allocs'
    );

  assert
    .dom('.ungrouped-allocs .represented-allocation.failed')
    .exists({ count: 4 }, '4 failed allocations are represented ungrouped');
  assert
    .dom('.represented-allocation.rest.failed')
    .exists(
      'Failed allocations are numerous enough that a summary block exists'
    );
  assert
    .dom('.represented-allocation.rest.failed')
    .hasText(
      '+11',
      'Summary block has the correct number of grouped failed allocs'
    );

  // At 800px, only running allocations have some ungrouped allocs. The rest are all fully grouped.
  // (corrected: the old comment said 500px while the code sets 800px.
  // NOTE: the Percy snapshot label below still says "500px" — kept byte-identical
  // to avoid breaking existing Percy baselines.)
  find('.page-body').style.width = '800px';
  await triggerEvent(window, 'resize');

  await percySnapshot(
    'Status Panel groups allocations when they get past a threshold, multiple statuses (500px)'
  );

  assert
    .dom('.ungrouped-allocs .represented-allocation.running')
    .exists({ count: 4 }, '4 running allocations are represented ungrouped');
  assert
    .dom('.represented-allocation.rest.running')
    .exists(
      'Running allocations are numerous enough that a summary block exists'
    );
  assert
    .dom('.represented-allocation.rest.running')
    .hasText(
      '+21',
      'Summary block has the correct number of grouped running allocs'
    );

  assert
    .dom('.ungrouped-allocs .represented-allocation.failed')
    .doesNotExist('no failed allocations are represented ungrouped');
  assert
    .dom('.represented-allocation.rest.failed')
    .exists(
      'Failed allocations are numerous enough that a summary block exists'
    );
  assert
    .dom('.represented-allocation.rest.failed')
    .hasText(
      '15',
      'Summary block has the correct number of grouped failed allocs'
    );
});
|
|
|
|
module('deployment history', function () {
  test('Deployment history can be searched', async function (assert) {
    faker.seed(1);

    const groupTaskCount = 10;

    // An actively deploying job at version 0 with a single all-running group.
    const job = server.create('job', {
      status: 'running',
      datacenters: ['*'],
      type: 'service',
      resourceSpec: ['M: 256, C: 500'], // a single group
      createAllocations: true,
      allocStatusDistribution: {
        running: 1,
        failed: 0,
        unknown: 0,
        lost: 0,
      },
      groupTaskCount,
      shallow: true,
      activeDeployment: true,
      version: 0,
    });

    // Give every allocation the same task state so each one contributes
    // the same set of task events to the history timeline.
    const state = server.create('task-state');
    state.events = server.schema.taskEvents.where({ taskStateId: state.id });

    server.schema.allocations.where({ jobId: job.id }).update({
      taskStateIds: [state.id],
      jobVersion: 0,
    });

    await visit(`/jobs/${job.id}`);
    assert.dom('.job-status-panel').exists();

    const serverEvents = server.schema.taskEvents.where({
      taskStateId: state.id,
    });
    const shownEvents = findAll('.timeline-object');
    const jobAllocations = server.db.allocations.where({ jobId: job.id });

    // Unfiltered: every event for every allocation appears.
    const expectedTotal = serverEvents.length * jobAllocations.length;
    assert.equal(shownEvents.length, expectedTotal, 'All events are shown');

    // Searching for one event's message narrows the timeline to one
    // matching event per allocation.
    const firstMessage = serverEvents.models[0].message;
    await fillIn('[data-test-history-search] input', firstMessage);
    assert.equal(
      findAll('.timeline-object').length,
      jobAllocations.length,
      'Only events matching the search are shown'
    );

    // A search with no matches shows the empty-state message.
    await fillIn('[data-test-history-search] input', 'foo bar baz');
    assert
      .dom('[data-test-history-search-no-match]')
      .exists('No match message is shown');
  });
});
|
|
});
|