nomad/ui/app/serializers/job.js
Phil Renaud 16479af38d Jobs Index Page: Live Updates + Pagination (#20452)
* Hook and latch on the initial index

* Serialization and restart of controller and table

* de-log

* allocBlocks reimplemented at job model level

* totalAllocs doesn't mean the same thing on the job model as it did in steady.js

* Hamburgers to sausages

* Hacky way to bring new jobs back around and parent job handling in list view

* Getting closer to hook/latch

* Latch from update on hook from initialize, but fickle

* Note on multiple-watch problem

* Sensible monday morning comment removal

* use of abortController to handle transition and reset events

* Next token will now update when there's an on-page shift

* Very rough anti-jostle technique

* Demoable, now to move things out of route and into controller

* Into the controller, generally

* Smarter cancellations

* Reset abortController on index models run, and system/sysbatch jobs now have an improved groupCountSum computed property

* Prev Page reverse querying

* n+1th jobs existing will trigger nextToken/pagination display

* Start of a GET/POST statuses return

* Namespace fix

* Unblock tests

* Realizing to my small horror that this skipURLModification flag may be too heavy-handed

* Lintfix

* Default liveupdates localStorage setting to true

* Pagination and index rethink

* Big uncoupling of watchable and url-append stuff

* Testfixes for region, search, and keyboard

* Job row class for test purposes

* Allocations in test now contain events

* Starting on the jobs list tests in earnest

* Forbidden state de-bubbling cleanup

* Job list page size fixes

* Facet/Search/Filter jobs list tests skipped

* Maybe it's the automatic mirage logging

* Unbreak task unit test

* Pre-sort sort

* styling for jobs list pagination and general PR cleanup

* moving from Job.ActiveDeploymentID to Job.LatestDeployment.ID

* modifyIndex-based pagination (#20350)

* modifyIndex-based pagination

* modifyIndex gets its own column and pagination compacted with icons

* A generic withPagination handler for mirage

* Some live-PR changes

* Pagination and button disabled tests

* Job update handling tests for jobs index

* assertion timeout in case of long setTimeouts

* assert.timeouts down to 500ms

* de-to-do

* Clarifying comment and test descriptions

* Bugfix: resizing your browser on the new jobs index page would make the viz grow forever (#20458)

* [ui] Searching and filtering options (#20459)

* Beginnings of a search box for filter expressions

* jobSearchBox integration test

* jobs list updateFilter initial test

* Basic jobs list filtering tests

* First attempt at side-by-side facets and search with a computed filter

* Weirdly close to an iterative approach, but checked isn't tracked properly

* Big rework to make filter composition and decomposition work nicely with the url

* Namespace facet dropdown added

* NodePool facet dropdown added

* hdsFacet for future testing and basic namespace filtering test

* Namespace filter existence test

* Status filtering

* Node pool/dynamic facet test

* Test patchups

* Attempt at optimize test fix

* Allocation re-load on optimize page explainer

* The Big Un-Skip

* Post-PR-review cleanup

* todo-squashing

* [ui] Handle parent/child jobs with the paginated Jobs Index route (#20493)

* First pass at a non-watchQuery version

* Parameterized jobs get child fetching and jobs index status style for parent jobs

* Completed allocs vs Running allocs in a child-job context, and fix an issue where moving from parent to parent would not reset index

* Testfix and better handling empty-child-statuses-list

* Parent/child test case

* Don't show empty allocation-status bars for parent jobs with no children

* Splits Settings into 2 sections, sign-in/profile and user settings (#20535)

* Changelog
2024-05-06 17:09:37 -04:00

/**
 * Copyright (c) HashiCorp, Inc.
 * SPDX-License-Identifier: BUSL-1.1
 */

import { assign } from '@ember/polyfills';
import ApplicationSerializer from './application';
import queryString from 'query-string';
import classic from 'ember-classic-decorator';

@classic
export default class JobSerializer extends ApplicationSerializer {
  attrs = {
    parameterized: 'ParameterizedJob',
  };

  separateNanos = ['SubmitTime'];

  normalize(typeHash, hash) {
    hash.NamespaceID = hash.Namespace;

    // ID is a composite of both the job ID and the namespace the job is in
    hash.PlainId = hash.ID;
    hash.ID = JSON.stringify([hash.ID, hash.NamespaceID || 'default']);

    // ParentID comes in as "" instead of null
    if (!hash.ParentID) {
      hash.ParentID = null;
    } else {
      hash.ParentID = JSON.stringify([
        hash.ParentID,
        hash.NamespaceID || 'default',
      ]);
    }

    // Job Summary is always at /:job-id/summary, but since it can also come from
    // the job list, it's better for Ember Data to be linked by ID association.
    hash.SummaryID = hash.ID;

    // Periodic is a boolean on list and an object on single
    if (hash.Periodic instanceof Object) {
      hash.PeriodicDetails = hash.Periodic;
      hash.Periodic = true;
    }

    // Parameterized behaves like Periodic
    if (hash.ParameterizedJob instanceof Object) {
      hash.ParameterizedDetails = hash.ParameterizedJob;
      hash.ParameterizedJob = true;
    }

    // If the hash contains summary information, push it into the store
    // as a job-summary model.
    if (hash.JobSummary) {
      this.store.pushPayload('job-summary', {
        'job-summary': [hash.JobSummary],
      });
    }

    return super.normalize(typeHash, hash);
  }
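
  // Illustrative sketch (not part of the original file): roughly how normalize()
  // above reshapes a list-endpoint hash, assuming a hypothetical periodic job
  // named "cleanup" in the default namespace. Untouched fields are omitted.
  //
  //   in:  { ID: 'cleanup', Namespace: 'default', ParentID: '',
  //          Periodic: { Spec: '*/15 * * * *' } }
  //   out: { ID: '["cleanup","default"]', PlainId: 'cleanup',
  //          NamespaceID: 'default', SummaryID: '["cleanup","default"]',
  //          ParentID: null, Periodic: true,
  //          PeriodicDetails: { Spec: '*/15 * * * *' } }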

  normalizeQueryResponse(store, primaryModelClass, payload, id, requestType) {
    // What jobs did we ask for?
    if (payload._requestBody?.jobs) {
      let requestedJobIDs = payload._requestBody.jobs;

      // If they don't match the job IDs we got back, create an empty placeholder
      // for each one we didn't get back.
      payload.forEach((job) => {
        job.AssumeGC = false;
      });

      let missingJobIDs = requestedJobIDs.filter(
        (j) =>
          !payload.find((p) => p.ID === j.id && p.Namespace === j.namespace)
      );

      missingJobIDs.forEach((job) => {
        payload.push({
          ID: job.id,
          Namespace: job.namespace,
          Allocs: [],
          AssumeGC: true,
        });
        job.relationships = {
          allocations: {
            data: [],
          },
        };
      });

      // Note: we want our returned jobs to come back in the order we requested
      // them, including any jobs that were missing from the response and had to
      // be backfilled above.
      payload.sort((a, b) => {
        return (
          requestedJobIDs.findIndex(
            (j) => j.id === a.ID && j.namespace === a.Namespace
          ) -
          requestedJobIDs.findIndex(
            (j) => j.id === b.ID && j.namespace === b.Namespace
          )
        );
      });

      delete payload._requestBody;
    }

    const jobs = payload;

    // Sort by ModifyIndex, descending (most recently modified first)
    jobs.sort((a, b) => b.ModifyIndex - a.ModifyIndex);

    jobs.forEach((job) => {
      if (job.Allocs) {
        job.relationships = {
          allocations: {
            data: job.Allocs.map((alloc) => ({
              id: alloc.id,
              type: 'allocation',
            })),
          },
        };
      }

      if (job.LatestDeployment) {
        job.LatestDeploymentSummary = job.LatestDeployment;
        delete job.LatestDeployment;
      }

      // Signal that it's a query response at the individual normalization level,
      // for allocation placement.
      job._aggregate = true;
    });

    return super.normalizeQueryResponse(
      store,
      primaryModelClass,
      jobs,
      id,
      requestType
    );
  }
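
  // Illustrative sketch (not part of the original file): the shapes
  // normalizeQueryResponse() above expects when the adapter attaches the POSTed
  // request body to the payload. Field names mirror the code, not a documented
  // API contract.
  //
  //   payload._requestBody.jobs: [{ id: 'redis', namespace: 'default' }, ...]
  //   payload (each item):       { ID: 'redis', Namespace: 'default',
  //                                ModifyIndex: 123, Allocs: [ ... ],
  //                                LatestDeployment: { ... } }
  //
  // Any requested job missing from the response is appended as an empty,
  // presumed-garbage-collected placeholder ({ Allocs: [], AssumeGC: true }) so
  // the jobs index can keep a stable row for it.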

  extractRelationships(modelClass, hash) {
    const namespace =
      !hash.NamespaceID || hash.NamespaceID === 'default'
        ? undefined
        : hash.NamespaceID;
    const { modelName } = modelClass;

    const apiNamespace = this.store
      .adapterFor(modelClass.modelName)
      .get('namespace');

    const [jobURL] = this.store
      .adapterFor(modelName)
      .buildURL(modelName, hash.ID, hash, 'findRecord')
      .split('?');

    const variableLookup = hash.ParentID
      ? JSON.parse(hash.ParentID)[0]
      : hash.PlainId;

    if (hash._aggregate && hash.Allocs) {
      // Manually push allocations to the store.
      // These allocations have enough information to be useful on the jobs index
      // page, but less than the /allocations endpoint for an individual job would
      // give us. As such, pages like /optimize require a specific call to a given
      // job's allocations endpoint to get more detailed information.
      hash.Allocs.forEach((alloc) => {
        this.store.push({
          data: {
            id: alloc.ID,
            type: 'allocation',
            attributes: {
              clientStatus: alloc.ClientStatus,
              deploymentStatus: {
                Healthy: alloc.DeploymentStatus.Healthy,
                Canary: alloc.DeploymentStatus.Canary,
              },
              nodeID: alloc.NodeID,
            },
          },
        });
      });
      delete hash._aggregate;
    }

    return assign(super.extractRelationships(...arguments), {
      allocations: {
        data: hash.Allocs?.map((alloc) => ({
          id: alloc.ID,
          type: 'allocation',
        })),
        links: {
          related: buildURL(`${jobURL}/allocations`, { namespace }),
        },
      },
      versions: {
        links: {
          related: buildURL(`${jobURL}/versions`, { namespace, diffs: true }),
        },
      },
      deployments: {
        links: {
          related: buildURL(`${jobURL}/deployments`, { namespace }),
        },
      },
      latestDeployment: {
        links: {
          related: buildURL(`${jobURL}/deployment`, { namespace }),
        },
      },
      evaluations: {
        links: {
          related: buildURL(`${jobURL}/evaluations`, { namespace }),
        },
      },
      services: {
        links: {
          related: buildURL(`${jobURL}/services`, { namespace }),
        },
      },
      variables: {
        links: {
          related: buildURL(`/${apiNamespace}/vars`, {
            prefix: `nomad/jobs/${variableLookup}`,
            namespace,
          }),
        },
      },
      scaleState: {
        links: {
          related: buildURL(`${jobURL}/scale`, { namespace }),
        },
      },
      recommendationSummaries: {
        links: {
          related: buildURL(`/${apiNamespace}/recommendations`, {
            job: hash.PlainId,
            namespace: hash.NamespaceID || 'default',
          }),
        },
      },
    });
  }
}
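
// Illustrative sketch (not part of the original file): with the adapter's API
// namespace assumed to be 'v1' and a job "redis" living in a non-default
// "prod" namespace, the related links built above would look roughly like:
//
//   allocations: /v1/job/redis/allocations?namespace=prod
//   versions:    /v1/job/redis/versions?diffs=true&namespace=prod
//   variables:   /v1/vars?namespace=prod&prefix=nomad%2Fjobs%2Fredis
//
// The exact paths depend on the adapter's buildURL() output and on
// query-string's encoding; treat these as a sketch, not a contract.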

function buildURL(path, queryParams) {
  const qpString = queryString.stringify(queryParams);
  if (qpString) {
    return `${path}?${qpString}`;
  }
  return path;
}
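
// Illustrative usage (not part of the original file): buildURL() relies on
// query-string dropping undefined values, which is how jobs in the default
// namespace end up with clean, parameterless URLs.
//
//   buildURL('/v1/job/redis/evaluations', { namespace: undefined });
//   // => '/v1/job/redis/evaluations'
//   buildURL('/v1/job/redis/evaluations', { namespace: 'prod' });
//   // => '/v1/job/redis/evaluations?namespace=prod'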