mirror of
https://github.com/kemko/nomad.git
synced 2026-01-03 08:55:43 +03:00
* Hook and latch on the initial index * Serialization and restart of controller and table * de-log * allocBlocks reimplemented at job model level * totalAllocs doesnt mean on jobmodel what it did in steady.js * Hamburgers to sausages * Hacky way to bring new jobs back around and parent job handling in list view * Getting closer to hook/latch * Latch from update on hook from initialize, but fickle * Note on multiple-watch problem * Sensible monday morning comment removal * use of abortController to handle transition and reset events * Next token will now update when there's an on-page shift * Very rough anti-jostle technique * Demoable, now to move things out of route and into controller * Into the controller, generally * Smarter cancellations * Reset abortController on index models run, and system/sysbatch jobs now have an improved groupCountSum computed property * Prev Page reverse querying * n+1th jobs existing will trigger nextToken/pagination display * Start of a GET/POST statuses return * Namespace fix * Unblock tests * Realizing to my small horror that this skipURLModification flag may be too heavy handed * Lintfix * Default liveupdates localStorage setting to true * Pagination and index rethink * Big uncoupling of watchable and url-append stuff * Testfixes for region, search, and keyboard * Job row class for test purposes * Allocations in test now contain events * Starting on the jobs list tests in earnest * Forbidden state de-bubbling cleanup * Job list page size fixes * Facet/Search/Filter jobs list tests skipped * Maybe it's the automatic mirage logging * Unbreak task unit test * Pre-sort sort * styling for jobs list pagination and general PR cleanup * moving from Job.ActiveDeploymentID to Job.LatestDeployment.ID * modifyIndex-based pagination (#20350) * modifyIndex-based pagination * modifyIndex gets its own column and pagination compacted with icons * A generic withPagination handler for mirage * Some live-PR changes * Pagination and button disabled 
tests * Job update handling tests for jobs index * assertion timeout in case of long setTimeouts * assert.timeouts down to 500ms * de-to-do * Clarifying comment and test descriptions * Bugfix: resizing your browser on the new jobs index page would make the viz grow forever (#20458) * [ui] Searching and filtering options (#20459) * Beginnings of a search box for filter expressions * jobSearchBox integration test * jobs list updateFilter initial test * Basic jobs list filtering tests * First attempt at side-by-side facets and search with a computed filter * Weirdly close to an iterative approach but checked isnt tracked properly * Big rework to make filter composition and decomposition work nicely with the url * Namespace facet dropdown added * NodePool facet dropdown added * hdsFacet for future testing and basic namespace filtering test * Namespace filter existence test * Status filtering * Node pool/dynamic facet test * Test patchups * Attempt at optimize test fix * Allocation re-load on optimize page explainer * The Big Un-Skip * Post-PR-review cleanup * todo-squashing * [ui] Handle parent/child jobs with the paginated Jobs Index route (#20493) * First pass at a non-watchQuery version * Parameterized jobs get child fetching and jobs index status style for parent jobs * Completed allocs vs Running allocs in a child-job context, and fix an issue where moving from parent to parent would not reset index * Testfix and better handling empty-child-statuses-list * Parent/child test case * Dont show empty allocation-status bars for parent jobs with no children * Splits Settings into 2 sections, sign-in/profile and user settings (#20535) * Changelog
104 lines
3.3 KiB
JavaScript
104 lines
3.3 KiB
JavaScript
/**
|
|
* Copyright (c) HashiCorp, Inc.
|
|
* SPDX-License-Identifier: BUSL-1.1
|
|
*/
|
|
|
|
import { inject as service } from '@ember/service';
|
|
import { get } from '@ember/object';
|
|
import ApplicationSerializer from './application';
|
|
import classic from 'ember-classic-decorator';
|
|
|
|
// Look up a task group by name within a job definition.
// Returns the matching group object, or null when the job is absent,
// has no TaskGroups, or no group carries the requested name.
const taskGroupFromJob = (job, taskGroupName) => {
  const groups = job && job.TaskGroups;
  if (!groups) {
    return null;
  }
  const match = groups.find((group) => group.Name === taskGroupName);
  return match || null;
};
|
|
|
|
// Sum per-task resource requests into one combined totals object.
// Tasks missing a Cpu/Memory/Disk entry contribute 0 for that field.
const merge = (tasks) => {
  const totals = {
    Cpu: { CpuShares: 0 },
    Memory: { MemoryMB: 0 },
    Disk: { DiskMB: 0 },
  };

  for (const task of tasks) {
    totals.Cpu.CpuShares += (task.Cpu && task.Cpu.CpuShares) || 0;
    totals.Memory.MemoryMB += (task.Memory && task.Memory.MemoryMB) || 0;
    totals.Disk.DiskMB += (task.Disk && task.Disk.DiskMB) || 0;
  }

  return totals;
};
|
|
|
|
// Serializer for Allocation records: renames API payload keys and
// reshapes several nested structures into the attribute names the
// UI's allocation model expects. All transformations mutate `hash`
// in place before delegating to the base serializer.
@classic
export default class AllocationSerializer extends ApplicationSerializer {
  @service system;

  // Map model attribute names to their API payload keys.
  attrs = {
    taskGroupName: 'TaskGroup',
    states: 'TaskStates',
  };

  // Nanosecond-precision timestamp fields.
  // NOTE(review): presumably split/converted by ApplicationSerializer —
  // confirm against ./application.
  separateNanos = ['CreateTime', 'ModifyTime'];

  normalize(typeHash, hash) {
    // Transform the map-based TaskStates object into an array-based
    // TaskState fragment list, sorted by task name for stable ordering.
    const states = hash.TaskStates || {};
    hash.TaskStates = Object.keys(states)
      .sort()
      .map((key) => {
        const state = states[key] || {};
        // make sure events, if null, is an empty array
        state.Events = state.Events || [];
        // Copy every state field onto a summary object keyed by Name.
        const summary = { Name: key };
        Object.keys(state).forEach(
          (stateKey) => (summary[stateKey] = state[stateKey])
        );
        // Attach this task's allocated resources, when present.
        // NOTE(review): assumes AllocatedResources.Tasks exists whenever
        // AllocatedResources does — confirm against the API shape.
        summary.Resources =
          hash.AllocatedResources && hash.AllocatedResources.Tasks[key];
        return summary;
      });

    // Fall back to the version embedded in the Job definition when the
    // top-level JobVersion is absent.
    hash.JobVersion =
      hash.JobVersion != null ? hash.JobVersion : get(hash, 'Job.Version');

    // Keep the raw job ID, then replace JobID with a serialized
    // [id, namespace] tuple. Namespace must be resolved first since
    // the tuple includes it.
    hash.PlainJobId = hash.JobID;
    hash.Namespace = hash.Namespace || get(hash, 'Job.Namespace') || 'default';
    hash.JobID = JSON.stringify([hash.JobID, hash.Namespace]);

    // Lift nested optional structures onto top-level attributes.
    hash.RescheduleEvents = (hash.RescheduleTracker || {}).Events;

    hash.IsMigrating = (hash.DesiredTransition || {}).Migrate;

    // API returns empty strings instead of null
    hash.PreviousAllocationID = hash.PreviousAllocation
      ? hash.PreviousAllocation
      : null;
    hash.NextAllocationID = hash.NextAllocation ? hash.NextAllocation : null;
    hash.FollowUpEvaluationID = hash.FollowupEvalID
      ? hash.FollowupEvalID
      : null;

    hash.PreemptedAllocationIDs = hash.PreemptedAllocations || [];
    hash.PreemptedByAllocationID = hash.PreemptedByAllocation || null;
    hash.WasPreempted = !!hash.PreemptedByAllocationID;

    // Collapse the per-task resource map into one merged total, then
    // re-attach the allocation-level (Shared) ports and networks.
    // `shared` is captured before AllocatedResources is overwritten.
    const shared = hash.AllocatedResources && hash.AllocatedResources.Shared;
    hash.AllocatedResources =
      hash.AllocatedResources &&
      merge(Object.values(hash.AllocatedResources.Tasks));
    if (shared) {
      hash.AllocatedResources.Ports = shared.Ports;
      hash.AllocatedResources.Networks = shared.Networks;
    }

    // The Job definition for an allocation is only included in findRecord responses.
    hash.AllocationTaskGroup = !hash.Job
      ? null
      : taskGroupFromJob(hash.Job, hash.TaskGroup);

    return super.normalize(typeHash, hash);
  }
}
|