Mirror of https://github.com/kemko/nomad.git, synced 2026-01-10 04:15:41 +03:00
Merge pull request #8726 from hashicorp/ui/mirage-job-resources
UI: Allow for setting job resource requirements via the Mirage job factory
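In practice, the new option lets a test or local scenario request explicit reservations when creating a job. A minimal sketch, assuming a standard ember-cli-mirage test context where `server` is already running (the surrounding setup is not part of this diff):

// 'M' maps to MemoryMB and 'C' to CPU; 'D' (DiskMB) and 'I' (IOPS) are also accepted.
const job = server.create('job', {
  resourceSpec: ['M: 256, C: 500', 'M: 1024, C: 1200'],
});
// Two entries -> two task groups, each dividing its reservation across its tasks.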
@@ -27,10 +27,10 @@ export const STORAGE_PROVIDERS = ['ebs', 'zfs', 'nfs', 'cow', 'moo'];
 export function generateResources(options = {}) {
   return {
-    CPU: faker.helpers.randomize(CPU_RESERVATIONS),
-    MemoryMB: faker.helpers.randomize(MEMORY_RESERVATIONS),
-    DiskMB: faker.helpers.randomize(DISK_RESERVATIONS),
-    IOPS: faker.helpers.randomize(IOPS_RESERVATIONS),
+    CPU: options.CPU || faker.helpers.randomize(CPU_RESERVATIONS),
+    MemoryMB: options.MemoryMB || faker.helpers.randomize(MEMORY_RESERVATIONS),
+    DiskMB: options.DiskMB || faker.helpers.randomize(DISK_RESERVATIONS),
+    IOPS: options.IOPS || faker.helpers.randomize(IOPS_RESERVATIONS),
     Networks: generateNetworks(options.networks),
     Ports: generatePorts(options.networks),
   };
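The net effect is that generateResources now prefers caller-supplied values and only falls back to the randomized reservations. An illustrative call, using the names from the hunk above:

// CPU and MemoryMB come from the options; DiskMB, IOPS, Networks and Ports
// are still randomized through faker.
const resources = generateResources({ CPU: 500, MemoryMB: 256 });
// resources.CPU === 500 && resources.MemoryMB === 256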
@@ -73,15 +73,17 @@ export function generateNetworks(options = {}) {
 }

 export function generatePorts(options = {}) {
-  return Array(faker.random.number({
-    min: options.minPorts != null ? options.minPorts : 0,
-    max: options.maxPorts != null ? options.maxPorts : 2
-  }))
+  return Array(
+    faker.random.number({
+      min: options.minPorts != null ? options.minPorts : 0,
+      max: options.maxPorts != null ? options.maxPorts : 2,
+    })
+  )
     .fill(null)
     .map(() => ({
       Label: faker.hacker.noun(),
       Value: faker.random.number({ min: 5000, max: 60000 }),
       To: faker.random.number({ min: 5000, max: 60000 }),
       HostIP: faker.random.boolean() ? faker.internet.ip() : faker.internet.ipv6(),
-    }))
-}
+    }));
+}
@@ -40,16 +40,18 @@ export default Factory.extend({
   withTaskWithPorts: trait({
     afterCreate(allocation, server) {
       const taskGroup = server.db.taskGroups.findBy({ name: allocation.taskGroup });
-      const resources = taskGroup.taskIds.map(id =>
-        server.create(
+      const resources = taskGroup.taskIds.map(id => {
+        const task = server.db.tasks.find(id);
+        return server.create(
           'task-resource',
           {
             allocation,
-            name: server.db.tasks.find(id).name,
+            name: task.name,
+            resources: task.Resources,
           },
           'withReservedPorts'
-        )
-      );
+        );
+      });

       allocation.update({ taskResourceIds: resources.mapBy('id') });
     },
@@ -58,16 +60,18 @@ export default Factory.extend({
   withoutTaskWithPorts: trait({
     afterCreate(allocation, server) {
       const taskGroup = server.db.taskGroups.findBy({ name: allocation.taskGroup });
-      const resources = taskGroup.taskIds.map(id =>
-        server.create(
+      const resources = taskGroup.taskIds.map(id => {
+        const task = server.db.tasks.find(id);
+        return server.create(
           'task-resource',
           {
             allocation,
-            name: server.db.tasks.find(id).name,
+            name: task.name,
+            resources: task.Resources,
           },
           'withoutReservedPorts'
-        )
-      );
+        );
+      });

       allocation.update({ taskResourceIds: resources.mapBy('id') });
     },
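Both traits now look each task up once and copy its Resources onto the generated task-resource record. A hedged sketch of what that buys in a test (assumes the app's standard Mirage factories and a running `server`):

const allocation = server.create('allocation', 'withTaskWithPorts');
const firstResource = server.db.taskResources.find(allocation.taskResourceIds[0]);
// firstResource.resources now mirrors the Resources object of the task it
// belongs to, instead of being absent as before this change.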
@@ -191,12 +195,14 @@ export default Factory.extend({
         })
       );

-      const resources = taskGroup.taskIds.map(id =>
-        server.create('task-resource', {
+      const resources = taskGroup.taskIds.map(id => {
+        const task = server.db.tasks.find(id);
+        return server.create('task-resource', {
           allocation,
-          name: server.db.tasks.find(id).name,
-        })
-      );
+          name: task.name,
+          resources: task.Resources,
+        });
+      });

       allocation.update({
         taskStateIds: allocation.clientStatus === 'pending' ? [] : states.mapBy('id'),
@@ -20,7 +20,19 @@ export default Factory.extend({

   version: 1,

-  groupsCount: () => faker.random.number({ min: 1, max: 2 }),
+  // When provided, the resourceSpec will inform how many task groups to create
+  // and how much of each resource that task group reserves.
+  //
+  // One task group, 256 MiB memory and 500 Mhz cpu
+  // resourceSpec: ['M: 256, C: 500']
+  //
+  // Two task groups
+  // resourceSpec: ['M: 256, C: 500', 'M: 1024, C: 1200']
+  resourceSpec: null,
+
+  groupsCount() {
+    return this.resourceSpec ? this.resourceSpec.length : faker.random.number({ min: 1, max: 2 });
+  },

   region: () => 'global',
   type: () => faker.helpers.randomize(JOB_TYPES),
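Because groupsCount is now a function, its value is derived from the spec when one is given. Illustration, under the same assumed test context as above:

const job = server.create('job', { resourceSpec: ['M: 256, C: 500', 'M: 1024, C: 1200'] });
// job.groupsCount === 2; with resourceSpec left null it falls back to
// faker.random.number({ min: 1, max: 2 }).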
@@ -135,9 +147,22 @@ export default Factory.extend({
       groupProps.count = job.groupTaskCount;
     }

-    const groups = job.noHostVolumes
-      ? server.createList('task-group', job.groupsCount, 'noHostVolumes', groupProps)
-      : server.createList('task-group', job.groupsCount, groupProps);
+    let groups;
+    if (job.noHostVolumes) {
+      groups = provide(job.groupsCount, (_, idx) =>
+        server.create('task-group', 'noHostVolumes', {
+          ...groupProps,
+          resourceSpec: job.resourceSpec && job.resourceSpec.length && job.resourceSpec[idx],
+        })
+      );
+    } else {
+      groups = provide(job.groupsCount, (_, idx) =>
+        server.create('task-group', {
+          ...groupProps,
+          resourceSpec: job.resourceSpec && job.resourceSpec.length && job.resourceSpec[idx],
+        })
+      );
+    }

     job.update({
       taskGroupIds: groups.mapBy('id'),
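The switch from server.createList to provide lets each task group receive its own slice of job.resourceSpec by index. A hypothetical equivalent of the utils helper, for illustration only (the real provide lives in the app's mirage utils and is assumed to behave like an indexed map):

// Assumed semantics: build an array of `count` results by calling fn(element, index).
function provideLike(count, fn) {
  return Array.from({ length: count }, (element, index) => fn(element, index));
}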
@@ -1,6 +1,7 @@
 import { Factory, trait } from 'ember-cli-mirage';
 import faker from 'nomad-ui/mirage/faker';
 import { provide } from '../utils';
+import { generateResources } from '../common';

 const DISK_RESERVATIONS = [200, 500, 1000, 2000, 5000, 10000, 100000];

@@ -36,6 +37,9 @@ export default Factory.extend({
   // When true, only creates allocations
   shallow: false,

+  // When set, passed into tasks to set resource values
+  resourceSpec: null,
+
   afterCreate(group, server) {
     let taskIds = [];
     let volumes = Object.keys(group.volumes);
@@ -66,12 +70,20 @@ export default Factory.extend({
     }

     if (!group.shallow) {
-      const tasks = provide(group.count, () => {
+      const resources =
+        group.resourceSpec && divide(group.count, parseResourceSpec(group.resourceSpec));
+      const tasks = provide(group.count, (_, idx) => {
         const mounts = faker.helpers
           .shuffle(volumes)
           .slice(0, faker.random.number({ min: 1, max: 3 }));
+
+        const maybeResources = {};
+        if (resources) {
+          maybeResources.Resources = generateResources(resources[idx]);
+        }
         return server.create('task', {
           taskGroup: group,
+          ...maybeResources,
           volumeMounts: mounts.map(mount => ({
             Volume: mount,
             Destination: `/${faker.internet.userName()}/${faker.internet.domainWord()}/${faker.internet.color()}`,
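Putting the helpers together for a group of two tasks and a spec of 'M: 256, C: 500' (a worked illustration; the actual split is random):

// parseResourceSpec('M: 256, C: 500')    -> { MemoryMB: 256, CPU: 500 }
// divide(2, { MemoryMB: 256, CPU: 500 }) -> e.g. [{ MemoryMB: 64, CPU: 125 },
//                                                 { MemoryMB: 192, CPU: 375 }]
// Each element is then passed to generateResources, so the per-task
// reservations sum back to the group's spec (up to rounding).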
@@ -136,3 +148,60 @@ function makeHostVolumes() {
     return hash;
   }, {});
 }
+
+function parseResourceSpec(spec) {
+  const mapping = {
+    M: 'MemoryMB',
+    C: 'CPU',
+    D: 'DiskMB',
+    I: 'IOPS',
+  };
+
+  const terms = spec.split(',').map(t => {
+    const [k, v] = t
+      .trim()
+      .split(':')
+      .map(kv => kv.trim());
+    return [k, +v];
+  });
+
+  return terms.reduce((hash, term) => {
+    hash[mapping[term[0]]] = term[1];
+    return hash;
+  }, {});
+}
+
+// Split a single resources object into N resource objects where
+// the sum of each property of the new resources objects equals
+// the original resources properties
+// ex: divide(2, { Mem: 400, Cpu: 250 }) -> [{ Mem: 80, Cpu: 50 }, { Mem: 320, Cpu: 200 }]
+function divide(count, resources) {
+  const wheel = roulette(1, count);
+
+  const ret = provide(count, (_, idx) => {
+    return Object.keys(resources).reduce((hash, key) => {
+      hash[key] = Math.round(resources[key] * wheel[idx]);
+      return hash;
+    }, {});
+  });
+
+  return ret;
+}
+
+// Roulette splits a number into N divisions
+// Variance is a value between 0 and 1 that determines how much each division in
+// size. At 0 each division is even, at 1, it's entirely random but the sum of all
+// divisions is guaranteed to equal the total value.
+function roulette(total, divisions, variance = 0.8) {
+  let roulette = new Array(divisions).fill(total / divisions);
+  roulette.forEach((v, i) => {
+    if (i === roulette.length - 1) return;
+    roulette.splice(i, 2, ...rngDistribute(roulette[i], roulette[i + 1], variance));
+  });
+  return roulette;
+}
+
+function rngDistribute(a, b, variance = 0.8) {
+  const move = a * faker.random.number({ min: 0, max: variance, precision: 0.01 });
+  return [a - move, b + move];
+}
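To make the roulette/rngDistribute pair concrete (a worked illustration; actual values are random):

// roulette(1, 3) starts from [1/3, 1/3, 1/3] and, for each slot but the last,
// moves a random share (up to `variance` of that slot) into the next slot,
// e.g. -> [0.12, 0.41, 0.47]. The divisions always sum to the original total,
// which is what lets divide() scale each resource value by wheel[idx].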
@@ -18,5 +18,14 @@ export default ApplicationSerializer.extend({

 function serializeAllocation(allocation) {
   allocation.TaskStates = allocation.TaskStates.reduce(arrToObj('Name'), {});
+  allocation.Resources = allocation.TaskResources.mapBy('Resources').reduce(
+    (hash, resources) => {
+      ['CPU', 'DiskMB', 'IOPS', 'MemoryMB'].forEach(key => (hash[key] += resources[key]));
+      hash.Networks = resources.Networks;
+      hash.Ports = resources.Ports;
+      return hash;
+    },
+    { CPU: 0, DiskMB: 0, IOPS: 0, MemoryMB: 0 }
+  );
   allocation.TaskResources = allocation.TaskResources.reduce(arrToObj('Name', 'Resources'), {});
 }
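The serializer now rolls the per-task numbers up to the allocation level. A worked example with illustrative values:

// Given TaskResources whose Resources are { CPU: 500, MemoryMB: 256, ... } and
// { CPU: 700, MemoryMB: 1024, ... }, the serialized allocation gets
// Resources.CPU === 1200 and Resources.MemoryMB === 1280 (each key starts at 0
// and is summed in the reduce), while Networks and Ports are taken from the
// last task's resources.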