Mirror of https://github.com/kemko/nomad.git, synced 2026-01-06 10:25:42 +03:00
tests: remove duplicate import statements.
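The pattern being cleaned up looks roughly like the minimal sketch below (a hypothetical file; only the import path and identifiers are taken from the hunks that follow): each affected test file imported the same package twice, once plain and once under an alias, and the cleanup keeps the aliased import and rewrites the call sites to use it.

// Illustrative sketch only; not part of the repository.
package example

import (
	// Before this commit the same path also appeared un-aliased as
	//   "github.com/hashicorp/nomad/nomad/structs"
	// Go allows importing one path under several names, so the duplicate
	// compiled fine but was redundant; only the aliased form is kept.
	nstructs "github.com/hashicorp/nomad/nomad/structs"
)

// Call sites switch to the surviving alias, e.g.
// structs.DefaultNamespace becomes nstructs.DefaultNamespace.
var defaultNS = nstructs.DefaultNamespace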
@@ -18,7 +18,6 @@ import (
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
nstructs "github.com/hashicorp/nomad/nomad/structs"
nconfig "github.com/hashicorp/nomad/nomad/structs/config"
"github.com/hashicorp/nomad/plugins/drivers"
@@ -758,7 +757,7 @@ func TestAlloc_ExecStreaming_ACL_Basic(t *testing.T) {
policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityDeny})
tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)

policyGood := mock.NamespacePolicy(structs.DefaultNamespace, "",
policyGood := mock.NamespacePolicy(nstructs.DefaultNamespace, "",
[]string{acl.NamespaceCapabilityAllocExec, acl.NamespaceCapabilityReadFS})
tokenGood := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid2", policyGood)

@@ -873,11 +872,11 @@ func TestAlloc_ExecStreaming_ACL_WithIsolation_Image(t *testing.T) {
policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityDeny})
tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)

policyAllocExec := mock.NamespacePolicy(structs.DefaultNamespace, "",
policyAllocExec := mock.NamespacePolicy(nstructs.DefaultNamespace, "",
[]string{acl.NamespaceCapabilityAllocExec})
tokenAllocExec := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid2", policyAllocExec)

policyAllocNodeExec := mock.NamespacePolicy(structs.DefaultNamespace, "",
policyAllocNodeExec := mock.NamespacePolicy(nstructs.DefaultNamespace, "",
[]string{acl.NamespaceCapabilityAllocExec, acl.NamespaceCapabilityAllocNodeExec})
tokenAllocNodeExec := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid2", policyAllocNodeExec)

@@ -1022,11 +1021,11 @@ func TestAlloc_ExecStreaming_ACL_WithIsolation_Chroot(t *testing.T) {
policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityDeny})
tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)

policyAllocExec := mock.NamespacePolicy(structs.DefaultNamespace, "",
policyAllocExec := mock.NamespacePolicy(nstructs.DefaultNamespace, "",
[]string{acl.NamespaceCapabilityAllocExec})
tokenAllocExec := mock.CreatePolicyAndToken(t, s.State(), 1009, "alloc-exec", policyAllocExec)

policyAllocNodeExec := mock.NamespacePolicy(structs.DefaultNamespace, "",
policyAllocNodeExec := mock.NamespacePolicy(nstructs.DefaultNamespace, "",
[]string{acl.NamespaceCapabilityAllocExec, acl.NamespaceCapabilityAllocNodeExec})
tokenAllocNodeExec := mock.CreatePolicyAndToken(t, s.State(), 1009, "alloc-node-exec", policyAllocNodeExec)

@@ -1166,11 +1165,11 @@ func TestAlloc_ExecStreaming_ACL_WithIsolation_None(t *testing.T) {
policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityDeny})
tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)

policyAllocExec := mock.NamespacePolicy(structs.DefaultNamespace, "",
policyAllocExec := mock.NamespacePolicy(nstructs.DefaultNamespace, "",
[]string{acl.NamespaceCapabilityAllocExec})
tokenAllocExec := mock.CreatePolicyAndToken(t, s.State(), 1009, "alloc-exec", policyAllocExec)

policyAllocNodeExec := mock.NamespacePolicy(structs.DefaultNamespace, "",
policyAllocNodeExec := mock.NamespacePolicy(nstructs.DefaultNamespace, "",
[]string{acl.NamespaceCapabilityAllocExec, acl.NamespaceCapabilityAllocNodeExec})
tokenAllocNodeExec := mock.CreatePolicyAndToken(t, s.State(), 1009, "alloc-node-exec", policyAllocNodeExec)

@@ -13,7 +13,6 @@ import (
"time"

"github.com/hashicorp/nomad/client/allocrunner/interfaces"
"github.com/hashicorp/nomad/client/consul"
consulapi "github.com/hashicorp/nomad/client/consul"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/testlog"
@@ -186,7 +185,7 @@ func TestSIDSHook_deriveSIToken(t *testing.T) {
Kind: taskKind,
},
logger: testlog.HCLogger(t),
sidsClient: consul.NewMockServiceIdentitiesClient(),
sidsClient: consulapi.NewMockServiceIdentitiesClient(),
})

ctx := context.Background()
@@ -199,7 +198,7 @@ func TestSIDSHook_deriveSIToken_timeout(t *testing.T) {
t.Parallel()
r := require.New(t)

siClient := consul.NewMockServiceIdentitiesClient()
siClient := consulapi.NewMockServiceIdentitiesClient()
siClient.DeriveTokenFn = func(allocation *structs.Allocation, strings []string) (m map[string]string, err error) {
select {
// block forever, hopefully triggering a timeout in the caller

@@ -17,7 +17,6 @@ import (
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/allocrunner/interfaces"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/consul"
consulapi "github.com/hashicorp/nomad/client/consul"
"github.com/hashicorp/nomad/client/devicemanager"
"github.com/hashicorp/nomad/client/pluginmanager/drivermanager"
@@ -935,7 +934,7 @@ func TestTaskRunner_ShutdownDelay(t *testing.T) {
tr, conf, cleanup := runTestTaskRunner(t, alloc, task.Name)
defer cleanup()

mockConsul := conf.Consul.(*consul.MockConsulServiceClient)
mockConsul := conf.Consul.(*consulapi.MockConsulServiceClient)

// Wait for the task to start
testWaitForTaskToStart(t, tr)

@@ -14,7 +14,6 @@ import (
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/nomad/structs/config"
sconfig "github.com/hashicorp/nomad/nomad/structs/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -250,12 +249,12 @@ func TestAgent_ServerConfig_Limits_Error(t *testing.T) {
cases := []struct {
name string
expectedErr string
limits sconfig.Limits
limits config.Limits
}{
{
name: "Negative Timeout",
expectedErr: "rpc_handshake_timeout must be >= 0",
limits: sconfig.Limits{
limits: config.Limits{
RPCHandshakeTimeout: "-5s",
RPCMaxConnsPerClient: helper.IntToPtr(100),
},
@@ -263,7 +262,7 @@ func TestAgent_ServerConfig_Limits_Error(t *testing.T) {
{
name: "Invalid Timeout",
expectedErr: "error parsing rpc_handshake_timeout",
limits: sconfig.Limits{
limits: config.Limits{
RPCHandshakeTimeout: "s",
RPCMaxConnsPerClient: helper.IntToPtr(100),
},
@@ -271,7 +270,7 @@ func TestAgent_ServerConfig_Limits_Error(t *testing.T) {
{
name: "Missing Timeout",
expectedErr: "error parsing rpc_handshake_timeout",
limits: sconfig.Limits{
limits: config.Limits{
RPCHandshakeTimeout: "",
RPCMaxConnsPerClient: helper.IntToPtr(100),
},
@@ -279,7 +278,7 @@ func TestAgent_ServerConfig_Limits_Error(t *testing.T) {
{
name: "Negative Connection Limit",
expectedErr: "rpc_max_conns_per_client must be > 25; found: -100",
limits: sconfig.Limits{
limits: config.Limits{
RPCHandshakeTimeout: "5s",
RPCMaxConnsPerClient: helper.IntToPtr(-100),
},
@@ -287,9 +286,9 @@ func TestAgent_ServerConfig_Limits_Error(t *testing.T) {
{
name: "Low Connection Limit",
expectedErr: "rpc_max_conns_per_client must be > 25; found: 20",
limits: sconfig.Limits{
limits: config.Limits{
RPCHandshakeTimeout: "5s",
RPCMaxConnsPerClient: helper.IntToPtr(sconfig.LimitsNonStreamingConnsPerClient),
RPCMaxConnsPerClient: helper.IntToPtr(config.LimitsNonStreamingConnsPerClient),
},
},
}
@@ -316,7 +315,7 @@ func TestAgent_ServerConfig_Limits_OK(t *testing.T) {

cases := []struct {
name string
limits sconfig.Limits
limits config.Limits
}{
{
name: "Default",
@@ -324,28 +323,28 @@ func TestAgent_ServerConfig_Limits_OK(t *testing.T) {
},
{
name: "Zero+nil is valid to disable",
limits: sconfig.Limits{
limits: config.Limits{
RPCHandshakeTimeout: "0",
RPCMaxConnsPerClient: nil,
},
},
{
name: "Zeros are valid",
limits: sconfig.Limits{
limits: config.Limits{
RPCHandshakeTimeout: "0s",
RPCMaxConnsPerClient: helper.IntToPtr(0),
},
},
{
name: "Low limits are valid",
limits: sconfig.Limits{
limits: config.Limits{
RPCHandshakeTimeout: "1ms",
RPCMaxConnsPerClient: helper.IntToPtr(26),
},
},
{
name: "High limits are valid",
limits: sconfig.Limits{
limits: config.Limits{
RPCHandshakeTimeout: "5h",
RPCMaxConnsPerClient: helper.IntToPtr(100000),
},
@@ -560,10 +559,10 @@ func TestAgent_HTTPCheck(t *testing.T) {
config: &Config{
AdvertiseAddrs: &AdvertiseAddrs{HTTP: "advertise:4646"},
normalizedAddrs: &Addresses{HTTP: "normalized:4646"},
Consul: &sconfig.ConsulConfig{
Consul: &config.ConsulConfig{
ChecksUseAdvertise: helper.BoolToPtr(false),
},
TLSConfig: &sconfig.TLSConfig{EnableHTTP: false},
TLSConfig: &config.TLSConfig{EnableHTTP: false},
},
}
}
@@ -700,7 +699,7 @@ func TestServer_Reload_TLS_Shared_Keyloader(t *testing.T) {
)

agent := NewTestAgent(t, t.Name(), func(c *Config) {
c.TLSConfig = &sconfig.TLSConfig{
c.TLSConfig = &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,
@@ -720,7 +719,7 @@ func TestServer_Reload_TLS_Shared_Keyloader(t *testing.T) {

// Switch to the correct certificates and reload
newConfig := &Config{
TLSConfig: &sconfig.TLSConfig{
TLSConfig: &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,
@@ -767,7 +766,7 @@ func TestServer_Reload_TLS_Certificate(t *testing.T) {
)

agentConfig := &Config{
TLSConfig: &sconfig.TLSConfig{
TLSConfig: &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,
@@ -783,7 +782,7 @@ func TestServer_Reload_TLS_Certificate(t *testing.T) {
}

newConfig := &Config{
TLSConfig: &sconfig.TLSConfig{
TLSConfig: &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,
@@ -816,7 +815,7 @@ func TestServer_Reload_TLS_Certificate_Invalid(t *testing.T) {
)

agentConfig := &Config{
TLSConfig: &sconfig.TLSConfig{
TLSConfig: &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,
@@ -832,7 +831,7 @@ func TestServer_Reload_TLS_Certificate_Invalid(t *testing.T) {
}

newConfig := &Config{
TLSConfig: &sconfig.TLSConfig{
TLSConfig: &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,
@@ -859,9 +858,9 @@ func Test_GetConfig(t *testing.T) {
Ports: &Ports{},
Addresses: &Addresses{},
AdvertiseAddrs: &AdvertiseAddrs{},
Vault: &sconfig.VaultConfig{},
Consul: &sconfig.ConsulConfig{},
Sentinel: &sconfig.SentinelConfig{},
Vault: &config.VaultConfig{},
Consul: &config.ConsulConfig{},
Sentinel: &config.SentinelConfig{},
}

agent := &Agent{
@@ -903,7 +902,7 @@ func TestServer_Reload_TLS_UpgradeToTLS(t *testing.T) {
logger := testlog.HCLogger(t)

agentConfig := &Config{
TLSConfig: &sconfig.TLSConfig{},
TLSConfig: &config.TLSConfig{},
}

agent := &Agent{
@@ -913,7 +912,7 @@ func TestServer_Reload_TLS_UpgradeToTLS(t *testing.T) {
}

newConfig := &Config{
TLSConfig: &sconfig.TLSConfig{
TLSConfig: &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,
@@ -946,7 +945,7 @@ func TestServer_Reload_TLS_DowngradeFromTLS(t *testing.T) {
logger := testlog.HCLogger(t)

agentConfig := &Config{
TLSConfig: &sconfig.TLSConfig{
TLSConfig: &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,
@@ -963,7 +962,7 @@ func TestServer_Reload_TLS_DowngradeFromTLS(t *testing.T) {
}

newConfig := &Config{
TLSConfig: &sconfig.TLSConfig{},
TLSConfig: &config.TLSConfig{},
}

assert.False(agentConfig.TLSConfig.IsEmpty())
@@ -987,7 +986,7 @@ func TestServer_ShouldReload_ReturnFalseForNoChanges(t *testing.T) {
defer os.RemoveAll(dir)

sameAgentConfig := &Config{
TLSConfig: &sconfig.TLSConfig{
TLSConfig: &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,
@@ -998,7 +997,7 @@ func TestServer_ShouldReload_ReturnFalseForNoChanges(t *testing.T) {
}

agent := NewTestAgent(t, t.Name(), func(c *Config) {
c.TLSConfig = &sconfig.TLSConfig{
c.TLSConfig = &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,
@@ -1027,7 +1026,7 @@ func TestServer_ShouldReload_ReturnTrueForOnlyHTTPChanges(t *testing.T) {
defer os.RemoveAll(dir)

sameAgentConfig := &Config{
TLSConfig: &sconfig.TLSConfig{
TLSConfig: &config.TLSConfig{
EnableHTTP: false,
EnableRPC: true,
VerifyServerHostname: true,
@@ -1038,7 +1037,7 @@ func TestServer_ShouldReload_ReturnTrueForOnlyHTTPChanges(t *testing.T) {
}

agent := NewTestAgent(t, t.Name(), func(c *Config) {
c.TLSConfig = &sconfig.TLSConfig{
c.TLSConfig = &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,
@@ -1067,7 +1066,7 @@ func TestServer_ShouldReload_ReturnTrueForOnlyRPCChanges(t *testing.T) {
defer os.RemoveAll(dir)

sameAgentConfig := &Config{
TLSConfig: &sconfig.TLSConfig{
TLSConfig: &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,
@@ -1078,7 +1077,7 @@ func TestServer_ShouldReload_ReturnTrueForOnlyRPCChanges(t *testing.T) {
}

agent := NewTestAgent(t, t.Name(), func(c *Config) {
c.TLSConfig = &sconfig.TLSConfig{
c.TLSConfig = &config.TLSConfig{
EnableHTTP: true,
EnableRPC: false,
VerifyServerHostname: true,
@@ -1109,7 +1108,7 @@ func TestServer_ShouldReload_ReturnTrueForConfigChanges(t *testing.T) {
defer os.RemoveAll(dir)

agent := NewTestAgent(t, t.Name(), func(c *Config) {
c.TLSConfig = &sconfig.TLSConfig{
c.TLSConfig = &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,
@@ -1121,7 +1120,7 @@ func TestServer_ShouldReload_ReturnTrueForConfigChanges(t *testing.T) {
defer agent.Shutdown()

newConfig := &Config{
TLSConfig: &sconfig.TLSConfig{
TLSConfig: &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,
@@ -1179,7 +1178,7 @@ func TestServer_ShouldReload_ReturnTrueForFileChanges(t *testing.T) {
logger := testlog.HCLogger(t)

agentConfig := &Config{
TLSConfig: &sconfig.TLSConfig{
TLSConfig: &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,
@@ -1224,7 +1223,7 @@ func TestServer_ShouldReload_ReturnTrueForFileChanges(t *testing.T) {
require.Nil(err)

newAgentConfig := &Config{
TLSConfig: &sconfig.TLSConfig{
TLSConfig: &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,
@@ -1254,7 +1253,7 @@ func TestServer_ShouldReload_ShouldHandleMultipleChanges(t *testing.T) {
defer os.RemoveAll(dir)

sameAgentConfig := &Config{
TLSConfig: &sconfig.TLSConfig{
TLSConfig: &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,
@@ -1265,7 +1264,7 @@ func TestServer_ShouldReload_ShouldHandleMultipleChanges(t *testing.T) {
}

agent := NewTestAgent(t, t.Name(), func(c *Config) {
c.TLSConfig = &sconfig.TLSConfig{
c.TLSConfig = &config.TLSConfig{
EnableHTTP: true,
EnableRPC: true,
VerifyServerHostname: true,

@@ -5,7 +5,6 @@ import (
"testing"
"time"

"github.com/hashicorp/consul/api"
consulapi "github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/nomad/helper/testlog"
@@ -79,7 +78,7 @@ func TestConsul_Connect(t *testing.T) {
}

// required by isNomadSidecar assertion below
serviceRegMap := map[string]*api.AgentServiceRegistration{
serviceRegMap := map[string]*consulapi.AgentServiceRegistration{
MakeAllocServiceID(alloc.ID, "group-"+alloc.TaskGroup, tg.Services[0]): nil,
}

@@ -17,7 +17,6 @@ import (
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
nstructs "github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/plugins/drivers"
"github.com/hashicorp/nomad/testutil"
@@ -48,19 +47,19 @@ func TestClientAllocations_GarbageCollectAll_Local(t *testing.T) {
})

// Make the request without having a node-id
req := &structs.NodeSpecificRequest{
QueryOptions: structs.QueryOptions{Region: "global"},
req := &nstructs.NodeSpecificRequest{
QueryOptions: nstructs.QueryOptions{Region: "global"},
}

// Fetch the response
var resp structs.GenericResponse
var resp nstructs.GenericResponse
err := msgpackrpc.CallWithCodec(codec, "ClientAllocations.GarbageCollectAll", req, &resp)
require.NotNil(err)
require.Contains(err.Error(), "missing")

// Fetch the response setting the node id
req.NodeID = c.NodeID()
var resp2 structs.GenericResponse
var resp2 nstructs.GenericResponse
err = msgpackrpc.CallWithCodec(codec, "ClientAllocations.GarbageCollectAll", req, &resp2)
require.Nil(err)
}
@@ -90,7 +89,7 @@ func TestClientAllocations_GarbageCollectAll_Local_ACL(t *testing.T) {
{
Name: "bad token",
Token: tokenBad.SecretID,
ExpectedError: structs.ErrPermissionDenied.Error(),
ExpectedError: nstructs.ErrPermissionDenied.Error(),
},
{
Name: "good token",
@@ -108,16 +107,16 @@ func TestClientAllocations_GarbageCollectAll_Local_ACL(t *testing.T) {
t.Run(c.Name, func(t *testing.T) {

// Make the request without having a node-id
req := &structs.NodeSpecificRequest{
req := &nstructs.NodeSpecificRequest{
NodeID: uuid.Generate(),
QueryOptions: structs.QueryOptions{
QueryOptions: nstructs.QueryOptions{
AuthToken: c.Token,
Region: "global",
},
}

// Fetch the response
var resp structs.GenericResponse
var resp nstructs.GenericResponse
err := msgpackrpc.CallWithCodec(codec, "ClientAllocations.GarbageCollectAll", req, &resp)
require.NotNil(err)
require.Contains(err.Error(), c.ExpectedError)
@@ -136,13 +135,13 @@ func TestClientAllocations_GarbageCollectAll_NoNode(t *testing.T) {
testutil.WaitForLeader(t, s.RPC)

// Make the request without having a node-id
req := &structs.NodeSpecificRequest{
req := &nstructs.NodeSpecificRequest{
NodeID: uuid.Generate(),
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: nstructs.QueryOptions{Region: "global"},
}

// Fetch the response
var resp structs.GenericResponse
var resp nstructs.GenericResponse
err := msgpackrpc.CallWithCodec(codec, "ClientAllocations.GarbageCollectAll", req, &resp)
require.NotNil(err)
require.Contains(err.Error(), "Unknown node")
@@ -164,21 +163,21 @@ func TestClientAllocations_GarbageCollectAll_OldNode(t *testing.T) {
node.Attributes["nomad.version"] = "0.7.1"
require.Nil(state.UpsertNode(nstructs.MsgTypeTestSetup, 1005, node))

req := &structs.NodeSpecificRequest{
req := &nstructs.NodeSpecificRequest{
NodeID: node.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: nstructs.QueryOptions{Region: "global"},
}

var resp structs.GenericResponse
var resp nstructs.GenericResponse
err := msgpackrpc.CallWithCodec(codec, "ClientAllocations.GarbageCollectAll", req, &resp)
require.True(structs.IsErrNodeLacksRpc(err))
require.True(nstructs.IsErrNodeLacksRpc(err))

// Test for a missing version error
delete(node.Attributes, "nomad.version")
require.Nil(state.UpsertNode(nstructs.MsgTypeTestSetup, 1006, node))

err = msgpackrpc.CallWithCodec(codec, "ClientAllocations.GarbageCollectAll", req, &resp)
require.True(structs.IsErrUnknownNomadVersion(err))
require.True(nstructs.IsErrUnknownNomadVersion(err))
}

func TestClientAllocations_GarbageCollectAll_Remote(t *testing.T) {
@@ -210,15 +209,15 @@ func TestClientAllocations_GarbageCollectAll_Remote(t *testing.T) {
if len(nodes) != 1 {
return false, fmt.Errorf("should have 1 client. found %d", len(nodes))
}
req := &structs.NodeSpecificRequest{
req := &nstructs.NodeSpecificRequest{
NodeID: c.NodeID(),
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: nstructs.QueryOptions{Region: "global"},
}
resp := structs.SingleNodeResponse{}
resp := nstructs.SingleNodeResponse{}
if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp); err != nil {
return false, err
}
return resp.Node != nil && resp.Node.Status == structs.NodeStatusReady, fmt.Errorf(
return resp.Node != nil && resp.Node.Status == nstructs.NodeStatusReady, fmt.Errorf(
"expected ready but found %s", pretty.Sprint(resp.Node))
}, func(err error) {
t.Fatalf("should have a clients")
@@ -230,9 +229,9 @@ func TestClientAllocations_GarbageCollectAll_Remote(t *testing.T) {
s1.nodeConnsLock.Unlock()

// Make the request
req := &structs.NodeSpecificRequest{
req := &nstructs.NodeSpecificRequest{
NodeID: c.NodeID(),
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: nstructs.QueryOptions{Region: "global"},
}

// Fetch the response
@@ -259,26 +258,26 @@ func TestClientAllocations_GarbageCollect_OldNode(t *testing.T) {

alloc := mock.Alloc()
alloc.NodeID = node.ID
require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1006, []*structs.Allocation{alloc}))
require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1006, []*nstructs.Allocation{alloc}))

req := &structs.AllocSpecificRequest{
req := &nstructs.AllocSpecificRequest{
AllocID: alloc.ID,
QueryOptions: structs.QueryOptions{
QueryOptions: nstructs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
Namespace: nstructs.DefaultNamespace,
},
}

var resp structs.GenericResponse
var resp nstructs.GenericResponse
err := msgpackrpc.CallWithCodec(codec, "ClientAllocations.GarbageCollect", req, &resp)
require.True(structs.IsErrNodeLacksRpc(err), err.Error())
require.True(nstructs.IsErrNodeLacksRpc(err), err.Error())

// Test for a missing version error
delete(node.Attributes, "nomad.version")
require.Nil(state.UpsertNode(nstructs.MsgTypeTestSetup, 1007, node))

err = msgpackrpc.CallWithCodec(codec, "ClientAllocations.GarbageCollect", req, &resp)
require.True(structs.IsErrUnknownNomadVersion(err), err.Error())
require.True(nstructs.IsErrUnknownNomadVersion(err), err.Error())
}

func TestClientAllocations_GarbageCollect_Local(t *testing.T) {
@@ -299,17 +298,17 @@ func TestClientAllocations_GarbageCollect_Local(t *testing.T) {

// Force an allocation onto the node
a := mock.Alloc()
a.Job.Type = structs.JobTypeBatch
a.Job.Type = nstructs.JobTypeBatch
a.NodeID = c.NodeID()
a.Job.TaskGroups[0].Count = 1
a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
a.Job.TaskGroups[0].Tasks[0] = &nstructs.Task{
Name: "web",
Driver: "mock_driver",
Config: map[string]interface{}{
"run_for": "2s",
},
LogConfig: structs.DefaultLogConfig(),
Resources: &structs.Resources{
LogConfig: nstructs.DefaultLogConfig(),
Resources: &nstructs.Resources{
CPU: 500,
MemoryMB: 256,
},
@@ -325,7 +324,7 @@ func TestClientAllocations_GarbageCollect_Local(t *testing.T) {
// Upsert the allocation
state := s.State()
require.Nil(state.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job))
require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*structs.Allocation{a}))
require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a}))

// Wait for the client to run the allocation
testutil.WaitForResult(func() (bool, error) {
@@ -336,7 +335,7 @@ func TestClientAllocations_GarbageCollect_Local(t *testing.T) {
if alloc == nil {
return false, fmt.Errorf("unknown alloc")
}
if alloc.ClientStatus != structs.AllocClientStatusComplete {
if alloc.ClientStatus != nstructs.AllocClientStatusComplete {
return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
}

@@ -346,19 +345,19 @@ func TestClientAllocations_GarbageCollect_Local(t *testing.T) {
})

// Make the request without having an alloc id
req := &structs.AllocSpecificRequest{
QueryOptions: structs.QueryOptions{Region: "global"},
req := &nstructs.AllocSpecificRequest{
QueryOptions: nstructs.QueryOptions{Region: "global"},
}

// Fetch the response
var resp structs.GenericResponse
var resp nstructs.GenericResponse
err := msgpackrpc.CallWithCodec(codec, "ClientAllocations.GarbageCollect", req, &resp)
require.NotNil(err)
require.Contains(err.Error(), "missing")

// Fetch the response setting the node id
req.AllocID = a.ID
var resp2 structs.GenericResponse
var resp2 nstructs.GenericResponse
err = msgpackrpc.CallWithCodec(codec, "ClientAllocations.GarbageCollect", req, &resp2)
require.Nil(err)
}
@@ -376,14 +375,14 @@ func TestClientAllocations_GarbageCollect_Local_ACL(t *testing.T) {
policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityReadFS})
tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)

policyGood := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilitySubmitJob})
policyGood := mock.NamespacePolicy(nstructs.DefaultNamespace, "", []string{acl.NamespaceCapabilitySubmitJob})
tokenGood := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid2", policyGood)

// Upsert the allocation
state := s.State()
alloc := mock.Alloc()
require.NoError(t, state.UpsertJob(nstructs.MsgTypeTestSetup, 1010, alloc.Job))
require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc}))
require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, []*nstructs.Allocation{alloc}))

cases := []struct {
Name string
@@ -393,17 +392,17 @@ func TestClientAllocations_GarbageCollect_Local_ACL(t *testing.T) {
{
Name: "bad token",
Token: tokenBad.SecretID,
ExpectedError: structs.ErrPermissionDenied.Error(),
ExpectedError: nstructs.ErrPermissionDenied.Error(),
},
{
Name: "good token",
Token: tokenGood.SecretID,
ExpectedError: structs.ErrUnknownNodePrefix,
ExpectedError: nstructs.ErrUnknownNodePrefix,
},
{
Name: "root token",
Token: root.SecretID,
ExpectedError: structs.ErrUnknownNodePrefix,
ExpectedError: nstructs.ErrUnknownNodePrefix,
},
}

@@ -411,17 +410,17 @@ func TestClientAllocations_GarbageCollect_Local_ACL(t *testing.T) {
t.Run(c.Name, func(t *testing.T) {

// Make the request without having a node-id
req := &structs.AllocSpecificRequest{
req := &nstructs.AllocSpecificRequest{
AllocID: alloc.ID,
QueryOptions: structs.QueryOptions{
QueryOptions: nstructs.QueryOptions{
AuthToken: c.Token,
Region: "global",
Namespace: structs.DefaultNamespace,
Namespace: nstructs.DefaultNamespace,
},
}

// Fetch the response
var resp structs.GenericResponse
var resp nstructs.GenericResponse
err := msgpackrpc.CallWithCodec(codec, "ClientAllocations.GarbageCollect", req, &resp)
require.NotNil(t, err)
require.Contains(t, err.Error(), c.ExpectedError)
@@ -455,17 +454,17 @@ func TestClientAllocations_GarbageCollect_Remote(t *testing.T) {

// Force an allocation onto the node
a := mock.Alloc()
a.Job.Type = structs.JobTypeBatch
a.Job.Type = nstructs.JobTypeBatch
a.NodeID = c.NodeID()
a.Job.TaskGroups[0].Count = 1
a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
a.Job.TaskGroups[0].Tasks[0] = &nstructs.Task{
Name: "web",
Driver: "mock_driver",
Config: map[string]interface{}{
"run_for": "2s",
},
LogConfig: structs.DefaultLogConfig(),
Resources: &structs.Resources{
LogConfig: nstructs.DefaultLogConfig(),
Resources: &nstructs.Resources{
CPU: 500,
MemoryMB: 256,
},
@@ -475,15 +474,15 @@ func TestClientAllocations_GarbageCollect_Remote(t *testing.T) {
if len(nodes) != 1 {
return false, fmt.Errorf("should have 1 client. found %d", len(nodes))
}
req := &structs.NodeSpecificRequest{
req := &nstructs.NodeSpecificRequest{
NodeID: c.NodeID(),
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: nstructs.QueryOptions{Region: "global"},
}
resp := structs.SingleNodeResponse{}
resp := nstructs.SingleNodeResponse{}
if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp); err != nil {
return false, err
}
return resp.Node != nil && resp.Node.Status == structs.NodeStatusReady, fmt.Errorf(
return resp.Node != nil && resp.Node.Status == nstructs.NodeStatusReady, fmt.Errorf(
"expected ready but found %s", pretty.Sprint(resp.Node))
}, func(err error) {
t.Fatalf("should have a clients")
@@ -493,9 +492,9 @@ func TestClientAllocations_GarbageCollect_Remote(t *testing.T) {
state1 := s1.State()
state2 := s2.State()
require.Nil(state1.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job))
require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*structs.Allocation{a}))
require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a}))
require.Nil(state2.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job))
require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*structs.Allocation{a}))
require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a}))

// Wait for the client to run the allocation
testutil.WaitForResult(func() (bool, error) {
@@ -506,7 +505,7 @@ func TestClientAllocations_GarbageCollect_Remote(t *testing.T) {
if alloc == nil {
return false, fmt.Errorf("unknown alloc")
}
if alloc.ClientStatus != structs.AllocClientStatusComplete {
if alloc.ClientStatus != nstructs.AllocClientStatusComplete {
return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
}

@@ -521,9 +520,9 @@ func TestClientAllocations_GarbageCollect_Remote(t *testing.T) {
s1.nodeConnsLock.Unlock()

// Make the request
req := &structs.AllocSpecificRequest{
req := &nstructs.AllocSpecificRequest{
AllocID: a.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: nstructs.QueryOptions{Region: "global"},
}

// Fetch the response
@@ -550,25 +549,25 @@ func TestClientAllocations_Stats_OldNode(t *testing.T) {

alloc := mock.Alloc()
alloc.NodeID = node.ID
require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1006, []*structs.Allocation{alloc}))
require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1006, []*nstructs.Allocation{alloc}))

req := &structs.AllocSpecificRequest{
req := &nstructs.AllocSpecificRequest{
AllocID: alloc.ID,
QueryOptions: structs.QueryOptions{
QueryOptions: nstructs.QueryOptions{
Region: "global",
},
}

var resp structs.GenericResponse
var resp nstructs.GenericResponse
err := msgpackrpc.CallWithCodec(codec, "ClientAllocations.Stats", req, &resp)
require.True(structs.IsErrNodeLacksRpc(err), err.Error())
require.True(nstructs.IsErrNodeLacksRpc(err), err.Error())

// Test for a missing version error
delete(node.Attributes, "nomad.version")
require.Nil(state.UpsertNode(nstructs.MsgTypeTestSetup, 1007, node))

err = msgpackrpc.CallWithCodec(codec, "ClientAllocations.Stats", req, &resp)
require.True(structs.IsErrUnknownNomadVersion(err), err.Error())
require.True(nstructs.IsErrUnknownNomadVersion(err), err.Error())
}

func TestClientAllocations_Stats_Local(t *testing.T) {
@@ -588,17 +587,17 @@ func TestClientAllocations_Stats_Local(t *testing.T) {

// Force an allocation onto the node
a := mock.Alloc()
a.Job.Type = structs.JobTypeBatch
a.Job.Type = nstructs.JobTypeBatch
a.NodeID = c.NodeID()
a.Job.TaskGroups[0].Count = 1
a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
a.Job.TaskGroups[0].Tasks[0] = &nstructs.Task{
Name: "web",
Driver: "mock_driver",
Config: map[string]interface{}{
"run_for": "2s",
},
LogConfig: structs.DefaultLogConfig(),
Resources: &structs.Resources{
LogConfig: nstructs.DefaultLogConfig(),
Resources: &nstructs.Resources{
CPU: 500,
MemoryMB: 256,
},
@@ -614,7 +613,7 @@ func TestClientAllocations_Stats_Local(t *testing.T) {
// Upsert the allocation
state := s.State()
require.Nil(state.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job))
require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*structs.Allocation{a}))
require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a}))

// Wait for the client to run the allocation
testutil.WaitForResult(func() (bool, error) {
@@ -625,7 +624,7 @@ func TestClientAllocations_Stats_Local(t *testing.T) {
if alloc == nil {
return false, fmt.Errorf("unknown alloc")
}
if alloc.ClientStatus != structs.AllocClientStatusComplete {
if alloc.ClientStatus != nstructs.AllocClientStatusComplete {
return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
}

@@ -635,15 +634,15 @@ func TestClientAllocations_Stats_Local(t *testing.T) {
})

// Make the request without having an alloc id
req := &structs.AllocSpecificRequest{
QueryOptions: structs.QueryOptions{Region: "global"},
req := &nstructs.AllocSpecificRequest{
QueryOptions: nstructs.QueryOptions{Region: "global"},
}

// Fetch the response
var resp cstructs.AllocStatsResponse
err := msgpackrpc.CallWithCodec(codec, "ClientAllocations.Stats", req, &resp)
require.NotNil(err)
require.EqualError(err, structs.ErrMissingAllocID.Error(), "(%T) %v")
require.EqualError(err, nstructs.ErrMissingAllocID.Error(), "(%T) %v")

// Fetch the response setting the node id
req.AllocID = a.ID
@@ -666,14 +665,14 @@ func TestClientAllocations_Stats_Local_ACL(t *testing.T) {
policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityReadFS})
tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)

policyGood := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob})
policyGood := mock.NamespacePolicy(nstructs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob})
tokenGood := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid2", policyGood)

// Upsert the allocation
state := s.State()
alloc := mock.Alloc()
require.NoError(t, state.UpsertJob(nstructs.MsgTypeTestSetup, 1010, alloc.Job))
require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc}))
require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, []*nstructs.Allocation{alloc}))

cases := []struct {
Name string
@@ -683,17 +682,17 @@ func TestClientAllocations_Stats_Local_ACL(t *testing.T) {
{
Name: "bad token",
Token: tokenBad.SecretID,
ExpectedError: structs.ErrPermissionDenied.Error(),
ExpectedError: nstructs.ErrPermissionDenied.Error(),
},
{
Name: "good token",
Token: tokenGood.SecretID,
ExpectedError: structs.ErrUnknownNodePrefix,
ExpectedError: nstructs.ErrUnknownNodePrefix,
},
{
Name: "root token",
Token: root.SecretID,
ExpectedError: structs.ErrUnknownNodePrefix,
ExpectedError: nstructs.ErrUnknownNodePrefix,
},
}

@@ -701,12 +700,12 @@ func TestClientAllocations_Stats_Local_ACL(t *testing.T) {
t.Run(c.Name, func(t *testing.T) {

// Make the request without having a node-id
req := &structs.AllocSpecificRequest{
req := &nstructs.AllocSpecificRequest{
AllocID: alloc.ID,
QueryOptions: structs.QueryOptions{
QueryOptions: nstructs.QueryOptions{
AuthToken: c.Token,
Region: "global",
Namespace: structs.DefaultNamespace,
Namespace: nstructs.DefaultNamespace,
},
}

@@ -744,17 +743,17 @@ func TestClientAllocations_Stats_Remote(t *testing.T) {

// Force an allocation onto the node
a := mock.Alloc()
a.Job.Type = structs.JobTypeBatch
a.Job.Type = nstructs.JobTypeBatch
a.NodeID = c.NodeID()
a.Job.TaskGroups[0].Count = 1
a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
a.Job.TaskGroups[0].Tasks[0] = &nstructs.Task{
Name: "web",
Driver: "mock_driver",
Config: map[string]interface{}{
"run_for": "2s",
},
LogConfig: structs.DefaultLogConfig(),
Resources: &structs.Resources{
LogConfig: nstructs.DefaultLogConfig(),
Resources: &nstructs.Resources{
CPU: 500,
MemoryMB: 256,
},
@@ -770,9 +769,9 @@ func TestClientAllocations_Stats_Remote(t *testing.T) {
state1 := s1.State()
state2 := s2.State()
require.Nil(state1.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job))
require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*structs.Allocation{a}))
require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a}))
require.Nil(state2.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job))
require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*structs.Allocation{a}))
require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a}))

// Wait for the client to run the allocation
testutil.WaitForResult(func() (bool, error) {
@@ -783,7 +782,7 @@ func TestClientAllocations_Stats_Remote(t *testing.T) {
if alloc == nil {
return false, fmt.Errorf("unknown alloc")
}
if alloc.ClientStatus != structs.AllocClientStatusComplete {
if alloc.ClientStatus != nstructs.AllocClientStatusComplete {
return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
}

@@ -798,9 +797,9 @@ func TestClientAllocations_Stats_Remote(t *testing.T) {
s1.nodeConnsLock.Unlock()

// Make the request
req := &structs.AllocSpecificRequest{
req := &nstructs.AllocSpecificRequest{
AllocID: a.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: nstructs.QueryOptions{Region: "global"},
}

// Fetch the response
@@ -828,17 +827,17 @@ func TestClientAllocations_Restart_Local(t *testing.T) {

// Force an allocation onto the node
a := mock.Alloc()
a.Job.Type = structs.JobTypeService
a.Job.Type = nstructs.JobTypeService
a.NodeID = c.NodeID()
a.Job.TaskGroups[0].Count = 1
a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
a.Job.TaskGroups[0].Tasks[0] = &nstructs.Task{
Name: "web",
Driver: "mock_driver",
Config: map[string]interface{}{
"run_for": "10s",
},
LogConfig: structs.DefaultLogConfig(),
Resources: &structs.Resources{
LogConfig: nstructs.DefaultLogConfig(),
Resources: &nstructs.Resources{
CPU: 500,
MemoryMB: 256,
},
@@ -854,7 +853,7 @@ func TestClientAllocations_Restart_Local(t *testing.T) {
// Upsert the allocation
state := s.State()
require.Nil(state.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job))
require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*structs.Allocation{a}))
require.Nil(state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a}))

// Wait for the client to run the allocation
testutil.WaitForResult(func() (bool, error) {
@@ -865,7 +864,7 @@ func TestClientAllocations_Restart_Local(t *testing.T) {
if alloc == nil {
return false, fmt.Errorf("unknown alloc")
}
if alloc.ClientStatus != structs.AllocClientStatusRunning {
if alloc.ClientStatus != nstructs.AllocClientStatusRunning {
return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
}

@@ -875,20 +874,20 @@ func TestClientAllocations_Restart_Local(t *testing.T) {
})

// Make the request without having an alloc id
req := &structs.AllocRestartRequest{
QueryOptions: structs.QueryOptions{Region: "global"},
req := &nstructs.AllocRestartRequest{
QueryOptions: nstructs.QueryOptions{Region: "global"},
}

// Fetch the response
var resp structs.GenericResponse
var resp nstructs.GenericResponse
err := msgpackrpc.CallWithCodec(codec, "ClientAllocations.Restart", req, &resp)
require.NotNil(err)
require.EqualError(err, structs.ErrMissingAllocID.Error(), "(%T) %v")
require.EqualError(err, nstructs.ErrMissingAllocID.Error(), "(%T) %v")

// Fetch the response setting the alloc id - This should not error because the
// alloc is running.
req.AllocID = a.ID
var resp2 structs.GenericResponse
var resp2 nstructs.GenericResponse
err = msgpackrpc.CallWithCodec(codec, "ClientAllocations.Restart", req, &resp2)
require.Nil(err)

@@ -941,17 +940,17 @@ func TestClientAllocations_Restart_Remote(t *testing.T) {

// Force an allocation onto the node
a := mock.Alloc()
a.Job.Type = structs.JobTypeService
a.Job.Type = nstructs.JobTypeService
a.NodeID = c.NodeID()
a.Job.TaskGroups[0].Count = 1
a.Job.TaskGroups[0].Tasks[0] = &structs.Task{
a.Job.TaskGroups[0].Tasks[0] = &nstructs.Task{
Name: "web",
Driver: "mock_driver",
Config: map[string]interface{}{
"run_for": "10s",
},
LogConfig: structs.DefaultLogConfig(),
Resources: &structs.Resources{
LogConfig: nstructs.DefaultLogConfig(),
Resources: &nstructs.Resources{
CPU: 500,
MemoryMB: 256,
},
@@ -968,9 +967,9 @@ func TestClientAllocations_Restart_Remote(t *testing.T) {
state1 := s1.State()
state2 := s2.State()
require.Nil(state1.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job))
require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*structs.Allocation{a}))
require.Nil(state1.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a}))
require.Nil(state2.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job))
require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*structs.Allocation{a}))
require.Nil(state2.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a}))

// Wait for the client to run the allocation
testutil.WaitForResult(func() (bool, error) {
@@ -981,7 +980,7 @@ func TestClientAllocations_Restart_Remote(t *testing.T) {
if alloc == nil {
return false, fmt.Errorf("unknown alloc")
}
if alloc.ClientStatus != structs.AllocClientStatusRunning {
if alloc.ClientStatus != nstructs.AllocClientStatusRunning {
return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
}

@@ -991,20 +990,20 @@ func TestClientAllocations_Restart_Remote(t *testing.T) {
})

// Make the request without having an alloc id
req := &structs.AllocRestartRequest{
QueryOptions: structs.QueryOptions{Region: "global"},
req := &nstructs.AllocRestartRequest{
QueryOptions: nstructs.QueryOptions{Region: "global"},
}

// Fetch the response
var resp structs.GenericResponse
var resp nstructs.GenericResponse
err := msgpackrpc.CallWithCodec(codec, "ClientAllocations.Restart", req, &resp)
require.NotNil(err)
require.EqualError(err, structs.ErrMissingAllocID.Error(), "(%T) %v")
require.EqualError(err, nstructs.ErrMissingAllocID.Error(), "(%T) %v")

// Fetch the response setting the alloc id - This should succeed because the
// alloc is running
req.AllocID = a.ID
var resp2 structs.GenericResponse
var resp2 nstructs.GenericResponse
err = msgpackrpc.CallWithCodec(codec, "ClientAllocations.Restart", req, &resp2)
require.NoError(err)
}
@@ -1020,14 +1019,14 @@ func TestClientAllocations_Restart_ACL(t *testing.T) {
policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityReadFS})
tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)

policyGood := mock.NamespacePolicy(structs.DefaultNamespace, acl.PolicyWrite, nil)
policyGood := mock.NamespacePolicy(nstructs.DefaultNamespace, acl.PolicyWrite, nil)
tokenGood := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid2", policyGood)

// Upsert the allocation
state := s.State()
alloc := mock.Alloc()
require.NoError(t, state.UpsertJob(nstructs.MsgTypeTestSetup, 1010, alloc.Job))
require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, []*structs.Allocation{alloc}))
require.NoError(t, state.UpsertAllocs(nstructs.MsgTypeTestSetup, 1011, []*nstructs.Allocation{alloc}))

cases := []struct {
Name string
@@ -1037,7 +1036,7 @@ func TestClientAllocations_Restart_ACL(t *testing.T) {
{
Name: "bad token",
Token: tokenBad.SecretID,
ExpectedError: structs.ErrPermissionDenied.Error(),
ExpectedError: nstructs.ErrPermissionDenied.Error(),
},
{
Name: "good token",
@@ -1055,17 +1054,17 @@ func TestClientAllocations_Restart_ACL(t *testing.T) {
t.Run(c.Name, func(t *testing.T) {

// Make the request without having a node-id
req := &structs.AllocRestartRequest{
req := &nstructs.AllocRestartRequest{
AllocID: alloc.ID,
QueryOptions: structs.QueryOptions{
Namespace: structs.DefaultNamespace,
QueryOptions: nstructs.QueryOptions{
Namespace: nstructs.DefaultNamespace,
AuthToken: c.Token,
Region: "global",
},
}

// Fetch the response
var resp structs.GenericResponse
var resp nstructs.GenericResponse
err := msgpackrpc.CallWithCodec(codec, "ClientAllocations.Restart", req, &resp)
require.NotNil(t, err)
require.Contains(t, err.Error(), c.ExpectedError)
@@ -1121,7 +1120,7 @@ func TestAlloc_ExecStreaming(t *testing.T) {
///// Start task
a := mock.BatchAlloc()
a.NodeID = c.NodeID()
a.Job.Type = structs.JobTypeBatch
a.Job.Type = nstructs.JobTypeBatch
a.Job.TaskGroups[0].Count = 1
a.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
"run_for": "20s",
@@ -1135,10 +1134,10 @@ func TestAlloc_ExecStreaming(t *testing.T) {
// Upsert the allocation
localState := localServer.State()
require.Nil(t, localState.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job))
require.Nil(t, localState.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*structs.Allocation{a}))
require.Nil(t, localState.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a}))
remoteState := remoteServer.State()
require.Nil(t, remoteState.UpsertJob(nstructs.MsgTypeTestSetup, 999, a.Job))
require.Nil(t, remoteState.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*structs.Allocation{a}))
require.Nil(t, remoteState.UpsertAllocs(nstructs.MsgTypeTestSetup, 1003, []*nstructs.Allocation{a}))

// Wait for the client to run the allocation
testutil.WaitForResult(func() (bool, error) {
@@ -1149,7 +1148,7 @@ func TestAlloc_ExecStreaming(t *testing.T) {
if alloc == nil {
return false, fmt.Errorf("unknown alloc")
}
if alloc.ClientStatus != structs.AllocClientStatusRunning {
if alloc.ClientStatus != nstructs.AllocClientStatusRunning {
return false, fmt.Errorf("alloc client status: %v", alloc.ClientStatus)
}

@@ -1161,7 +1160,7 @@ func TestAlloc_ExecStreaming(t *testing.T) {
///////// Actually run query now
cases := []struct {
name string
rpc func(string) (structs.StreamingRpcHandler, error)
rpc func(string) (nstructs.StreamingRpcHandler, error)
}{
{"client", c.StreamingRpcHandler},
{"local_server", localServer.StreamingRpcHandler},