diff --git a/GNUmakefile b/GNUmakefile index 07852dcae..c506dfec9 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -145,7 +145,7 @@ lint-deps: ## Install linter dependencies @echo "==> Updating linter dependencies..." go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.48.0 go install github.com/client9/misspell/cmd/misspell@v0.3.4 - go install github.com/hashicorp/go-hclog/hclogvet@v0.1.4 + go install github.com/hashicorp/go-hclog/hclogvet@v0.1.5 .PHONY: git-hooks git-dir = $(shell git rev-parse --git-dir) diff --git a/api/allocations_test.go b/api/allocations_test.go index 4f2993e82..3af6c54b2 100644 --- a/api/allocations_test.go +++ b/api/allocations_test.go @@ -159,13 +159,13 @@ func TestAllocations_RescheduleInfo(t *testing.T) { testutil.Parallel(t) // Create a job, task group and alloc job := &Job{ - Name: stringToPtr("foo"), - Namespace: stringToPtr(DefaultNamespace), - ID: stringToPtr("bar"), - ParentID: stringToPtr("lol"), + Name: pointerOf("foo"), + Namespace: pointerOf(DefaultNamespace), + ID: pointerOf("bar"), + ParentID: pointerOf("lol"), TaskGroups: []*TaskGroup{ { - Name: stringToPtr("bar"), + Name: pointerOf("bar"), Tasks: []*Task{ { Name: "task1", @@ -205,8 +205,8 @@ func TestAllocations_RescheduleInfo(t *testing.T) { { desc: "no reschedule events", reschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(3), - Interval: timeToPtr(15 * time.Minute), + Attempts: pointerOf(3), + Interval: pointerOf(15 * time.Minute), }, expAttempted: 0, expTotal: 3, @@ -214,8 +214,8 @@ func TestAllocations_RescheduleInfo(t *testing.T) { { desc: "all reschedule events within interval", reschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(3), - Interval: timeToPtr(15 * time.Minute), + Attempts: pointerOf(3), + Interval: pointerOf(15 * time.Minute), }, time: time.Now(), rescheduleTracker: &RescheduleTracker{ @@ -231,8 +231,8 @@ func TestAllocations_RescheduleInfo(t *testing.T) { { desc: "some reschedule events outside interval", reschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(3), - Interval: timeToPtr(15 * time.Minute), + Attempts: pointerOf(3), + Interval: pointerOf(15 * time.Minute), }, time: time.Now(), rescheduleTracker: &RescheduleTracker{ @@ -276,13 +276,13 @@ func TestAllocations_ExecErrors(t *testing.T) { a := c.Allocations() job := &Job{ - Name: stringToPtr("foo"), - Namespace: stringToPtr(DefaultNamespace), - ID: stringToPtr("bar"), - ParentID: stringToPtr("lol"), + Name: pointerOf("foo"), + Namespace: pointerOf(DefaultNamespace), + ID: pointerOf("bar"), + ParentID: pointerOf("lol"), TaskGroups: []*TaskGroup{ { - Name: stringToPtr("bar"), + Name: pointerOf("bar"), Tasks: []*Task{ { Name: "task1", @@ -392,9 +392,9 @@ func TestAllocation_ClientTerminalStatus(t *testing.T) { func TestAllocations_ShouldMigrate(t *testing.T) { testutil.Parallel(t) - require.True(t, DesiredTransition{Migrate: boolToPtr(true)}.ShouldMigrate()) + require.True(t, DesiredTransition{Migrate: pointerOf(true)}.ShouldMigrate()) require.False(t, DesiredTransition{}.ShouldMigrate()) - require.False(t, DesiredTransition{Migrate: boolToPtr(false)}.ShouldMigrate()) + require.False(t, DesiredTransition{Migrate: pointerOf(false)}.ShouldMigrate()) } func TestAllocations_Services(t *testing.T) { diff --git a/api/compose_test.go b/api/compose_test.go index c70b244cd..2fe750a6d 100644 --- a/api/compose_test.go +++ b/api/compose_test.go @@ -15,13 +15,13 @@ func TestCompose(t *testing.T) { SetMeta("foo", "bar"). Constrain(NewConstraint("kernel.name", "=", "linux")). 
Require(&Resources{ - CPU: intToPtr(1250), - MemoryMB: intToPtr(1024), - DiskMB: intToPtr(2048), + CPU: pointerOf(1250), + MemoryMB: pointerOf(1024), + DiskMB: pointerOf(2048), Networks: []*NetworkResource{ { CIDR: "0.0.0.0/0", - MBits: intToPtr(100), + MBits: pointerOf(100), ReservedPorts: []Port{{"", 80, 0, ""}, {"", 443, 0, ""}}, }, }, @@ -47,11 +47,11 @@ func TestCompose(t *testing.T) { // Check that the composed result looks correct expect := &Job{ - Region: stringToPtr("global"), - ID: stringToPtr("job1"), - Name: stringToPtr("myjob"), - Type: stringToPtr(JobTypeService), - Priority: intToPtr(2), + Region: pointerOf("global"), + ID: pointerOf("job1"), + Name: pointerOf("myjob"), + Type: pointerOf(JobTypeService), + Priority: pointerOf(2), Datacenters: []string{ "dc1", }, @@ -67,8 +67,8 @@ func TestCompose(t *testing.T) { }, TaskGroups: []*TaskGroup{ { - Name: stringToPtr("grp1"), - Count: intToPtr(2), + Name: pointerOf("grp1"), + Count: pointerOf(2), Constraints: []*Constraint{ { LTarget: "kernel.name", @@ -81,13 +81,13 @@ func TestCompose(t *testing.T) { LTarget: "${node.class}", RTarget: "large", Operand: "=", - Weight: int8ToPtr(50), + Weight: pointerOf(int8(50)), }, }, Spreads: []*Spread{ { Attribute: "${node.datacenter}", - Weight: int8ToPtr(30), + Weight: pointerOf(int8(30)), SpreadTarget: []*SpreadTarget{ { Value: "dc1", @@ -105,13 +105,13 @@ func TestCompose(t *testing.T) { Name: "task1", Driver: "exec", Resources: &Resources{ - CPU: intToPtr(1250), - MemoryMB: intToPtr(1024), - DiskMB: intToPtr(2048), + CPU: pointerOf(1250), + MemoryMB: pointerOf(1024), + DiskMB: pointerOf(2048), Networks: []*NetworkResource{ { CIDR: "0.0.0.0/0", - MBits: intToPtr(100), + MBits: pointerOf(100), ReservedPorts: []Port{ {"", 80, 0, ""}, {"", 443, 0, ""}, diff --git a/api/consul.go b/api/consul.go index db25a1664..9a76bfb32 100644 --- a/api/consul.go +++ b/api/consul.go @@ -1,6 +1,8 @@ package api -import "time" +import ( + "time" +) // Consul represents configuration related to consul. 
type Consul struct { @@ -121,11 +123,11 @@ func (st *SidecarTask) Canonicalize() { } if st.KillTimeout == nil { - st.KillTimeout = timeToPtr(5 * time.Second) + st.KillTimeout = pointerOf(5 * time.Second) } if st.ShutdownDelay == nil { - st.ShutdownDelay = timeToPtr(0) + st.ShutdownDelay = pointerOf(time.Duration(0)) } } @@ -313,7 +315,7 @@ func (p *ConsulGatewayProxy) Canonicalize() { if p.ConnectTimeout == nil { // same as the default from consul - p.ConnectTimeout = timeToPtr(defaultGatewayConnectTimeout) + p.ConnectTimeout = pointerOf(defaultGatewayConnectTimeout) } if len(p.EnvoyGatewayBindAddresses) == 0 { @@ -347,7 +349,7 @@ func (p *ConsulGatewayProxy) Copy() *ConsulGatewayProxy { } return &ConsulGatewayProxy{ - ConnectTimeout: timeToPtr(*p.ConnectTimeout), + ConnectTimeout: pointerOf(*p.ConnectTimeout), EnvoyGatewayBindTaggedAddresses: p.EnvoyGatewayBindTaggedAddresses, EnvoyGatewayBindAddresses: binds, EnvoyGatewayNoDefaultBind: p.EnvoyGatewayNoDefaultBind, diff --git a/api/consul_test.go b/api/consul_test.go index 2556ab662..ce2757295 100644 --- a/api/consul_test.go +++ b/api/consul_test.go @@ -39,7 +39,7 @@ func TestConsul_MergeNamespace(t *testing.T) { testutil.Parallel(t) t.Run("already set", func(t *testing.T) { a := &Consul{Namespace: "foo"} - ns := stringToPtr("bar") + ns := pointerOf("bar") a.MergeNamespace(ns) require.Equal(t, "foo", a.Namespace) require.Equal(t, "bar", *ns) @@ -47,7 +47,7 @@ func TestConsul_MergeNamespace(t *testing.T) { t.Run("inherit", func(t *testing.T) { a := &Consul{Namespace: ""} - ns := stringToPtr("bar") + ns := pointerOf("bar") a.MergeNamespace(ns) require.Equal(t, "bar", a.Namespace) require.Equal(t, "bar", *ns) @@ -228,9 +228,9 @@ func TestSidecarTask_Canonicalize(t *testing.T) { t.Run("non empty sidecar_task resources", func(t *testing.T) { exp := DefaultResources() - exp.MemoryMB = intToPtr(333) + exp.MemoryMB = pointerOf(333) st := &SidecarTask{ - Resources: &Resources{MemoryMB: intToPtr(333)}, + Resources: &Resources{MemoryMB: pointerOf(333)}, } st.Canonicalize() require.Equal(t, exp, st.Resources) @@ -263,7 +263,7 @@ func TestConsulGateway_Canonicalize(t *testing.T) { }, } cg.Canonicalize() - require.Equal(t, timeToPtr(5*time.Second), cg.Proxy.ConnectTimeout) + require.Equal(t, pointerOf(5*time.Second), cg.Proxy.ConnectTimeout) require.True(t, cg.Proxy.EnvoyGatewayBindTaggedAddresses) require.Nil(t, cg.Proxy.EnvoyGatewayBindAddresses) require.True(t, cg.Proxy.EnvoyGatewayNoDefaultBind) @@ -283,7 +283,7 @@ func TestConsulGateway_Copy(t *testing.T) { gateway := &ConsulGateway{ Proxy: &ConsulGatewayProxy{ - ConnectTimeout: timeToPtr(3 * time.Second), + ConnectTimeout: pointerOf(3 * time.Second), EnvoyGatewayBindTaggedAddresses: true, EnvoyGatewayBindAddresses: map[string]*ConsulGatewayBindAddress{ "listener1": {Address: "10.0.0.1", Port: 2000}, diff --git a/api/event_stream_test.go b/api/event_stream_test.go index d0f55f91f..72c742cac 100644 --- a/api/event_stream_test.go +++ b/api/event_stream_test.go @@ -240,8 +240,8 @@ func TestEventStream_PayloadValueHelpers(t *testing.T) { j, err := event.Job() require.NoError(t, err) require.Equal(t, &Job{ - ID: stringToPtr("some-id"), - Namespace: stringToPtr("some-namespace-id"), + ID: pointerOf("some-id"), + Namespace: pointerOf("some-namespace-id"), }, j) }, }, diff --git a/api/fs_test.go b/api/fs_test.go index a5ac93785..fc03a9f76 100644 --- a/api/fs_test.go +++ b/api/fs_test.go @@ -63,13 +63,13 @@ func TestFS_Logs(t *testing.T) { } job := &Job{ - ID: stringToPtr("TestFS_Logs"), - Region: 
stringToPtr("global"), + ID: pointerOf("TestFS_Logs"), + Region: pointerOf("global"), Datacenters: []string{"dc1"}, - Type: stringToPtr("batch"), + Type: pointerOf("batch"), TaskGroups: []*TaskGroup{ { - Name: stringToPtr("TestFS_LogsGroup"), + Name: pointerOf("TestFS_LogsGroup"), Tasks: []*Task{ { Name: "logger", diff --git a/api/go.mod b/api/go.mod index a478988b7..6af81a635 100644 --- a/api/go.mod +++ b/api/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/nomad/api -go 1.17 +go 1.18 require ( github.com/docker/go-units v0.4.0 @@ -11,11 +11,13 @@ require ( github.com/kr/pretty v0.3.0 github.com/mitchellh/go-testing-interface v1.14.1 github.com/mitchellh/mapstructure v1.5.0 + github.com/shoenig/test v0.3.0 github.com/stretchr/testify v1.8.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect + github.com/google/go-cmp v0.5.8 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect diff --git a/api/go.sum b/api/go.sum index 01a972f5f..aed21cd87 100644 --- a/api/go.sum +++ b/api/go.sum @@ -4,6 +4,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= @@ -29,6 +31,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/shoenig/test v0.3.0 h1:H6tfSvgLrPHRR5NH9S40+lOfoyeH2PbswBr4twgn9Po= +github.com/shoenig/test v0.3.0/go.mod h1:xYtyGBC5Q3kzCNyJg/SjgNpfAa2kvmgA0i5+lQso8x0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= diff --git a/api/jobs.go b/api/jobs.go index f66d50019..4fc048dd5 100644 --- a/api/jobs.go +++ b/api/jobs.go @@ -189,7 +189,7 @@ func (j *Jobs) Scale(jobID, group string, count *int, message string, error bool var count64 *int64 if count != nil { - count64 = int64ToPtr(int64(*count)) + count64 = pointerOf(int64(*count)) } req := &ScalingRequest{ Count: count64, @@ -513,15 +513,15 @@ type UpdateStrategy struct { // jobs with the old policy or for populating field defaults. 
func DefaultUpdateStrategy() *UpdateStrategy { return &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), } } @@ -533,39 +533,39 @@ func (u *UpdateStrategy) Copy() *UpdateStrategy { copy := new(UpdateStrategy) if u.Stagger != nil { - copy.Stagger = timeToPtr(*u.Stagger) + copy.Stagger = pointerOf(*u.Stagger) } if u.MaxParallel != nil { - copy.MaxParallel = intToPtr(*u.MaxParallel) + copy.MaxParallel = pointerOf(*u.MaxParallel) } if u.HealthCheck != nil { - copy.HealthCheck = stringToPtr(*u.HealthCheck) + copy.HealthCheck = pointerOf(*u.HealthCheck) } if u.MinHealthyTime != nil { - copy.MinHealthyTime = timeToPtr(*u.MinHealthyTime) + copy.MinHealthyTime = pointerOf(*u.MinHealthyTime) } if u.HealthyDeadline != nil { - copy.HealthyDeadline = timeToPtr(*u.HealthyDeadline) + copy.HealthyDeadline = pointerOf(*u.HealthyDeadline) } if u.ProgressDeadline != nil { - copy.ProgressDeadline = timeToPtr(*u.ProgressDeadline) + copy.ProgressDeadline = pointerOf(*u.ProgressDeadline) } if u.AutoRevert != nil { - copy.AutoRevert = boolToPtr(*u.AutoRevert) + copy.AutoRevert = pointerOf(*u.AutoRevert) } if u.Canary != nil { - copy.Canary = intToPtr(*u.Canary) + copy.Canary = pointerOf(*u.Canary) } if u.AutoPromote != nil { - copy.AutoPromote = boolToPtr(*u.AutoPromote) + copy.AutoPromote = pointerOf(*u.AutoPromote) } return copy @@ -577,39 +577,39 @@ func (u *UpdateStrategy) Merge(o *UpdateStrategy) { } if o.Stagger != nil { - u.Stagger = timeToPtr(*o.Stagger) + u.Stagger = pointerOf(*o.Stagger) } if o.MaxParallel != nil { - u.MaxParallel = intToPtr(*o.MaxParallel) + u.MaxParallel = pointerOf(*o.MaxParallel) } if o.HealthCheck != nil { - u.HealthCheck = stringToPtr(*o.HealthCheck) + u.HealthCheck = pointerOf(*o.HealthCheck) } if o.MinHealthyTime != nil { - u.MinHealthyTime = timeToPtr(*o.MinHealthyTime) + u.MinHealthyTime = pointerOf(*o.MinHealthyTime) } if o.HealthyDeadline != nil { - u.HealthyDeadline = timeToPtr(*o.HealthyDeadline) + u.HealthyDeadline = pointerOf(*o.HealthyDeadline) } if o.ProgressDeadline != nil { - u.ProgressDeadline = timeToPtr(*o.ProgressDeadline) + u.ProgressDeadline = pointerOf(*o.ProgressDeadline) } if o.AutoRevert != nil { - u.AutoRevert = boolToPtr(*o.AutoRevert) + u.AutoRevert = pointerOf(*o.AutoRevert) } if o.Canary != nil { - u.Canary = intToPtr(*o.Canary) + u.Canary = pointerOf(*o.Canary) } if o.AutoPromote != nil { - u.AutoPromote = boolToPtr(*o.AutoPromote) + u.AutoPromote = pointerOf(*o.AutoPromote) } } @@ -706,15 +706,15 @@ type Multiregion struct { func (m *Multiregion) Canonicalize() { if m.Strategy == nil { m.Strategy = &MultiregionStrategy{ - MaxParallel: intToPtr(0), - OnFailure: stringToPtr(""), + MaxParallel: pointerOf(0), + OnFailure: pointerOf(""), } } else { if m.Strategy.MaxParallel == nil { - m.Strategy.MaxParallel = intToPtr(0) + m.Strategy.MaxParallel = pointerOf(0) } if m.Strategy.OnFailure == nil { - m.Strategy.OnFailure = 
stringToPtr("") + m.Strategy.OnFailure = pointerOf("") } } if m.Regions == nil { @@ -722,7 +722,7 @@ func (m *Multiregion) Canonicalize() { } for _, region := range m.Regions { if region.Count == nil { - region.Count = intToPtr(1) + region.Count = pointerOf(1) } if region.Datacenters == nil { region.Datacenters = []string{} @@ -740,13 +740,13 @@ func (m *Multiregion) Copy() *Multiregion { copy := new(Multiregion) if m.Strategy != nil { copy.Strategy = new(MultiregionStrategy) - copy.Strategy.MaxParallel = intToPtr(*m.Strategy.MaxParallel) - copy.Strategy.OnFailure = stringToPtr(*m.Strategy.OnFailure) + copy.Strategy.MaxParallel = pointerOf(*m.Strategy.MaxParallel) + copy.Strategy.OnFailure = pointerOf(*m.Strategy.OnFailure) } for _, region := range m.Regions { copyRegion := new(MultiregionRegion) copyRegion.Name = region.Name - copyRegion.Count = intToPtr(*region.Count) + copyRegion.Count = pointerOf(*region.Count) copyRegion.Datacenters = append(copyRegion.Datacenters, region.Datacenters...) for k, v := range region.Meta { copyRegion.Meta[k] = v @@ -779,19 +779,19 @@ type PeriodicConfig struct { func (p *PeriodicConfig) Canonicalize() { if p.Enabled == nil { - p.Enabled = boolToPtr(true) + p.Enabled = pointerOf(true) } if p.Spec == nil { - p.Spec = stringToPtr("") + p.Spec = pointerOf("") } if p.SpecType == nil { - p.SpecType = stringToPtr(PeriodicSpecCron) + p.SpecType = pointerOf(PeriodicSpecCron) } if p.ProhibitOverlap == nil { - p.ProhibitOverlap = boolToPtr(false) + p.ProhibitOverlap = pointerOf(false) } if p.TimeZone == nil || *p.TimeZone == "" { - p.TimeZone = stringToPtr("UTC") + p.TimeZone = pointerOf("UTC") } } @@ -904,70 +904,70 @@ func (j *Job) IsMultiregion() bool { func (j *Job) Canonicalize() { if j.ID == nil { - j.ID = stringToPtr("") + j.ID = pointerOf("") } if j.Name == nil { - j.Name = stringToPtr(*j.ID) + j.Name = pointerOf(*j.ID) } if j.ParentID == nil { - j.ParentID = stringToPtr("") + j.ParentID = pointerOf("") } if j.Namespace == nil { - j.Namespace = stringToPtr(DefaultNamespace) + j.Namespace = pointerOf(DefaultNamespace) } if j.Priority == nil { - j.Priority = intToPtr(50) + j.Priority = pointerOf(50) } if j.Stop == nil { - j.Stop = boolToPtr(false) + j.Stop = pointerOf(false) } if j.Region == nil { - j.Region = stringToPtr(GlobalRegion) + j.Region = pointerOf(GlobalRegion) } if j.Namespace == nil { - j.Namespace = stringToPtr("default") + j.Namespace = pointerOf("default") } if j.Type == nil { - j.Type = stringToPtr("service") + j.Type = pointerOf("service") } if j.AllAtOnce == nil { - j.AllAtOnce = boolToPtr(false) + j.AllAtOnce = pointerOf(false) } if j.ConsulToken == nil { - j.ConsulToken = stringToPtr("") + j.ConsulToken = pointerOf("") } if j.ConsulNamespace == nil { - j.ConsulNamespace = stringToPtr("") + j.ConsulNamespace = pointerOf("") } if j.VaultToken == nil { - j.VaultToken = stringToPtr("") + j.VaultToken = pointerOf("") } if j.VaultNamespace == nil { - j.VaultNamespace = stringToPtr("") + j.VaultNamespace = pointerOf("") } if j.NomadTokenID == nil { - j.NomadTokenID = stringToPtr("") + j.NomadTokenID = pointerOf("") } if j.Status == nil { - j.Status = stringToPtr("") + j.Status = pointerOf("") } if j.StatusDescription == nil { - j.StatusDescription = stringToPtr("") + j.StatusDescription = pointerOf("") } if j.Stable == nil { - j.Stable = boolToPtr(false) + j.Stable = pointerOf(false) } if j.Version == nil { - j.Version = uint64ToPtr(0) + j.Version = pointerOf(uint64(0)) } if j.CreateIndex == nil { - j.CreateIndex = uint64ToPtr(0) + j.CreateIndex 
= pointerOf(uint64(0)) } if j.ModifyIndex == nil { - j.ModifyIndex = uint64ToPtr(0) + j.ModifyIndex = pointerOf(uint64(0)) } if j.JobModifyIndex == nil { - j.JobModifyIndex = uint64ToPtr(0) + j.JobModifyIndex = pointerOf(uint64(0)) } if j.Periodic != nil { j.Periodic.Canonicalize() diff --git a/api/jobs_test.go b/api/jobs_test.go index ea3761fa3..78b042bc6 100644 --- a/api/jobs_test.go +++ b/api/jobs_test.go @@ -61,23 +61,23 @@ func TestJobs_Register_PreserveCounts(t *testing.T) { task := NewTask("task", "exec"). SetConfig("command", "/bin/sleep"). Require(&Resources{ - CPU: intToPtr(100), - MemoryMB: intToPtr(256), + CPU: pointerOf(100), + MemoryMB: pointerOf(256), }). SetLogConfig(&LogConfig{ - MaxFiles: intToPtr(1), - MaxFileSizeMB: intToPtr(2), + MaxFiles: pointerOf(1), + MaxFileSizeMB: pointerOf(2), }) group1 := NewTaskGroup("group1", 1). AddTask(task). RequireDisk(&EphemeralDisk{ - SizeMB: intToPtr(25), + SizeMB: pointerOf(25), }) group2 := NewTaskGroup("group2", 2). AddTask(task). RequireDisk(&EphemeralDisk{ - SizeMB: intToPtr(25), + SizeMB: pointerOf(25), }) job := NewBatchJob("job", "redis", "global", 1). @@ -94,11 +94,11 @@ func TestJobs_Register_PreserveCounts(t *testing.T) { // Update the job, new groups to test PreserveCounts group1.Count = nil - group2.Count = intToPtr(0) + group2.Count = pointerOf(0) group3 := NewTaskGroup("group3", 3). AddTask(task). RequireDisk(&EphemeralDisk{ - SizeMB: intToPtr(25), + SizeMB: pointerOf(25), }) job.AddTaskGroup(group3) @@ -133,23 +133,23 @@ func TestJobs_Register_NoPreserveCounts(t *testing.T) { task := NewTask("task", "exec"). SetConfig("command", "/bin/sleep"). Require(&Resources{ - CPU: intToPtr(100), - MemoryMB: intToPtr(256), + CPU: pointerOf(100), + MemoryMB: pointerOf(256), }). SetLogConfig(&LogConfig{ - MaxFiles: intToPtr(1), - MaxFileSizeMB: intToPtr(2), + MaxFiles: pointerOf(1), + MaxFileSizeMB: pointerOf(2), }) group1 := NewTaskGroup("group1", 1). AddTask(task). RequireDisk(&EphemeralDisk{ - SizeMB: intToPtr(25), + SizeMB: pointerOf(25), }) group2 := NewTaskGroup("group2", 2). AddTask(task). RequireDisk(&EphemeralDisk{ - SizeMB: intToPtr(25), + SizeMB: pointerOf(25), }) job := NewBatchJob("job", "redis", "global", 1). @@ -165,12 +165,12 @@ func TestJobs_Register_NoPreserveCounts(t *testing.T) { assertWriteMeta(t, wm) // Update the job, new groups to test PreserveCounts - group1.Count = intToPtr(0) + group1.Count = pointerOf(0) group2.Count = nil group3 := NewTaskGroup("group3", 3). AddTask(task). 
RequireDisk(&EphemeralDisk{ - SizeMB: intToPtr(25), + SizeMB: pointerOf(25), }) job.AddTaskGroup(group3) @@ -288,79 +288,79 @@ func TestJobs_Canonicalize(t *testing.T) { }, }, expected: &Job{ - ID: stringToPtr(""), - Name: stringToPtr(""), - Region: stringToPtr("global"), - Namespace: stringToPtr(DefaultNamespace), - Type: stringToPtr("service"), - ParentID: stringToPtr(""), - Priority: intToPtr(50), - AllAtOnce: boolToPtr(false), - ConsulToken: stringToPtr(""), - ConsulNamespace: stringToPtr(""), - VaultToken: stringToPtr(""), - VaultNamespace: stringToPtr(""), - NomadTokenID: stringToPtr(""), - Status: stringToPtr(""), - StatusDescription: stringToPtr(""), - Stop: boolToPtr(false), - Stable: boolToPtr(false), - Version: uint64ToPtr(0), - CreateIndex: uint64ToPtr(0), - ModifyIndex: uint64ToPtr(0), - JobModifyIndex: uint64ToPtr(0), + ID: pointerOf(""), + Name: pointerOf(""), + Region: pointerOf("global"), + Namespace: pointerOf(DefaultNamespace), + Type: pointerOf("service"), + ParentID: pointerOf(""), + Priority: pointerOf(50), + AllAtOnce: pointerOf(false), + ConsulToken: pointerOf(""), + ConsulNamespace: pointerOf(""), + VaultToken: pointerOf(""), + VaultNamespace: pointerOf(""), + NomadTokenID: pointerOf(""), + Status: pointerOf(""), + StatusDescription: pointerOf(""), + Stop: pointerOf(false), + Stable: pointerOf(false), + Version: pointerOf(uint64(0)), + CreateIndex: pointerOf(uint64(0)), + ModifyIndex: pointerOf(uint64(0)), + JobModifyIndex: pointerOf(uint64(0)), Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, TaskGroups: []*TaskGroup{ { - Name: stringToPtr(""), - Count: intToPtr(1), + Name: pointerOf(""), + Count: pointerOf(1), EphemeralDisk: &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), }, RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, ReschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - DelayFunction: stringToPtr("exponential"), - Delay: timeToPtr(30 * time.Second), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + DelayFunction: pointerOf("exponential"), + Delay: pointerOf(30 * time.Second), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), }, Consul: &Consul{ Namespace: "", }, Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: 
timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, Migrate: DefaultMigrateStrategy(), Tasks: []*Task{ { - KillTimeout: timeToPtr(5 * time.Second), + KillTimeout: pointerOf(5 * time.Second), LogConfig: DefaultLogConfig(), Resources: DefaultResources(), RestartPolicy: defaultServiceJobRestartPolicy(), @@ -373,7 +373,7 @@ func TestJobs_Canonicalize(t *testing.T) { { name: "batch", input: &Job{ - Type: stringToPtr("batch"), + Type: pointerOf("batch"), TaskGroups: []*TaskGroup{ { Tasks: []*Task{ @@ -383,56 +383,56 @@ func TestJobs_Canonicalize(t *testing.T) { }, }, expected: &Job{ - ID: stringToPtr(""), - Name: stringToPtr(""), - Region: stringToPtr("global"), - Namespace: stringToPtr(DefaultNamespace), - Type: stringToPtr("batch"), - ParentID: stringToPtr(""), - Priority: intToPtr(50), - AllAtOnce: boolToPtr(false), - ConsulToken: stringToPtr(""), - ConsulNamespace: stringToPtr(""), - VaultToken: stringToPtr(""), - VaultNamespace: stringToPtr(""), - NomadTokenID: stringToPtr(""), - Status: stringToPtr(""), - StatusDescription: stringToPtr(""), - Stop: boolToPtr(false), - Stable: boolToPtr(false), - Version: uint64ToPtr(0), - CreateIndex: uint64ToPtr(0), - ModifyIndex: uint64ToPtr(0), - JobModifyIndex: uint64ToPtr(0), + ID: pointerOf(""), + Name: pointerOf(""), + Region: pointerOf("global"), + Namespace: pointerOf(DefaultNamespace), + Type: pointerOf("batch"), + ParentID: pointerOf(""), + Priority: pointerOf(50), + AllAtOnce: pointerOf(false), + ConsulToken: pointerOf(""), + ConsulNamespace: pointerOf(""), + VaultToken: pointerOf(""), + VaultNamespace: pointerOf(""), + NomadTokenID: pointerOf(""), + Status: pointerOf(""), + StatusDescription: pointerOf(""), + Stop: pointerOf(false), + Stable: pointerOf(false), + Version: pointerOf(uint64(0)), + CreateIndex: pointerOf(uint64(0)), + ModifyIndex: pointerOf(uint64(0)), + JobModifyIndex: pointerOf(uint64(0)), TaskGroups: []*TaskGroup{ { - Name: stringToPtr(""), - Count: intToPtr(1), + Name: pointerOf(""), + Count: pointerOf(1), EphemeralDisk: &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), }, RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(3), - Interval: timeToPtr(24 * time.Hour), - Mode: stringToPtr("fail"), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(3), + Interval: pointerOf(24 * time.Hour), + Mode: pointerOf("fail"), }, ReschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(1), - Interval: timeToPtr(24 * time.Hour), - DelayFunction: stringToPtr("constant"), - Delay: timeToPtr(5 * time.Second), - MaxDelay: timeToPtr(0), - Unlimited: boolToPtr(false), + Attempts: pointerOf(1), + Interval: pointerOf(24 * time.Hour), + DelayFunction: pointerOf("constant"), + Delay: pointerOf(5 * time.Second), + MaxDelay: pointerOf(time.Duration(0)), + Unlimited: pointerOf(false), }, Consul: &Consul{ Namespace: "", }, Tasks: []*Task{ { - KillTimeout: timeToPtr(5 * time.Second), + KillTimeout: pointerOf(5 * time.Second), LogConfig: DefaultLogConfig(), Resources: 
DefaultResources(), RestartPolicy: defaultBatchJobRestartPolicy(), @@ -445,13 +445,13 @@ func TestJobs_Canonicalize(t *testing.T) { { name: "partial", input: &Job{ - Name: stringToPtr("foo"), - Namespace: stringToPtr("bar"), - ID: stringToPtr("bar"), - ParentID: stringToPtr("lol"), + Name: pointerOf("foo"), + Namespace: pointerOf("bar"), + ID: pointerOf("bar"), + ParentID: pointerOf("lol"), TaskGroups: []*TaskGroup{ { - Name: stringToPtr("bar"), + Name: pointerOf("bar"), Tasks: []*Task{ { Name: "task1", @@ -461,74 +461,74 @@ func TestJobs_Canonicalize(t *testing.T) { }, }, expected: &Job{ - Namespace: stringToPtr("bar"), - ID: stringToPtr("bar"), - Name: stringToPtr("foo"), - Region: stringToPtr("global"), - Type: stringToPtr("service"), - ParentID: stringToPtr("lol"), - Priority: intToPtr(50), - AllAtOnce: boolToPtr(false), - ConsulToken: stringToPtr(""), - ConsulNamespace: stringToPtr(""), - VaultToken: stringToPtr(""), - VaultNamespace: stringToPtr(""), - NomadTokenID: stringToPtr(""), - Stop: boolToPtr(false), - Stable: boolToPtr(false), - Version: uint64ToPtr(0), - Status: stringToPtr(""), - StatusDescription: stringToPtr(""), - CreateIndex: uint64ToPtr(0), - ModifyIndex: uint64ToPtr(0), - JobModifyIndex: uint64ToPtr(0), + Namespace: pointerOf("bar"), + ID: pointerOf("bar"), + Name: pointerOf("foo"), + Region: pointerOf("global"), + Type: pointerOf("service"), + ParentID: pointerOf("lol"), + Priority: pointerOf(50), + AllAtOnce: pointerOf(false), + ConsulToken: pointerOf(""), + ConsulNamespace: pointerOf(""), + VaultToken: pointerOf(""), + VaultNamespace: pointerOf(""), + NomadTokenID: pointerOf(""), + Stop: pointerOf(false), + Stable: pointerOf(false), + Version: pointerOf(uint64(0)), + Status: pointerOf(""), + StatusDescription: pointerOf(""), + CreateIndex: pointerOf(uint64(0)), + ModifyIndex: pointerOf(uint64(0)), + JobModifyIndex: pointerOf(uint64(0)), Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, TaskGroups: []*TaskGroup{ { - Name: stringToPtr("bar"), - Count: intToPtr(1), + Name: pointerOf("bar"), + Count: pointerOf(1), EphemeralDisk: &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), }, RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, ReschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - DelayFunction: stringToPtr("exponential"), - Delay: timeToPtr(30 * time.Second), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + DelayFunction: 
pointerOf("exponential"), + Delay: pointerOf(30 * time.Second), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), }, Consul: &Consul{ Namespace: "", }, Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, Migrate: DefaultMigrateStrategy(), Tasks: []*Task{ @@ -536,7 +536,7 @@ func TestJobs_Canonicalize(t *testing.T) { Name: "task1", LogConfig: DefaultLogConfig(), Resources: DefaultResources(), - KillTimeout: timeToPtr(5 * time.Second), + KillTimeout: pointerOf(5 * time.Second), RestartPolicy: defaultServiceJobRestartPolicy(), }, }, @@ -547,29 +547,29 @@ func TestJobs_Canonicalize(t *testing.T) { { name: "example_template", input: &Job{ - ID: stringToPtr("example_template"), - Name: stringToPtr("example_template"), + ID: pointerOf("example_template"), + Name: pointerOf("example_template"), Datacenters: []string{"dc1"}, - Type: stringToPtr("service"), + Type: pointerOf("service"), Update: &UpdateStrategy{ - MaxParallel: intToPtr(1), - AutoPromote: boolToPtr(true), + MaxParallel: pointerOf(1), + AutoPromote: pointerOf(true), }, TaskGroups: []*TaskGroup{ { - Name: stringToPtr("cache"), - Count: intToPtr(1), + Name: pointerOf("cache"), + Count: pointerOf(1), RestartPolicy: &RestartPolicy{ - Interval: timeToPtr(5 * time.Minute), - Attempts: intToPtr(10), - Delay: timeToPtr(25 * time.Second), - Mode: stringToPtr("delay"), + Interval: pointerOf(5 * time.Minute), + Attempts: pointerOf(10), + Delay: pointerOf(25 * time.Second), + Mode: pointerOf("delay"), }, Update: &UpdateStrategy{ - AutoRevert: boolToPtr(true), + AutoRevert: pointerOf(true), }, EphemeralDisk: &EphemeralDisk{ - SizeMB: intToPtr(300), + SizeMB: pointerOf(300), }, Tasks: []*Task{ { @@ -583,14 +583,14 @@ func TestJobs_Canonicalize(t *testing.T) { }, RestartPolicy: &RestartPolicy{ // inherit other values from TG - Attempts: intToPtr(20), + Attempts: pointerOf(20), }, Resources: &Resources{ - CPU: intToPtr(500), - MemoryMB: intToPtr(256), + CPU: pointerOf(500), + MemoryMB: pointerOf(256), Networks: []*NetworkResource{ { - MBits: intToPtr(10), + MBits: pointerOf(10), DynamicPorts: []Port{ { Label: "db", @@ -617,13 +617,13 @@ func TestJobs_Canonicalize(t *testing.T) { }, Templates: []*Template{ { - EmbeddedTmpl: stringToPtr("---"), - DestPath: stringToPtr("local/file.yml"), + EmbeddedTmpl: pointerOf("---"), + DestPath: pointerOf("local/file.yml"), }, { - EmbeddedTmpl: stringToPtr("FOO=bar\n"), - DestPath: stringToPtr("local/file.env"), - Envvars: boolToPtr(true), + EmbeddedTmpl: pointerOf("FOO=bar\n"), + DestPath: pointerOf("local/file.env"), + Envvars: pointerOf(true), }, }, }, @@ -632,75 +632,75 @@ func TestJobs_Canonicalize(t *testing.T) { }, }, expected: &Job{ - Namespace: stringToPtr(DefaultNamespace), - ID: stringToPtr("example_template"), - Name: stringToPtr("example_template"), - ParentID: stringToPtr(""), - Priority: intToPtr(50), - Region: stringToPtr("global"), - Type: 
stringToPtr("service"), - AllAtOnce: boolToPtr(false), - ConsulToken: stringToPtr(""), - ConsulNamespace: stringToPtr(""), - VaultToken: stringToPtr(""), - VaultNamespace: stringToPtr(""), - NomadTokenID: stringToPtr(""), - Stop: boolToPtr(false), - Stable: boolToPtr(false), - Version: uint64ToPtr(0), - Status: stringToPtr(""), - StatusDescription: stringToPtr(""), - CreateIndex: uint64ToPtr(0), - ModifyIndex: uint64ToPtr(0), - JobModifyIndex: uint64ToPtr(0), + Namespace: pointerOf(DefaultNamespace), + ID: pointerOf("example_template"), + Name: pointerOf("example_template"), + ParentID: pointerOf(""), + Priority: pointerOf(50), + Region: pointerOf("global"), + Type: pointerOf("service"), + AllAtOnce: pointerOf(false), + ConsulToken: pointerOf(""), + ConsulNamespace: pointerOf(""), + VaultToken: pointerOf(""), + VaultNamespace: pointerOf(""), + NomadTokenID: pointerOf(""), + Stop: pointerOf(false), + Stable: pointerOf(false), + Version: pointerOf(uint64(0)), + Status: pointerOf(""), + StatusDescription: pointerOf(""), + CreateIndex: pointerOf(uint64(0)), + ModifyIndex: pointerOf(uint64(0)), + JobModifyIndex: pointerOf(uint64(0)), Datacenters: []string{"dc1"}, Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(true), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(true), }, TaskGroups: []*TaskGroup{ { - Name: stringToPtr("cache"), - Count: intToPtr(1), + Name: pointerOf("cache"), + Count: pointerOf(1), RestartPolicy: &RestartPolicy{ - Interval: timeToPtr(5 * time.Minute), - Attempts: intToPtr(10), - Delay: timeToPtr(25 * time.Second), - Mode: stringToPtr("delay"), + Interval: pointerOf(5 * time.Minute), + Attempts: pointerOf(10), + Delay: pointerOf(25 * time.Second), + Mode: pointerOf("delay"), }, ReschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - DelayFunction: stringToPtr("exponential"), - Delay: timeToPtr(30 * time.Second), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + DelayFunction: pointerOf("exponential"), + Delay: pointerOf(30 * time.Second), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), }, EphemeralDisk: &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), }, Consul: &Consul{ Namespace: "", }, Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(true), - Canary: intToPtr(0), - AutoPromote: boolToPtr(true), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * 
time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(true), + Canary: pointerOf(0), + AutoPromote: pointerOf(true), }, Migrate: DefaultMigrateStrategy(), Tasks: []*Task{ @@ -714,18 +714,18 @@ func TestJobs_Canonicalize(t *testing.T) { }}, }, RestartPolicy: &RestartPolicy{ - Interval: timeToPtr(5 * time.Minute), - Attempts: intToPtr(20), - Delay: timeToPtr(25 * time.Second), - Mode: stringToPtr("delay"), + Interval: pointerOf(5 * time.Minute), + Attempts: pointerOf(20), + Delay: pointerOf(25 * time.Second), + Mode: pointerOf("delay"), }, Resources: &Resources{ - CPU: intToPtr(500), - Cores: intToPtr(0), - MemoryMB: intToPtr(256), + CPU: pointerOf(500), + Cores: pointerOf(0), + MemoryMB: pointerOf(256), Networks: []*NetworkResource{ { - MBits: intToPtr(10), + MBits: pointerOf(10), DynamicPorts: []Port{ { Label: "db", @@ -754,38 +754,38 @@ func TestJobs_Canonicalize(t *testing.T) { }, }, }, - KillTimeout: timeToPtr(5 * time.Second), + KillTimeout: pointerOf(5 * time.Second), LogConfig: DefaultLogConfig(), Templates: []*Template{ { - SourcePath: stringToPtr(""), - DestPath: stringToPtr("local/file.yml"), - EmbeddedTmpl: stringToPtr("---"), - ChangeMode: stringToPtr("restart"), - ChangeSignal: stringToPtr(""), - Splay: timeToPtr(5 * time.Second), - Perms: stringToPtr("0644"), - Uid: intToPtr(-1), - Gid: intToPtr(-1), - LeftDelim: stringToPtr("{{"), - RightDelim: stringToPtr("}}"), - Envvars: boolToPtr(false), - VaultGrace: timeToPtr(0), + SourcePath: pointerOf(""), + DestPath: pointerOf("local/file.yml"), + EmbeddedTmpl: pointerOf("---"), + ChangeMode: pointerOf("restart"), + ChangeSignal: pointerOf(""), + Splay: pointerOf(5 * time.Second), + Perms: pointerOf("0644"), + Uid: pointerOf(-1), + Gid: pointerOf(-1), + LeftDelim: pointerOf("{{"), + RightDelim: pointerOf("}}"), + Envvars: pointerOf(false), + VaultGrace: pointerOf(time.Duration(0)), }, { - SourcePath: stringToPtr(""), - DestPath: stringToPtr("local/file.env"), - EmbeddedTmpl: stringToPtr("FOO=bar\n"), - ChangeMode: stringToPtr("restart"), - ChangeSignal: stringToPtr(""), - Splay: timeToPtr(5 * time.Second), - Perms: stringToPtr("0644"), - Uid: intToPtr(-1), - Gid: intToPtr(-1), - LeftDelim: stringToPtr("{{"), - RightDelim: stringToPtr("}}"), - Envvars: boolToPtr(true), - VaultGrace: timeToPtr(0), + SourcePath: pointerOf(""), + DestPath: pointerOf("local/file.env"), + EmbeddedTmpl: pointerOf("FOO=bar\n"), + ChangeMode: pointerOf("restart"), + ChangeSignal: pointerOf(""), + Splay: pointerOf(5 * time.Second), + Perms: pointerOf("0644"), + Uid: pointerOf(-1), + Gid: pointerOf(-1), + LeftDelim: pointerOf("{{"), + RightDelim: pointerOf("}}"), + Envvars: pointerOf(true), + VaultGrace: pointerOf(time.Duration(0)), }, }, }, @@ -798,48 +798,48 @@ func TestJobs_Canonicalize(t *testing.T) { { name: "periodic", input: &Job{ - ID: stringToPtr("bar"), + ID: pointerOf("bar"), Periodic: &PeriodicConfig{}, }, expected: &Job{ - Namespace: stringToPtr(DefaultNamespace), - ID: stringToPtr("bar"), - ParentID: stringToPtr(""), - Name: stringToPtr("bar"), - Region: stringToPtr("global"), - Type: stringToPtr("service"), - Priority: intToPtr(50), - AllAtOnce: boolToPtr(false), - ConsulToken: stringToPtr(""), - ConsulNamespace: stringToPtr(""), - VaultToken: stringToPtr(""), - VaultNamespace: stringToPtr(""), - NomadTokenID: stringToPtr(""), - Stop: boolToPtr(false), - Stable: boolToPtr(false), - Version: uint64ToPtr(0), - Status: stringToPtr(""), - StatusDescription: stringToPtr(""), - CreateIndex: uint64ToPtr(0), - 
ModifyIndex: uint64ToPtr(0), - JobModifyIndex: uint64ToPtr(0), + Namespace: pointerOf(DefaultNamespace), + ID: pointerOf("bar"), + ParentID: pointerOf(""), + Name: pointerOf("bar"), + Region: pointerOf("global"), + Type: pointerOf("service"), + Priority: pointerOf(50), + AllAtOnce: pointerOf(false), + ConsulToken: pointerOf(""), + ConsulNamespace: pointerOf(""), + VaultToken: pointerOf(""), + VaultNamespace: pointerOf(""), + NomadTokenID: pointerOf(""), + Stop: pointerOf(false), + Stable: pointerOf(false), + Version: pointerOf(uint64(0)), + Status: pointerOf(""), + StatusDescription: pointerOf(""), + CreateIndex: pointerOf(uint64(0)), + ModifyIndex: pointerOf(uint64(0)), + JobModifyIndex: pointerOf(uint64(0)), Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, Periodic: &PeriodicConfig{ - Enabled: boolToPtr(true), - Spec: stringToPtr(""), - SpecType: stringToPtr(PeriodicSpecCron), - ProhibitOverlap: boolToPtr(false), - TimeZone: stringToPtr("UTC"), + Enabled: pointerOf(true), + Spec: pointerOf(""), + SpecType: pointerOf(PeriodicSpecCron), + ProhibitOverlap: pointerOf(false), + TimeZone: pointerOf("UTC"), }, }, }, @@ -847,34 +847,34 @@ func TestJobs_Canonicalize(t *testing.T) { { name: "update_merge", input: &Job{ - Name: stringToPtr("foo"), - ID: stringToPtr("bar"), - ParentID: stringToPtr("lol"), + Name: pointerOf("foo"), + ID: pointerOf("bar"), + ParentID: pointerOf("lol"), Update: &UpdateStrategy{ - Stagger: timeToPtr(1 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(6 * time.Minute), - ProgressDeadline: timeToPtr(7 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(1 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(6 * time.Minute), + ProgressDeadline: pointerOf(7 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, TaskGroups: []*TaskGroup{ { - Name: stringToPtr("bar"), + Name: pointerOf("bar"), Consul: &Consul{ Namespace: "", }, Update: &UpdateStrategy{ - Stagger: timeToPtr(2 * time.Second), - MaxParallel: intToPtr(2), - HealthCheck: stringToPtr("manual"), - MinHealthyTime: timeToPtr(1 * time.Second), - AutoRevert: boolToPtr(true), - Canary: intToPtr(1), - AutoPromote: boolToPtr(true), + Stagger: pointerOf(2 * time.Second), + MaxParallel: pointerOf(2), + HealthCheck: pointerOf("manual"), + MinHealthyTime: pointerOf(1 * time.Second), + AutoRevert: pointerOf(true), + Canary: pointerOf(1), + AutoPromote: pointerOf(true), }, Tasks: []*Task{ { @@ -883,7 +883,7 @@ func TestJobs_Canonicalize(t *testing.T) { }, }, { - Name: stringToPtr("baz"), + Name: pointerOf("baz"), Tasks: []*Task{ { Name: "task1", @@ -893,74 
+893,74 @@ func TestJobs_Canonicalize(t *testing.T) { }, }, expected: &Job{ - Namespace: stringToPtr(DefaultNamespace), - ID: stringToPtr("bar"), - Name: stringToPtr("foo"), - Region: stringToPtr("global"), - Type: stringToPtr("service"), - ParentID: stringToPtr("lol"), - Priority: intToPtr(50), - AllAtOnce: boolToPtr(false), - ConsulToken: stringToPtr(""), - ConsulNamespace: stringToPtr(""), - VaultToken: stringToPtr(""), - VaultNamespace: stringToPtr(""), - NomadTokenID: stringToPtr(""), - Stop: boolToPtr(false), - Stable: boolToPtr(false), - Version: uint64ToPtr(0), - Status: stringToPtr(""), - StatusDescription: stringToPtr(""), - CreateIndex: uint64ToPtr(0), - ModifyIndex: uint64ToPtr(0), - JobModifyIndex: uint64ToPtr(0), + Namespace: pointerOf(DefaultNamespace), + ID: pointerOf("bar"), + Name: pointerOf("foo"), + Region: pointerOf("global"), + Type: pointerOf("service"), + ParentID: pointerOf("lol"), + Priority: pointerOf(50), + AllAtOnce: pointerOf(false), + ConsulToken: pointerOf(""), + ConsulNamespace: pointerOf(""), + VaultToken: pointerOf(""), + VaultNamespace: pointerOf(""), + NomadTokenID: pointerOf(""), + Stop: pointerOf(false), + Stable: pointerOf(false), + Version: pointerOf(uint64(0)), + Status: pointerOf(""), + StatusDescription: pointerOf(""), + CreateIndex: pointerOf(uint64(0)), + ModifyIndex: pointerOf(uint64(0)), + JobModifyIndex: pointerOf(uint64(0)), Update: &UpdateStrategy{ - Stagger: timeToPtr(1 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(6 * time.Minute), - ProgressDeadline: timeToPtr(7 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(1 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(6 * time.Minute), + ProgressDeadline: pointerOf(7 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, TaskGroups: []*TaskGroup{ { - Name: stringToPtr("bar"), - Count: intToPtr(1), + Name: pointerOf("bar"), + Count: pointerOf(1), EphemeralDisk: &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), }, RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, ReschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - DelayFunction: stringToPtr("exponential"), - Delay: timeToPtr(30 * time.Second), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + DelayFunction: pointerOf("exponential"), + Delay: pointerOf(30 * time.Second), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), }, Consul: &Consul{ Namespace: "", }, Update: &UpdateStrategy{ - Stagger: timeToPtr(2 * time.Second), - MaxParallel: intToPtr(2), - HealthCheck: stringToPtr("manual"), - MinHealthyTime: timeToPtr(1 * time.Second), - HealthyDeadline: timeToPtr(6 * time.Minute), - ProgressDeadline: timeToPtr(7 * time.Minute), - AutoRevert: boolToPtr(true), - Canary: intToPtr(1), - AutoPromote: 
boolToPtr(true), + Stagger: pointerOf(2 * time.Second), + MaxParallel: pointerOf(2), + HealthCheck: pointerOf("manual"), + MinHealthyTime: pointerOf(1 * time.Second), + HealthyDeadline: pointerOf(6 * time.Minute), + ProgressDeadline: pointerOf(7 * time.Minute), + AutoRevert: pointerOf(true), + Canary: pointerOf(1), + AutoPromote: pointerOf(true), }, Migrate: DefaultMigrateStrategy(), Tasks: []*Task{ @@ -968,46 +968,46 @@ func TestJobs_Canonicalize(t *testing.T) { Name: "task1", LogConfig: DefaultLogConfig(), Resources: DefaultResources(), - KillTimeout: timeToPtr(5 * time.Second), + KillTimeout: pointerOf(5 * time.Second), RestartPolicy: defaultServiceJobRestartPolicy(), }, }, }, { - Name: stringToPtr("baz"), - Count: intToPtr(1), + Name: pointerOf("baz"), + Count: pointerOf(1), EphemeralDisk: &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), }, RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, ReschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - DelayFunction: stringToPtr("exponential"), - Delay: timeToPtr(30 * time.Second), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + DelayFunction: pointerOf("exponential"), + Delay: pointerOf(30 * time.Second), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), }, Consul: &Consul{ Namespace: "", }, Update: &UpdateStrategy{ - Stagger: timeToPtr(1 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(6 * time.Minute), - ProgressDeadline: timeToPtr(7 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(1 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(6 * time.Minute), + ProgressDeadline: pointerOf(7 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, Migrate: DefaultMigrateStrategy(), Tasks: []*Task{ @@ -1015,7 +1015,7 @@ func TestJobs_Canonicalize(t *testing.T) { Name: "task1", LogConfig: DefaultLogConfig(), Resources: DefaultResources(), - KillTimeout: timeToPtr(5 * time.Second), + KillTimeout: pointerOf(5 * time.Second), RestartPolicy: defaultServiceJobRestartPolicy(), }, }, @@ -1027,35 +1027,35 @@ func TestJobs_Canonicalize(t *testing.T) { { name: "restart_merge", input: &Job{ - Name: stringToPtr("foo"), - ID: stringToPtr("bar"), - ParentID: stringToPtr("lol"), + Name: pointerOf("foo"), + ID: pointerOf("bar"), + ParentID: pointerOf("lol"), TaskGroups: []*TaskGroup{ { - Name: stringToPtr("bar"), + Name: pointerOf("bar"), RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, Tasks: []*Task{ { Name: "task1", RestartPolicy: &RestartPolicy{ - Attempts: intToPtr(5), 
- Delay: timeToPtr(1 * time.Second), + Attempts: pointerOf(5), + Delay: pointerOf(1 * time.Second), }, }, }, }, { - Name: stringToPtr("baz"), + Name: pointerOf("baz"), RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(20 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(20 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, Consul: &Consul{ Namespace: "", @@ -1069,74 +1069,74 @@ func TestJobs_Canonicalize(t *testing.T) { }, }, expected: &Job{ - Namespace: stringToPtr(DefaultNamespace), - ID: stringToPtr("bar"), - Name: stringToPtr("foo"), - Region: stringToPtr("global"), - Type: stringToPtr("service"), - ParentID: stringToPtr("lol"), - Priority: intToPtr(50), - AllAtOnce: boolToPtr(false), - ConsulToken: stringToPtr(""), - ConsulNamespace: stringToPtr(""), - VaultToken: stringToPtr(""), - VaultNamespace: stringToPtr(""), - NomadTokenID: stringToPtr(""), - Stop: boolToPtr(false), - Stable: boolToPtr(false), - Version: uint64ToPtr(0), - Status: stringToPtr(""), - StatusDescription: stringToPtr(""), - CreateIndex: uint64ToPtr(0), - ModifyIndex: uint64ToPtr(0), - JobModifyIndex: uint64ToPtr(0), + Namespace: pointerOf(DefaultNamespace), + ID: pointerOf("bar"), + Name: pointerOf("foo"), + Region: pointerOf("global"), + Type: pointerOf("service"), + ParentID: pointerOf("lol"), + Priority: pointerOf(50), + AllAtOnce: pointerOf(false), + ConsulToken: pointerOf(""), + ConsulNamespace: pointerOf(""), + VaultToken: pointerOf(""), + VaultNamespace: pointerOf(""), + NomadTokenID: pointerOf(""), + Stop: pointerOf(false), + Stable: pointerOf(false), + Version: pointerOf(uint64(0)), + Status: pointerOf(""), + StatusDescription: pointerOf(""), + CreateIndex: pointerOf(uint64(0)), + ModifyIndex: pointerOf(uint64(0)), + JobModifyIndex: pointerOf(uint64(0)), Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, TaskGroups: []*TaskGroup{ { - Name: stringToPtr("bar"), - Count: intToPtr(1), + Name: pointerOf("bar"), + Count: pointerOf(1), EphemeralDisk: &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), }, RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, ReschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - DelayFunction: stringToPtr("exponential"), - Delay: timeToPtr(30 * time.Second), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + 
DelayFunction: pointerOf("exponential"), + Delay: pointerOf(30 * time.Second), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), }, Consul: &Consul{ Namespace: "", }, Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, Migrate: DefaultMigrateStrategy(), Tasks: []*Task{ @@ -1144,51 +1144,51 @@ func TestJobs_Canonicalize(t *testing.T) { Name: "task1", LogConfig: DefaultLogConfig(), Resources: DefaultResources(), - KillTimeout: timeToPtr(5 * time.Second), + KillTimeout: pointerOf(5 * time.Second), RestartPolicy: &RestartPolicy{ - Attempts: intToPtr(5), - Delay: timeToPtr(1 * time.Second), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Attempts: pointerOf(5), + Delay: pointerOf(1 * time.Second), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, }, }, }, { - Name: stringToPtr("baz"), - Count: intToPtr(1), + Name: pointerOf("baz"), + Count: pointerOf(1), EphemeralDisk: &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), }, RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(20 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(20 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, ReschedulePolicy: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - DelayFunction: stringToPtr("exponential"), - Delay: timeToPtr(30 * time.Second), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + DelayFunction: pointerOf("exponential"), + Delay: pointerOf(30 * time.Second), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), }, Consul: &Consul{ Namespace: "", }, Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, Migrate: DefaultMigrateStrategy(), Tasks: []*Task{ @@ -1196,12 +1196,12 @@ func TestJobs_Canonicalize(t *testing.T) { Name: "task1", LogConfig: DefaultLogConfig(), Resources: DefaultResources(), - KillTimeout: timeToPtr(5 * time.Second), + KillTimeout: pointerOf(5 * time.Second), 
RestartPolicy: &RestartPolicy{ - Delay: timeToPtr(20 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr("fail"), + Delay: pointerOf(20 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf("fail"), }, }, }, @@ -1213,14 +1213,14 @@ func TestJobs_Canonicalize(t *testing.T) { { name: "multiregion", input: &Job{ - Name: stringToPtr("foo"), - ID: stringToPtr("bar"), - ParentID: stringToPtr("lol"), + Name: pointerOf("foo"), + ID: pointerOf("bar"), + ParentID: pointerOf("lol"), Multiregion: &Multiregion{ Regions: []*MultiregionRegion{ { Name: "west", - Count: intToPtr(1), + Count: pointerOf(1), }, }, }, @@ -1228,49 +1228,49 @@ func TestJobs_Canonicalize(t *testing.T) { expected: &Job{ Multiregion: &Multiregion{ Strategy: &MultiregionStrategy{ - MaxParallel: intToPtr(0), - OnFailure: stringToPtr(""), + MaxParallel: pointerOf(0), + OnFailure: pointerOf(""), }, Regions: []*MultiregionRegion{ { Name: "west", - Count: intToPtr(1), + Count: pointerOf(1), Datacenters: []string{}, Meta: map[string]string{}, }, }, }, - Namespace: stringToPtr(DefaultNamespace), - ID: stringToPtr("bar"), - Name: stringToPtr("foo"), - Region: stringToPtr("global"), - Type: stringToPtr("service"), - ParentID: stringToPtr("lol"), - Priority: intToPtr(50), - AllAtOnce: boolToPtr(false), - ConsulToken: stringToPtr(""), - ConsulNamespace: stringToPtr(""), - VaultToken: stringToPtr(""), - VaultNamespace: stringToPtr(""), - NomadTokenID: stringToPtr(""), - Stop: boolToPtr(false), - Stable: boolToPtr(false), - Version: uint64ToPtr(0), - Status: stringToPtr(""), - StatusDescription: stringToPtr(""), - CreateIndex: uint64ToPtr(0), - ModifyIndex: uint64ToPtr(0), - JobModifyIndex: uint64ToPtr(0), + Namespace: pointerOf(DefaultNamespace), + ID: pointerOf("bar"), + Name: pointerOf("foo"), + Region: pointerOf("global"), + Type: pointerOf("service"), + ParentID: pointerOf("lol"), + Priority: pointerOf(50), + AllAtOnce: pointerOf(false), + ConsulToken: pointerOf(""), + ConsulNamespace: pointerOf(""), + VaultToken: pointerOf(""), + VaultNamespace: pointerOf(""), + NomadTokenID: pointerOf(""), + Stop: pointerOf(false), + Stable: pointerOf(false), + Version: pointerOf(uint64(0)), + Status: pointerOf(""), + StatusDescription: pointerOf(""), + CreateIndex: pointerOf(uint64(0)), + ModifyIndex: pointerOf(uint64(0)), + JobModifyIndex: pointerOf(uint64(0)), Update: &UpdateStrategy{ - Stagger: timeToPtr(30 * time.Second), - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - AutoRevert: boolToPtr(false), - Canary: intToPtr(0), - AutoPromote: boolToPtr(false), + Stagger: pointerOf(30 * time.Second), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + AutoRevert: pointerOf(false), + Canary: pointerOf(0), + AutoPromote: pointerOf(false), }, }, }, @@ -1360,13 +1360,13 @@ func TestJobs_Revert(t *testing.T) { assertWriteMeta(t, wm) // Fail revert at incorrect enforce - _, _, err = jobs.Revert(*job.ID, 0, uint64ToPtr(10), nil, "", "") + _, _, err = jobs.Revert(*job.ID, 0, pointerOf(uint64(10)), nil, "", "") if err == nil || !strings.Contains(err.Error(), "enforcing version") { t.Fatalf("expected enforcement error: %v", err) } // Works at correct index - 
revertResp, wm, err := jobs.Revert(*job.ID, 0, uint64ToPtr(1), nil, "", "") + revertResp, wm, err := jobs.Revert(*job.ID, 0, pointerOf(uint64(1)), nil, "", "") if err != nil { t.Fatalf("err: %s", err) } @@ -1446,13 +1446,13 @@ func TestJobs_ScaleInvalidAction(t *testing.T) { // Register test job job := testJob() - job.ID = stringToPtr("TestJobs_Scale") + job.ID = pointerOf("TestJobs_Scale") _, wm, err := jobs.Register(job, nil) require.NoError(err) assertWriteMeta(t, wm) // Perform a scaling action with bad group name, verify error - _, _, err = jobs.Scale(*job.ID, "incorrect-group-name", intToPtr(2), + _, _, err = jobs.Scale(*job.ID, "incorrect-group-name", pointerOf(2), "because", false, nil, nil) require.Error(err) require.Contains(err.Error(), "does not exist") @@ -1956,11 +1956,11 @@ func TestJobs_NewBatchJob(t *testing.T) { testutil.Parallel(t) job := NewBatchJob("job1", "myjob", "global", 5) expect := &Job{ - Region: stringToPtr("global"), - ID: stringToPtr("job1"), - Name: stringToPtr("myjob"), - Type: stringToPtr(JobTypeBatch), - Priority: intToPtr(5), + Region: pointerOf("global"), + ID: pointerOf("job1"), + Name: pointerOf("myjob"), + Type: pointerOf(JobTypeBatch), + Priority: pointerOf(5), } if !reflect.DeepEqual(job, expect) { t.Fatalf("expect: %#v, got: %#v", expect, job) @@ -1971,11 +1971,11 @@ func TestJobs_NewServiceJob(t *testing.T) { testutil.Parallel(t) job := NewServiceJob("job1", "myjob", "global", 5) expect := &Job{ - Region: stringToPtr("global"), - ID: stringToPtr("job1"), - Name: stringToPtr("myjob"), - Type: stringToPtr(JobTypeService), - Priority: intToPtr(5), + Region: pointerOf("global"), + ID: pointerOf("job1"), + Name: pointerOf("myjob"), + Type: pointerOf(JobTypeService), + Priority: pointerOf(5), } if !reflect.DeepEqual(job, expect) { t.Fatalf("expect: %#v, got: %#v", expect, job) @@ -1986,11 +1986,11 @@ func TestJobs_NewSystemJob(t *testing.T) { testutil.Parallel(t) job := NewSystemJob("job1", "myjob", "global", 5) expect := &Job{ - Region: stringToPtr("global"), - ID: stringToPtr("job1"), - Name: stringToPtr("myjob"), - Type: stringToPtr(JobTypeSystem), - Priority: intToPtr(5), + Region: pointerOf("global"), + ID: pointerOf("job1"), + Name: pointerOf("myjob"), + Type: pointerOf(JobTypeSystem), + Priority: pointerOf(5), } if !reflect.DeepEqual(job, expect) { t.Fatalf("expect: %#v, got: %#v", expect, job) @@ -2001,11 +2001,11 @@ func TestJobs_NewSysbatchJob(t *testing.T) { testutil.Parallel(t) job := NewSysbatchJob("job1", "myjob", "global", 5) expect := &Job{ - Region: stringToPtr("global"), - ID: stringToPtr("job1"), - Name: stringToPtr("myjob"), - Type: stringToPtr(JobTypeSysbatch), - Priority: intToPtr(5), + Region: pointerOf("global"), + ID: pointerOf("job1"), + Name: pointerOf("myjob"), + Type: pointerOf(JobTypeSysbatch), + Priority: pointerOf(5), } require.Equal(t, expect, job) } @@ -2089,13 +2089,13 @@ func TestJobs_AddAffinity(t *testing.T) { LTarget: "kernel.version", RTarget: "4.6", Operand: "=", - Weight: int8ToPtr(100), + Weight: pointerOf(int8(100)), }, { LTarget: "${node.datacenter}", RTarget: "dc2", Operand: "=", - Weight: int8ToPtr(50), + Weight: pointerOf(int8(50)), }, } if !reflect.DeepEqual(job.Affinities, expect) { @@ -2149,7 +2149,7 @@ func TestJobs_AddSpread(t *testing.T) { expect := []*Spread{ { Attribute: "${meta.rack}", - Weight: int8ToPtr(100), + Weight: pointerOf(int8(100)), SpreadTarget: []*SpreadTarget{ { Value: "r1", @@ -2159,7 +2159,7 @@ func TestJobs_AddSpread(t *testing.T) { }, { Attribute: "${node.datacenter}", - Weight: 
int8ToPtr(100), + Weight: pointerOf(int8(100)), SpreadTarget: []*SpreadTarget{ { Value: "dc1", @@ -2190,7 +2190,7 @@ func TestJobs_ScaleAction(t *testing.T) { newCount := origCount + 1 // Trying to scale against a target before it exists returns an error - _, _, err := jobs.Scale(id, "missing", intToPtr(newCount), "this won't work", + _, _, err := jobs.Scale(id, "missing", pointerOf(newCount), "this won't work", false, nil, nil) require.Error(err) require.Contains(err.Error(), "not found") @@ -2202,7 +2202,7 @@ func TestJobs_ScaleAction(t *testing.T) { // Perform scaling action scalingResp, wm, err := jobs.Scale(id, groupName, - intToPtr(newCount), "need more instances", false, + pointerOf(newCount), "need more instances", false, map[string]interface{}{ "meta": "data", }, nil) diff --git a/api/nodes_test.go b/api/nodes_test.go index a811826ed..6d4de0747 100644 --- a/api/nodes_test.go +++ b/api/nodes_test.go @@ -669,15 +669,15 @@ func TestNodeStatValueFormatting(t *testing.T) { }{ { "true", - StatValue{BoolVal: boolToPtr(true)}, + StatValue{BoolVal: pointerOf(true)}, }, { "false", - StatValue{BoolVal: boolToPtr(false)}, + StatValue{BoolVal: pointerOf(false)}, }, { "myvalue", - StatValue{StringVal: stringToPtr("myvalue")}, + StatValue{StringVal: pointerOf("myvalue")}, }, { "2.718", @@ -710,28 +710,28 @@ func TestNodeStatValueFormatting(t *testing.T) { { "2", StatValue{ - IntNumeratorVal: int64ToPtr(2), + IntNumeratorVal: pointerOf(int64(2)), }, }, { "2 / 3", StatValue{ - IntNumeratorVal: int64ToPtr(2), - IntDenominatorVal: int64ToPtr(3), + IntNumeratorVal: pointerOf(int64(2)), + IntDenominatorVal: pointerOf(int64(3)), }, }, { "2 MHz", StatValue{ - IntNumeratorVal: int64ToPtr(2), + IntNumeratorVal: pointerOf(int64(2)), Unit: "MHz", }, }, { "2 / 3 MHz", StatValue{ - IntNumeratorVal: int64ToPtr(2), - IntDenominatorVal: int64ToPtr(3), + IntNumeratorVal: pointerOf(int64(2)), + IntDenominatorVal: pointerOf(int64(3)), Unit: "MHz", }, }, diff --git a/api/resources.go b/api/resources.go index b5ada2d9e..43f6bbe86 100644 --- a/api/resources.go +++ b/api/resources.go @@ -38,7 +38,7 @@ func (r *Resources) Canonicalize() { // CPU will be set to the default if cores is nil above. // If cpu is nil here then cores has been set and cpu should be 0 if r.CPU == nil { - r.CPU = intToPtr(0) + r.CPU = pointerOf(0) } if r.MemoryMB == nil { @@ -55,9 +55,9 @@ func (r *Resources) Canonicalize() { // and should be kept in sync. func DefaultResources() *Resources { return &Resources{ - CPU: intToPtr(100), - Cores: intToPtr(0), - MemoryMB: intToPtr(300), + CPU: pointerOf(100), + Cores: pointerOf(0), + MemoryMB: pointerOf(300), } } @@ -68,9 +68,9 @@ func DefaultResources() *Resources { // IN nomad/structs/structs.go and should be kept in sync. 
func MinResources() *Resources { return &Resources{ - CPU: intToPtr(1), - Cores: intToPtr(0), - MemoryMB: intToPtr(10), + CPU: pointerOf(1), + Cores: pointerOf(0), + MemoryMB: pointerOf(10), } } @@ -268,7 +268,7 @@ type RequestedDevice struct { func (d *RequestedDevice) Canonicalize() { if d.Count == nil { - d.Count = uint64ToPtr(1) + d.Count = pointerOf(uint64(1)) } for _, a := range d.Affinities { diff --git a/api/resources_test.go b/api/resources_test.go index 2f9904ba6..de67c74a0 100644 --- a/api/resources_test.go +++ b/api/resources_test.go @@ -23,25 +23,25 @@ func TestResources_Canonicalize(t *testing.T) { { name: "cores", input: &Resources{ - Cores: intToPtr(2), - MemoryMB: intToPtr(1024), + Cores: pointerOf(2), + MemoryMB: pointerOf(1024), }, expected: &Resources{ - CPU: intToPtr(0), - Cores: intToPtr(2), - MemoryMB: intToPtr(1024), + CPU: pointerOf(0), + Cores: pointerOf(2), + MemoryMB: pointerOf(1024), }, }, { name: "cpu", input: &Resources{ - CPU: intToPtr(500), - MemoryMB: intToPtr(1024), + CPU: pointerOf(500), + MemoryMB: pointerOf(1024), }, expected: &Resources{ - CPU: intToPtr(500), - Cores: intToPtr(0), - MemoryMB: intToPtr(1024), + CPU: pointerOf(500), + Cores: pointerOf(0), + MemoryMB: pointerOf(1024), }, }, } diff --git a/api/scaling.go b/api/scaling.go index 2266c8959..32259c9f4 100644 --- a/api/scaling.go +++ b/api/scaling.go @@ -35,7 +35,7 @@ func (s *Scaling) GetPolicy(id string, q *QueryOptions) (*ScalingPolicy, *QueryM func (p *ScalingPolicy) Canonicalize(taskGroupCount int) { if p.Enabled == nil { - p.Enabled = boolToPtr(true) + p.Enabled = pointerOf(true) } if p.Min == nil { var m int64 = int64(taskGroupCount) diff --git a/api/scaling_test.go b/api/scaling_test.go index 0d4a703c6..af9dff3b5 100644 --- a/api/scaling_test.go +++ b/api/scaling_test.go @@ -24,7 +24,7 @@ func TestScalingPolicies_ListPolicies(t *testing.T) { // Register a job with a scaling policy job := testJob() job.TaskGroups[0].Scaling = &ScalingPolicy{ - Max: int64ToPtr(100), + Max: pointerOf(int64(100)), } _, _, err = jobs.Register(job, nil) require.NoError(err) @@ -77,9 +77,9 @@ func TestScalingPolicies_GetPolicy(t *testing.T) { // Register a job with a scaling policy job := testJob() policy := &ScalingPolicy{ - Enabled: boolToPtr(true), - Min: int64ToPtr(1), - Max: int64ToPtr(1), + Enabled: pointerOf(true), + Min: pointerOf(int64(1)), + Max: pointerOf(int64(1)), Policy: map[string]interface{}{ "key": "value", }, diff --git a/api/services.go b/api/services.go index 55d2b01c2..8d9b4157b 100644 --- a/api/services.go +++ b/api/services.go @@ -144,7 +144,7 @@ func (c *CheckRestart) Canonicalize() { } if c.Grace == nil { - c.Grace = timeToPtr(1 * time.Second) + c.Grace = pointerOf(1 * time.Second) } } diff --git a/api/services_test.go b/api/services_test.go index 5de82e17f..c33fa429c 100644 --- a/api/services_test.go +++ b/api/services_test.go @@ -24,8 +24,8 @@ func TestServiceRegistrations_Delete(t *testing.T) { func TestService_Canonicalize(t *testing.T) { testutil.Parallel(t) - j := &Job{Name: stringToPtr("job")} - tg := &TaskGroup{Name: stringToPtr("group")} + j := &Job{Name: pointerOf("job")} + tg := &TaskGroup{Name: pointerOf("group")} task := &Task{Name: "task"} s := &Service{ TaggedAddresses: make(map[string]string), @@ -45,8 +45,8 @@ func TestService_Canonicalize(t *testing.T) { func TestServiceCheck_Canonicalize(t *testing.T) { testutil.Parallel(t) - j := &Job{Name: stringToPtr("job")} - tg := &TaskGroup{Name: stringToPtr("group")} + j := &Job{Name: pointerOf("job")} + tg := 
&TaskGroup{Name: pointerOf("group")} task := &Task{Name: "task"} s := &Service{ Checks: []ServiceCheck{ @@ -64,8 +64,8 @@ func TestServiceCheck_Canonicalize(t *testing.T) { func TestService_Check_PassFail(t *testing.T) { testutil.Parallel(t) - job := &Job{Name: stringToPtr("job")} - tg := &TaskGroup{Name: stringToPtr("group")} + job := &Job{Name: pointerOf("job")} + tg := &TaskGroup{Name: pointerOf("group")} task := &Task{Name: "task"} t.Run("enforce minimums", func(t *testing.T) { @@ -100,13 +100,13 @@ func TestService_Check_PassFail(t *testing.T) { func TestService_CheckRestart(t *testing.T) { testutil.Parallel(t) - job := &Job{Name: stringToPtr("job")} - tg := &TaskGroup{Name: stringToPtr("group")} + job := &Job{Name: pointerOf("job")} + tg := &TaskGroup{Name: pointerOf("group")} task := &Task{Name: "task"} service := &Service{ CheckRestart: &CheckRestart{ Limit: 11, - Grace: timeToPtr(11 * time.Second), + Grace: pointerOf(11 * time.Second), IgnoreWarnings: true, }, Checks: []ServiceCheck{ @@ -114,7 +114,7 @@ func TestService_CheckRestart(t *testing.T) { Name: "all-set", CheckRestart: &CheckRestart{ Limit: 22, - Grace: timeToPtr(22 * time.Second), + Grace: pointerOf(22 * time.Second), IgnoreWarnings: true, }, }, @@ -122,7 +122,7 @@ func TestService_CheckRestart(t *testing.T) { Name: "some-set", CheckRestart: &CheckRestart{ Limit: 33, - Grace: timeToPtr(33 * time.Second), + Grace: pointerOf(33 * time.Second), }, }, { @@ -148,8 +148,8 @@ func TestService_CheckRestart(t *testing.T) { func TestService_Connect_proxy_settings(t *testing.T) { testutil.Parallel(t) - job := &Job{Name: stringToPtr("job")} - tg := &TaskGroup{Name: stringToPtr("group")} + job := &Job{Name: pointerOf("job")} + tg := &TaskGroup{Name: pointerOf("group")} task := &Task{Name: "task"} service := &Service{ Connect: &ConsulConnect{ @@ -183,8 +183,8 @@ func TestService_Tags(t *testing.T) { r := require.New(t) // canonicalize does not modify eto or tags - job := &Job{Name: stringToPtr("job")} - tg := &TaskGroup{Name: stringToPtr("group")} + job := &Job{Name: pointerOf("job")} + tg := &TaskGroup{Name: pointerOf("group")} task := &Task{Name: "task"} service := &Service{ Tags: []string{"a", "b"}, diff --git a/api/tasks.go b/api/tasks.go index 2f677c482..c4db6224b 100644 --- a/api/tasks.go +++ b/api/tasks.go @@ -170,13 +170,13 @@ func NewAffinity(lTarget string, operand string, rTarget string, weight int8) *A LTarget: lTarget, RTarget: rTarget, Operand: operand, - Weight: int8ToPtr(weight), + Weight: pointerOf(int8(weight)), } } func (a *Affinity) Canonicalize() { if a.Weight == nil { - a.Weight = int8ToPtr(50) + a.Weight = pointerOf(int8(50)) } } @@ -187,35 +187,35 @@ func NewDefaultReschedulePolicy(jobType string) *ReschedulePolicy { // This needs to be in sync with DefaultServiceJobReschedulePolicy // in nomad/structs/structs.go dp = &ReschedulePolicy{ - Delay: timeToPtr(30 * time.Second), - DelayFunction: stringToPtr("exponential"), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Delay: pointerOf(30 * time.Second), + DelayFunction: pointerOf("exponential"), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), - Attempts: intToPtr(0), - Interval: timeToPtr(0), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), } case "batch": // This needs to be in sync with DefaultBatchJobReschedulePolicy // in nomad/structs/structs.go dp = &ReschedulePolicy{ - Attempts: intToPtr(1), - Interval: timeToPtr(24 * time.Hour), - Delay: timeToPtr(5 * time.Second), - DelayFunction: 
stringToPtr("constant"), + Attempts: pointerOf(1), + Interval: pointerOf(24 * time.Hour), + Delay: pointerOf(5 * time.Second), + DelayFunction: pointerOf("constant"), - MaxDelay: timeToPtr(0), - Unlimited: boolToPtr(false), + MaxDelay: pointerOf(time.Duration(0)), + Unlimited: pointerOf(false), } case "system": dp = &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - Delay: timeToPtr(0), - DelayFunction: stringToPtr(""), - MaxDelay: timeToPtr(0), - Unlimited: boolToPtr(false), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + Delay: pointerOf(time.Duration(0)), + DelayFunction: pointerOf(""), + MaxDelay: pointerOf(time.Duration(0)), + Unlimited: pointerOf(false), } default: @@ -223,12 +223,12 @@ func NewDefaultReschedulePolicy(jobType string) *ReschedulePolicy { // function and we need to ensure a non-nil object is returned so that // the canonicalization runs without panicking. dp = &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - Delay: timeToPtr(0), - DelayFunction: stringToPtr(""), - MaxDelay: timeToPtr(0), - Unlimited: boolToPtr(false), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + Delay: pointerOf(time.Duration(0)), + DelayFunction: pointerOf(""), + MaxDelay: pointerOf(time.Duration(0)), + Unlimited: pointerOf(false), } } return dp @@ -276,14 +276,14 @@ func NewSpreadTarget(value string, percent uint8) *SpreadTarget { func NewSpread(attribute string, weight int8, spreadTargets []*SpreadTarget) *Spread { return &Spread{ Attribute: attribute, - Weight: int8ToPtr(weight), + Weight: pointerOf(int8(weight)), SpreadTarget: spreadTargets, } } func (s *Spread) Canonicalize() { if s.Weight == nil { - s.Weight = int8ToPtr(50) + s.Weight = pointerOf(int8(50)) } } @@ -296,21 +296,21 @@ type EphemeralDisk struct { func DefaultEphemeralDisk() *EphemeralDisk { return &EphemeralDisk{ - Sticky: boolToPtr(false), - Migrate: boolToPtr(false), - SizeMB: intToPtr(300), + Sticky: pointerOf(false), + Migrate: pointerOf(false), + SizeMB: pointerOf(300), } } func (e *EphemeralDisk) Canonicalize() { if e.Sticky == nil { - e.Sticky = boolToPtr(false) + e.Sticky = pointerOf(false) } if e.Migrate == nil { - e.Migrate = boolToPtr(false) + e.Migrate = pointerOf(false) } if e.SizeMB == nil { - e.SizeMB = intToPtr(300) + e.SizeMB = pointerOf(300) } } @@ -325,10 +325,10 @@ type MigrateStrategy struct { func DefaultMigrateStrategy() *MigrateStrategy { return &MigrateStrategy{ - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), } } @@ -405,10 +405,10 @@ type VolumeMount struct { func (vm *VolumeMount) Canonicalize() { if vm.PropagationMode == nil { - vm.PropagationMode = stringToPtr(VolumeMountPropagationPrivate) + vm.PropagationMode = pointerOf(VolumeMountPropagationPrivate) } if vm.ReadOnly == nil { - vm.ReadOnly = boolToPtr(false) + vm.ReadOnly = pointerOf(false) } } @@ -439,22 +439,22 @@ type TaskGroup struct { // NewTaskGroup creates a new TaskGroup. 
func NewTaskGroup(name string, count int) *TaskGroup { return &TaskGroup{ - Name: stringToPtr(name), - Count: intToPtr(count), + Name: pointerOf(name), + Count: pointerOf(count), } } // Canonicalize sets defaults and merges settings that should be inherited from the job func (g *TaskGroup) Canonicalize(job *Job) { if g.Name == nil { - g.Name = stringToPtr("") + g.Name = pointerOf("") } if g.Count == nil { if g.Scaling != nil && g.Scaling.Min != nil { - g.Count = intToPtr(int(*g.Scaling.Min)) + g.Count = pointerOf(int(*g.Scaling.Min)) } else { - g.Count = intToPtr(1) + g.Count = pointerOf(1) } } if g.Scaling != nil { @@ -558,10 +558,10 @@ func (g *TaskGroup) Canonicalize(job *Job) { // in nomad/structs/structs.go func defaultServiceJobRestartPolicy() *RestartPolicy { return &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(2), - Interval: timeToPtr(30 * time.Minute), - Mode: stringToPtr(RestartPolicyModeFail), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(2), + Interval: pointerOf(30 * time.Minute), + Mode: pointerOf(RestartPolicyModeFail), } } @@ -569,10 +569,10 @@ func defaultServiceJobRestartPolicy() *RestartPolicy { // in nomad/structs/structs.go func defaultBatchJobRestartPolicy() *RestartPolicy { return &RestartPolicy{ - Delay: timeToPtr(15 * time.Second), - Attempts: intToPtr(3), - Interval: timeToPtr(24 * time.Hour), - Mode: stringToPtr(RestartPolicyModeFail), + Delay: pointerOf(15 * time.Second), + Attempts: pointerOf(3), + Interval: pointerOf(24 * time.Hour), + Mode: pointerOf(RestartPolicyModeFail), } } @@ -623,17 +623,17 @@ type LogConfig struct { func DefaultLogConfig() *LogConfig { return &LogConfig{ - MaxFiles: intToPtr(10), - MaxFileSizeMB: intToPtr(10), + MaxFiles: pointerOf(10), + MaxFileSizeMB: pointerOf(10), } } func (l *LogConfig) Canonicalize() { if l.MaxFiles == nil { - l.MaxFiles = intToPtr(10) + l.MaxFiles = pointerOf(10) } if l.MaxFileSizeMB == nil { - l.MaxFileSizeMB = intToPtr(10) + l.MaxFileSizeMB = pointerOf(10) } } @@ -694,7 +694,7 @@ func (t *Task) Canonicalize(tg *TaskGroup, job *Job) { t.Resources.Canonicalize() if t.KillTimeout == nil { - t.KillTimeout = timeToPtr(5 * time.Second) + t.KillTimeout = pointerOf(5 * time.Second) } if t.LogConfig == nil { t.LogConfig = DefaultLogConfig() @@ -746,11 +746,11 @@ type TaskArtifact struct { func (a *TaskArtifact) Canonicalize() { if a.GetterMode == nil { - a.GetterMode = stringToPtr("any") + a.GetterMode = pointerOf("any") } if a.GetterSource == nil { // Shouldn't be possible, but we don't want to panic - a.GetterSource = stringToPtr("") + a.GetterSource = pointerOf("") } if len(a.GetterOptions) == 0 { a.GetterOptions = nil @@ -768,7 +768,7 @@ func (a *TaskArtifact) Canonicalize() { a.RelativeDest = &dest default: // Default to a directory - a.RelativeDest = stringToPtr("local/") + a.RelativeDest = pointerOf("local/") } } } @@ -810,52 +810,52 @@ type Template struct { func (tmpl *Template) Canonicalize() { if tmpl.SourcePath == nil { - tmpl.SourcePath = stringToPtr("") + tmpl.SourcePath = pointerOf("") } if tmpl.DestPath == nil { - tmpl.DestPath = stringToPtr("") + tmpl.DestPath = pointerOf("") } if tmpl.EmbeddedTmpl == nil { - tmpl.EmbeddedTmpl = stringToPtr("") + tmpl.EmbeddedTmpl = pointerOf("") } if tmpl.ChangeMode == nil { - tmpl.ChangeMode = stringToPtr("restart") + tmpl.ChangeMode = pointerOf("restart") } if tmpl.ChangeSignal == nil { if *tmpl.ChangeMode == "signal" { - tmpl.ChangeSignal = stringToPtr("SIGHUP") + tmpl.ChangeSignal = pointerOf("SIGHUP") } else { - 
tmpl.ChangeSignal = stringToPtr("") + tmpl.ChangeSignal = pointerOf("") } } else { sig := *tmpl.ChangeSignal - tmpl.ChangeSignal = stringToPtr(strings.ToUpper(sig)) + tmpl.ChangeSignal = pointerOf(strings.ToUpper(sig)) } if tmpl.Splay == nil { - tmpl.Splay = timeToPtr(5 * time.Second) + tmpl.Splay = pointerOf(5 * time.Second) } if tmpl.Perms == nil { - tmpl.Perms = stringToPtr("0644") + tmpl.Perms = pointerOf("0644") } if tmpl.Uid == nil { - tmpl.Uid = intToPtr(-1) + tmpl.Uid = pointerOf(-1) } if tmpl.Gid == nil { - tmpl.Gid = intToPtr(-1) + tmpl.Gid = pointerOf(-1) } if tmpl.LeftDelim == nil { - tmpl.LeftDelim = stringToPtr("{{") + tmpl.LeftDelim = pointerOf("{{") } if tmpl.RightDelim == nil { - tmpl.RightDelim = stringToPtr("}}") + tmpl.RightDelim = pointerOf("}}") } if tmpl.Envvars == nil { - tmpl.Envvars = boolToPtr(false) + tmpl.Envvars = pointerOf(false) } //COMPAT(0.12) VaultGrace is deprecated and unused as of Vault 0.5 if tmpl.VaultGrace == nil { - tmpl.VaultGrace = timeToPtr(0) + tmpl.VaultGrace = pointerOf(time.Duration(0)) } } @@ -869,16 +869,16 @@ type Vault struct { func (v *Vault) Canonicalize() { if v.Env == nil { - v.Env = boolToPtr(true) + v.Env = pointerOf(true) } if v.Namespace == nil { - v.Namespace = stringToPtr("") + v.Namespace = pointerOf("") } if v.ChangeMode == nil { - v.ChangeMode = stringToPtr("restart") + v.ChangeMode = pointerOf("restart") } if v.ChangeSignal == nil { - v.ChangeSignal = stringToPtr("SIGHUP") + v.ChangeSignal = pointerOf("SIGHUP") } } diff --git a/api/tasks_test.go b/api/tasks_test.go index 02e20506a..b0312263d 100644 --- a/api/tasks_test.go +++ b/api/tasks_test.go @@ -15,8 +15,8 @@ func TestTaskGroup_NewTaskGroup(t *testing.T) { testutil.Parallel(t) grp := NewTaskGroup("grp1", 2) expect := &TaskGroup{ - Name: stringToPtr("grp1"), - Count: intToPtr(2), + Name: pointerOf("grp1"), + Count: pointerOf(2), } if !reflect.DeepEqual(grp, expect) { t.Fatalf("expect: %#v, got: %#v", expect, grp) @@ -79,13 +79,13 @@ func TestTaskGroup_AddAffinity(t *testing.T) { LTarget: "kernel.version", RTarget: "4.6", Operand: "=", - Weight: int8ToPtr(100), + Weight: pointerOf(int8(100)), }, { LTarget: "${node.affinity}", RTarget: "dc2", Operand: "=", - Weight: int8ToPtr(50), + Weight: pointerOf(int8(50)), }, } if !reflect.DeepEqual(grp.Affinities, expect) { @@ -143,7 +143,7 @@ func TestTaskGroup_AddSpread(t *testing.T) { expect := []*Spread{ { Attribute: "${meta.rack}", - Weight: int8ToPtr(100), + Weight: pointerOf(int8(100)), SpreadTarget: []*SpreadTarget{ { Value: "r1", @@ -153,7 +153,7 @@ func TestTaskGroup_AddSpread(t *testing.T) { }, { Attribute: "${node.datacenter}", - Weight: int8ToPtr(100), + Weight: pointerOf(int8(100)), SpreadTarget: []*SpreadTarget{ { Value: "dc1", @@ -263,13 +263,13 @@ func TestTask_Require(t *testing.T) { // Create some require resources resources := &Resources{ - CPU: intToPtr(1250), - MemoryMB: intToPtr(128), - DiskMB: intToPtr(2048), + CPU: pointerOf(1250), + MemoryMB: pointerOf(128), + DiskMB: pointerOf(2048), Networks: []*NetworkResource{ { CIDR: "0.0.0.0/0", - MBits: intToPtr(100), + MBits: pointerOf(100), ReservedPorts: []Port{{"", 80, 0, ""}, {"", 443, 0, ""}}, }, }, @@ -340,13 +340,13 @@ func TestTask_AddAffinity(t *testing.T) { LTarget: "kernel.version", RTarget: "4.6", Operand: "=", - Weight: int8ToPtr(100), + Weight: pointerOf(int8(100)), }, { LTarget: "${node.datacenter}", RTarget: "dc2", Operand: "=", - Weight: int8ToPtr(50), + Weight: pointerOf(int8(50)), }, } if !reflect.DeepEqual(task.Affinities, expect) { @@ -357,8 
+357,8 @@ func TestTask_AddAffinity(t *testing.T) { func TestTask_Artifact(t *testing.T) { testutil.Parallel(t) a := TaskArtifact{ - GetterSource: stringToPtr("http://localhost/foo.txt"), - GetterMode: stringToPtr("file"), + GetterSource: pointerOf("http://localhost/foo.txt"), + GetterMode: pointerOf("file"), GetterHeaders: make(map[string]string), GetterOptions: make(map[string]string), } @@ -396,10 +396,10 @@ func TestTask_Canonicalize_TaskLifecycle(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { tg := &TaskGroup{ - Name: stringToPtr("foo"), + Name: pointerOf("foo"), } j := &Job{ - ID: stringToPtr("test"), + ID: pointerOf("test"), } tc.task.Canonicalize(tg, j) require.Equal(t, tc.expected, tc.task.Lifecycle) @@ -429,16 +429,16 @@ func TestTask_Template_WaitConfig_Canonicalize_and_Copy(t *testing.T) { { name: "all-fields", task: taskWithWait(&WaitConfig{ - Min: timeToPtr(5), - Max: timeToPtr(10), + Min: pointerOf(time.Duration(5)), + Max: pointerOf(time.Duration(10)), }), canonicalized: &WaitConfig{ - Min: timeToPtr(5), - Max: timeToPtr(10), + Min: pointerOf(time.Duration(5)), + Max: pointerOf(time.Duration(10)), }, copied: &WaitConfig{ - Min: timeToPtr(5), - Max: timeToPtr(10), + Min: pointerOf(time.Duration(5)), + Max: pointerOf(time.Duration(10)), }, }, { @@ -456,25 +456,25 @@ func TestTask_Template_WaitConfig_Canonicalize_and_Copy(t *testing.T) { { name: "min-only", task: taskWithWait(&WaitConfig{ - Min: timeToPtr(5), + Min: pointerOf(time.Duration(5)), }), canonicalized: &WaitConfig{ - Min: timeToPtr(5), + Min: pointerOf(time.Duration(5)), }, copied: &WaitConfig{ - Min: timeToPtr(5), + Min: pointerOf(time.Duration(5)), }, }, { name: "max-only", task: taskWithWait(&WaitConfig{ - Max: timeToPtr(10), + Max: pointerOf(time.Duration(10)), }), canonicalized: &WaitConfig{ - Max: timeToPtr(10), + Max: pointerOf(time.Duration(10)), }, copied: &WaitConfig{ - Max: timeToPtr(10), + Max: pointerOf(time.Duration(10)), }, }, } @@ -482,10 +482,10 @@ func TestTask_Template_WaitConfig_Canonicalize_and_Copy(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { tg := &TaskGroup{ - Name: stringToPtr("foo"), + Name: pointerOf("foo"), } j := &Job{ - ID: stringToPtr("test"), + ID: pointerOf("test"), } require.Equal(t, tc.copied, tc.task.Templates[0].Wait.Copy()) tc.task.Canonicalize(tg, j) @@ -504,10 +504,10 @@ func TestTask_Canonicalize_Vault(t *testing.T) { name: "empty", input: &Vault{}, expected: &Vault{ - Env: boolToPtr(true), - Namespace: stringToPtr(""), - ChangeMode: stringToPtr("restart"), - ChangeSignal: stringToPtr("SIGHUP"), + Env: pointerOf(true), + Namespace: pointerOf(""), + ChangeMode: pointerOf("restart"), + ChangeSignal: pointerOf("SIGHUP"), }, }, } @@ -525,22 +525,22 @@ func TestTaskGroup_Canonicalize_Update(t *testing.T) { testutil.Parallel(t) // Job with an Empty() Update job := &Job{ - ID: stringToPtr("test"), + ID: pointerOf("test"), Update: &UpdateStrategy{ - AutoRevert: boolToPtr(false), - AutoPromote: boolToPtr(false), - Canary: intToPtr(0), - HealthCheck: stringToPtr(""), - HealthyDeadline: timeToPtr(0), - ProgressDeadline: timeToPtr(0), - MaxParallel: intToPtr(0), - MinHealthyTime: timeToPtr(0), - Stagger: timeToPtr(0), + AutoRevert: pointerOf(false), + AutoPromote: pointerOf(false), + Canary: pointerOf(0), + HealthCheck: pointerOf(""), + HealthyDeadline: pointerOf(time.Duration(0)), + ProgressDeadline: pointerOf(time.Duration(0)), + MaxParallel: pointerOf(0), + MinHealthyTime: pointerOf(time.Duration(0)), + Stagger: 
pointerOf(time.Duration(0)), }, } job.Canonicalize() tg := &TaskGroup{ - Name: stringToPtr("foo"), + Name: pointerOf("foo"), } tg.Canonicalize(job) assert.NotNil(t, job.Update) @@ -552,15 +552,15 @@ func TestTaskGroup_Canonicalize_Scaling(t *testing.T) { require := require.New(t) job := &Job{ - ID: stringToPtr("test"), + ID: pointerOf("test"), } job.Canonicalize() tg := &TaskGroup{ - Name: stringToPtr("foo"), + Name: pointerOf("foo"), Count: nil, Scaling: &ScalingPolicy{ Min: nil, - Max: int64ToPtr(10), + Max: pointerOf(int64(10)), Policy: nil, Enabled: nil, CreateIndex: 0, @@ -578,7 +578,7 @@ func TestTaskGroup_Canonicalize_Scaling(t *testing.T) { // count == nil => count = Scaling.Min tg.Count = nil - tg.Scaling.Min = int64ToPtr(5) + tg.Scaling.Min = pointerOf(int64(5)) tg.Canonicalize(job) require.NotNil(tg.Count) require.NotNil(tg.Scaling.Min) @@ -586,7 +586,7 @@ func TestTaskGroup_Canonicalize_Scaling(t *testing.T) { require.EqualValues(*tg.Count, *tg.Scaling.Min) // Scaling.Min == nil => Scaling.Min == count - tg.Count = intToPtr(5) + tg.Count = pointerOf(5) tg.Scaling.Min = nil tg.Canonicalize(job) require.NotNil(tg.Count) @@ -595,8 +595,8 @@ func TestTaskGroup_Canonicalize_Scaling(t *testing.T) { require.EqualValues(*tg.Scaling.Min, *tg.Count) // both present, both persisted - tg.Count = intToPtr(5) - tg.Scaling.Min = int64ToPtr(1) + tg.Count = pointerOf(5) + tg.Scaling.Min = pointerOf(int64(1)) tg.Canonicalize(job) require.NotNil(tg.Count) require.NotNil(tg.Scaling.Min) @@ -607,32 +607,32 @@ func TestTaskGroup_Canonicalize_Scaling(t *testing.T) { func TestTaskGroup_Merge_Update(t *testing.T) { testutil.Parallel(t) job := &Job{ - ID: stringToPtr("test"), + ID: pointerOf("test"), Update: &UpdateStrategy{}, } job.Canonicalize() // Merge and canonicalize part of an update stanza tg := &TaskGroup{ - Name: stringToPtr("foo"), + Name: pointerOf("foo"), Update: &UpdateStrategy{ - AutoRevert: boolToPtr(true), - Canary: intToPtr(5), - HealthCheck: stringToPtr("foo"), + AutoRevert: pointerOf(true), + Canary: pointerOf(5), + HealthCheck: pointerOf("foo"), }, } tg.Canonicalize(job) require.Equal(t, &UpdateStrategy{ - AutoRevert: boolToPtr(true), - AutoPromote: boolToPtr(false), - Canary: intToPtr(5), - HealthCheck: stringToPtr("foo"), - HealthyDeadline: timeToPtr(5 * time.Minute), - ProgressDeadline: timeToPtr(10 * time.Minute), - MaxParallel: intToPtr(1), - MinHealthyTime: timeToPtr(10 * time.Second), - Stagger: timeToPtr(30 * time.Second), + AutoRevert: pointerOf(true), + AutoPromote: pointerOf(false), + Canary: pointerOf(5), + HealthCheck: pointerOf("foo"), + HealthyDeadline: pointerOf(5 * time.Minute), + ProgressDeadline: pointerOf(10 * time.Minute), + MaxParallel: pointerOf(1), + MinHealthyTime: pointerOf(10 * time.Second), + Stagger: pointerOf(30 * time.Second), }, tg.Update) } @@ -661,44 +661,44 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) { jobMigrate: nil, taskMigrate: nil, expected: &MigrateStrategy{ - MaxParallel: intToPtr(1), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), + MaxParallel: pointerOf(1), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), }, }, { desc: "Empty job migrate strategy", jobType: "service", jobMigrate: &MigrateStrategy{ - MaxParallel: intToPtr(0), - HealthCheck: stringToPtr(""), - MinHealthyTime: timeToPtr(0), - HealthyDeadline: timeToPtr(0), + MaxParallel: pointerOf(0), + 
HealthCheck: pointerOf(""), + MinHealthyTime: pointerOf(time.Duration(0)), + HealthyDeadline: pointerOf(time.Duration(0)), }, taskMigrate: nil, expected: &MigrateStrategy{ - MaxParallel: intToPtr(0), - HealthCheck: stringToPtr(""), - MinHealthyTime: timeToPtr(0), - HealthyDeadline: timeToPtr(0), + MaxParallel: pointerOf(0), + HealthCheck: pointerOf(""), + MinHealthyTime: pointerOf(time.Duration(0)), + HealthyDeadline: pointerOf(time.Duration(0)), }, }, { desc: "Inherit from job", jobType: "service", jobMigrate: &MigrateStrategy{ - MaxParallel: intToPtr(3), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(2), - HealthyDeadline: timeToPtr(2), + MaxParallel: pointerOf(3), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(time.Duration(2)), + HealthyDeadline: pointerOf(time.Duration(2)), }, taskMigrate: nil, expected: &MigrateStrategy{ - MaxParallel: intToPtr(3), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(2), - HealthyDeadline: timeToPtr(2), + MaxParallel: pointerOf(3), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(time.Duration(2)), + HealthyDeadline: pointerOf(time.Duration(2)), }, }, { @@ -706,67 +706,67 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) { jobType: "service", jobMigrate: nil, taskMigrate: &MigrateStrategy{ - MaxParallel: intToPtr(3), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(2), - HealthyDeadline: timeToPtr(2), + MaxParallel: pointerOf(3), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(time.Duration(2)), + HealthyDeadline: pointerOf(time.Duration(2)), }, expected: &MigrateStrategy{ - MaxParallel: intToPtr(3), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(2), - HealthyDeadline: timeToPtr(2), + MaxParallel: pointerOf(3), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(time.Duration(2)), + HealthyDeadline: pointerOf(time.Duration(2)), }, }, { desc: "Merge from job", jobType: "service", jobMigrate: &MigrateStrategy{ - MaxParallel: intToPtr(11), + MaxParallel: pointerOf(11), }, taskMigrate: &MigrateStrategy{ - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(2), - HealthyDeadline: timeToPtr(2), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(time.Duration(2)), + HealthyDeadline: pointerOf(time.Duration(2)), }, expected: &MigrateStrategy{ - MaxParallel: intToPtr(11), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(2), - HealthyDeadline: timeToPtr(2), + MaxParallel: pointerOf(11), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(time.Duration(2)), + HealthyDeadline: pointerOf(time.Duration(2)), }, }, { desc: "Override from group", jobType: "service", jobMigrate: &MigrateStrategy{ - MaxParallel: intToPtr(11), + MaxParallel: pointerOf(11), }, taskMigrate: &MigrateStrategy{ - MaxParallel: intToPtr(5), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(2), - HealthyDeadline: timeToPtr(2), + MaxParallel: pointerOf(5), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(time.Duration(2)), + HealthyDeadline: pointerOf(time.Duration(2)), }, expected: &MigrateStrategy{ - MaxParallel: intToPtr(5), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(2), - HealthyDeadline: timeToPtr(2), + MaxParallel: pointerOf(5), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(time.Duration(2)), + HealthyDeadline: pointerOf(time.Duration(2)), }, }, { desc: "Parallel from job, defaulting", jobType: "service", jobMigrate: 
&MigrateStrategy{ - MaxParallel: intToPtr(5), + MaxParallel: pointerOf(5), }, taskMigrate: nil, expected: &MigrateStrategy{ - MaxParallel: intToPtr(5), - HealthCheck: stringToPtr("checks"), - MinHealthyTime: timeToPtr(10 * time.Second), - HealthyDeadline: timeToPtr(5 * time.Minute), + MaxParallel: pointerOf(5), + HealthCheck: pointerOf("checks"), + MinHealthyTime: pointerOf(10 * time.Second), + HealthyDeadline: pointerOf(5 * time.Minute), }, }, } @@ -774,13 +774,13 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) { for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { job := &Job{ - ID: stringToPtr("test"), + ID: pointerOf("test"), Migrate: tc.jobMigrate, - Type: stringToPtr(tc.jobType), + Type: pointerOf(tc.jobType), } job.Canonicalize() tg := &TaskGroup{ - Name: stringToPtr("foo"), + Name: pointerOf("foo"), Migrate: tc.taskMigrate, } tg.Canonicalize(job) @@ -793,12 +793,12 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) { func TestSpread_Canonicalize(t *testing.T) { testutil.Parallel(t) job := &Job{ - ID: stringToPtr("test"), - Type: stringToPtr("batch"), + ID: pointerOf("test"), + Type: pointerOf("batch"), } job.Canonicalize() tg := &TaskGroup{ - Name: stringToPtr("foo"), + Name: pointerOf("foo"), } type testCase struct { desc string @@ -818,7 +818,7 @@ func TestSpread_Canonicalize(t *testing.T) { "Zero spread", &Spread{ Attribute: "test", - Weight: int8ToPtr(0), + Weight: pointerOf(int8(0)), }, 0, }, @@ -826,7 +826,7 @@ func TestSpread_Canonicalize(t *testing.T) { "Non Zero spread", &Spread{ Attribute: "test", - Weight: int8ToPtr(100), + Weight: pointerOf(int8(100)), }, 100, }, @@ -854,48 +854,48 @@ func Test_NewDefaultReschedulePolicy(t *testing.T) { desc: "service job type", inputJobType: "service", expected: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - Delay: timeToPtr(30 * time.Second), - DelayFunction: stringToPtr("exponential"), - MaxDelay: timeToPtr(1 * time.Hour), - Unlimited: boolToPtr(true), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + Delay: pointerOf(30 * time.Second), + DelayFunction: pointerOf("exponential"), + MaxDelay: pointerOf(1 * time.Hour), + Unlimited: pointerOf(true), }, }, { desc: "batch job type", inputJobType: "batch", expected: &ReschedulePolicy{ - Attempts: intToPtr(1), - Interval: timeToPtr(24 * time.Hour), - Delay: timeToPtr(5 * time.Second), - DelayFunction: stringToPtr("constant"), - MaxDelay: timeToPtr(0), - Unlimited: boolToPtr(false), + Attempts: pointerOf(1), + Interval: pointerOf(24 * time.Hour), + Delay: pointerOf(5 * time.Second), + DelayFunction: pointerOf("constant"), + MaxDelay: pointerOf(time.Duration(0)), + Unlimited: pointerOf(false), }, }, { desc: "system job type", inputJobType: "system", expected: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - Delay: timeToPtr(0), - DelayFunction: stringToPtr(""), - MaxDelay: timeToPtr(0), - Unlimited: boolToPtr(false), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + Delay: pointerOf(time.Duration(0)), + DelayFunction: pointerOf(""), + MaxDelay: pointerOf(time.Duration(0)), + Unlimited: pointerOf(false), }, }, { desc: "unrecognised job type", inputJobType: "unrecognised", expected: &ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - Delay: timeToPtr(0), - DelayFunction: stringToPtr(""), - MaxDelay: timeToPtr(0), - Unlimited: boolToPtr(false), + Attempts: pointerOf(0), + Interval: pointerOf(time.Duration(0)), + Delay: pointerOf(time.Duration(0)), 
+ DelayFunction: pointerOf(""), + MaxDelay: pointerOf(time.Duration(0)), + Unlimited: pointerOf(false), }, }, } @@ -912,13 +912,13 @@ func TestTaskGroup_Canonicalize_Consul(t *testing.T) { testutil.Parallel(t) t.Run("override job consul in group", func(t *testing.T) { job := &Job{ - ID: stringToPtr("job"), - ConsulNamespace: stringToPtr("ns1"), + ID: pointerOf("job"), + ConsulNamespace: pointerOf("ns1"), } job.Canonicalize() tg := &TaskGroup{ - Name: stringToPtr("group"), + Name: pointerOf("group"), Consul: &Consul{Namespace: "ns2"}, } tg.Canonicalize(job) @@ -929,13 +929,13 @@ func TestTaskGroup_Canonicalize_Consul(t *testing.T) { t.Run("inherit job consul in group", func(t *testing.T) { job := &Job{ - ID: stringToPtr("job"), - ConsulNamespace: stringToPtr("ns1"), + ID: pointerOf("job"), + ConsulNamespace: pointerOf("ns1"), } job.Canonicalize() tg := &TaskGroup{ - Name: stringToPtr("group"), + Name: pointerOf("group"), Consul: nil, // not set, inherit from job } tg.Canonicalize(job) @@ -946,13 +946,13 @@ func TestTaskGroup_Canonicalize_Consul(t *testing.T) { t.Run("set in group only", func(t *testing.T) { job := &Job{ - ID: stringToPtr("job"), + ID: pointerOf("job"), ConsulNamespace: nil, } job.Canonicalize() tg := &TaskGroup{ - Name: stringToPtr("group"), + Name: pointerOf("group"), Consul: &Consul{Namespace: "ns2"}, } tg.Canonicalize(job) diff --git a/api/util_test.go b/api/util_test.go index 7e5f2e1b5..efaecdf49 100644 --- a/api/util_test.go +++ b/api/util_test.go @@ -27,18 +27,18 @@ func testJob() *Job { task := NewTask("task1", "raw_exec"). SetConfig("command", "/bin/sleep"). Require(&Resources{ - CPU: intToPtr(100), - MemoryMB: intToPtr(256), + CPU: pointerOf(100), + MemoryMB: pointerOf(256), }). SetLogConfig(&LogConfig{ - MaxFiles: intToPtr(1), - MaxFileSizeMB: intToPtr(2), + MaxFiles: pointerOf(1), + MaxFileSizeMB: pointerOf(2), }) group := NewTaskGroup("group1", 1). AddTask(task). RequireDisk(&EphemeralDisk{ - SizeMB: intToPtr(25), + SizeMB: pointerOf(25), }) job := NewBatchJob("job1", "redis", "global", 1). 
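The explicit conversions that keep appearing in these hunks (pointerOf(int64(1)), pointerOf(uint64(0)), pointerOf(time.Duration(0))) are deliberate rather than noise: with a single generic constructor the type parameter is inferred from the argument, and an untyped constant defaults to int, so pointerOf(0) produces *int, not *int64 or *time.Duration. A minimal standalone sketch of that inference behaviour, with pointerOf redefined locally so it compiles outside this patch:

package main

import (
	"fmt"
	"time"
)

// pointerOf mirrors the generic helper added in api/utils.go:
// it returns a pointer to whatever value it is given.
func pointerOf[A any](a A) *A {
	return &a
}

func main() {
	a := pointerOf(0)                // untyped constant: A inferred as int -> *int
	b := pointerOf(int64(0))         // explicit conversion                 -> *int64
	c := pointerOf(time.Duration(0)) // explicit conversion                 -> *time.Duration

	fmt.Printf("%T %T %T\n", a, b, c) // prints: *int *int64 *time.Duration
}

Hence the pattern used throughout the test expectations: wherever the old helper fixed the type for the caller (int64ToPtr, uint64ToPtr, timeToPtr), the replacement spells the type out in the argument instead.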
@@ -52,18 +52,18 @@ func testJobWithScalingPolicy() *Job { job := testJob() job.TaskGroups[0].Scaling = &ScalingPolicy{ Policy: map[string]interface{}{}, - Min: int64ToPtr(1), - Max: int64ToPtr(5), - Enabled: boolToPtr(true), + Min: pointerOf(int64(1)), + Max: pointerOf(int64(5)), + Enabled: pointerOf(true), } return job } func testPeriodicJob() *Job { job := testJob().AddPeriodicConfig(&PeriodicConfig{ - Enabled: boolToPtr(true), - Spec: stringToPtr("*/30 * * * *"), - SpecType: stringToPtr("cron"), + Enabled: pointerOf(true), + Spec: pointerOf("*/30 * * * *"), + SpecType: pointerOf("cron"), }) return job } @@ -109,8 +109,8 @@ func testQuotaSpec() *QuotaSpec { { Region: "global", RegionLimit: &Resources{ - CPU: intToPtr(2000), - MemoryMB: intToPtr(2000), + CPU: pointerOf(2000), + MemoryMB: pointerOf(2000), }, }, }, diff --git a/api/utils.go b/api/utils.go index 9e54306f6..a8e1c02e4 100644 --- a/api/utils.go +++ b/api/utils.go @@ -3,44 +3,8 @@ package api import ( "strconv" "strings" - "time" ) -// boolToPtr returns the pointer to a boolean -func boolToPtr(b bool) *bool { - return &b -} - -// int8ToPtr returns the pointer to an int8 -func int8ToPtr(i int8) *int8 { - return &i -} - -// intToPtr returns the pointer to an int -func intToPtr(i int) *int { - return &i -} - -// uint64ToPtr returns the pointer to an uint64 -func uint64ToPtr(u uint64) *uint64 { - return &u -} - -// int64ToPtr returns the pointer to a int64 -func int64ToPtr(i int64) *int64 { - return &i -} - -// stringToPtr returns the pointer to a string -func stringToPtr(str string) *string { - return &str -} - -// timeToPtr returns the pointer to a time stamp -func timeToPtr(t time.Duration) *time.Duration { - return &t -} - // formatFloat converts the floating-point number f to a string, // after rounding it to the passed unit. // @@ -61,3 +25,8 @@ func formatFloat(f float64, maxPrec int) string { return v[:sublen] } + +// pointerOf returns a pointer to a. 
+func pointerOf[A any](a A) *A { + return &a +} diff --git a/api/utils_test.go b/api/utils_test.go index 7e0d789bd..969b1562b 100644 --- a/api/utils_test.go +++ b/api/utils_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/hashicorp/nomad/api/internal/testutil" + "github.com/shoenig/test/must" "github.com/stretchr/testify/require" ) @@ -39,3 +40,14 @@ func TestFormatRoundedFloat(t *testing.T) { require.Equal(t, c.expected, formatFloat(c.input, 3)) } } + +func Test_PointerOf(t *testing.T) { + s := "hello" + sPtr := pointerOf(s) + + must.Eq(t, s, *sPtr) + + b := "bye" + sPtr = &b + must.NotEq(t, s, *sPtr) +} diff --git a/client/agent_endpoint.go b/client/agent_endpoint.go index 12993c498..f7a1c4013 100644 --- a/client/agent_endpoint.go +++ b/client/agent_endpoint.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/nomad/command/agent/host" "github.com/hashicorp/nomad/command/agent/monitor" "github.com/hashicorp/nomad/command/agent/pprof" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" metrics "github.com/armon/go-metrics" @@ -89,16 +89,16 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { encoder := codec.NewEncoder(conn, structs.MsgpackHandle) if err := decoder.Decode(&args); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } // Check acl if aclObj, err := a.c.ResolveToken(args.AuthToken); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(403), encoder) + handleStreamResultError(err, pointer.Of(int64(403)), encoder) return } else if aclObj != nil && !aclObj.AllowAgentRead() { - handleStreamResultError(structs.ErrPermissionDenied, helper.Int64ToPtr(403), encoder) + handleStreamResultError(structs.ErrPermissionDenied, pointer.Of(int64(403)), encoder) return } @@ -108,7 +108,7 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { } if logLevel == log.NoLevel { - handleStreamResultError(errors.New("Unknown log level"), helper.Int64ToPtr(400), encoder) + handleStreamResultError(errors.New("Unknown log level"), pointer.Of(int64(400)), encoder) return } @@ -206,7 +206,7 @@ OUTER: } if streamErr != nil { - handleStreamResultError(streamErr, helper.Int64ToPtr(500), encoder) + handleStreamResultError(streamErr, pointer.Of(int64(500)), encoder) return } } diff --git a/client/alloc_endpoint.go b/client/alloc_endpoint.go index a0bfc3920..2be7dfb41 100644 --- a/client/alloc_endpoint.go +++ b/client/alloc_endpoint.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" nstructs "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" @@ -183,7 +183,7 @@ func (a *Allocations) execImpl(encoder *codec.Encoder, decoder *codec.Decoder, e // Decode the arguments var req cstructs.AllocExecRequest if err := decoder.Decode(&req); err != nil { - return helper.Int64ToPtr(500), err + return pointer.Of(int64(500)), err } if a.c.GetConfig().DisableRemoteExec { @@ -191,13 +191,13 @@ func (a *Allocations) execImpl(encoder *codec.Encoder, decoder *codec.Decoder, e } if req.AllocID == "" { - return helper.Int64ToPtr(400), allocIDNotPresentErr + return pointer.Of(int64(400)), allocIDNotPresentErr } ar, err := a.c.getAllocRunner(req.AllocID) if err != nil { - code := 
helper.Int64ToPtr(500) + code := pointer.Of(int64(500)) if nstructs.IsErrUnknownAllocation(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } return code, err @@ -232,17 +232,17 @@ func (a *Allocations) execImpl(encoder *codec.Encoder, decoder *codec.Decoder, e // Validate the arguments if req.Task == "" { - return helper.Int64ToPtr(400), taskNotPresentErr + return pointer.Of(int64(400)), taskNotPresentErr } if len(req.Cmd) == 0 { - return helper.Int64ToPtr(400), errors.New("command is not present") + return pointer.Of(int64(400)), errors.New("command is not present") } capabilities, err := ar.GetTaskDriverCapabilities(req.Task) if err != nil { - code := helper.Int64ToPtr(500) + code := pointer.Of(int64(500)) if nstructs.IsErrUnknownAllocation(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } return code, err @@ -258,9 +258,9 @@ func (a *Allocations) execImpl(encoder *codec.Encoder, decoder *codec.Decoder, e allocState, err := a.c.GetAllocState(req.AllocID) if err != nil { - code := helper.Int64ToPtr(500) + code := pointer.Of(int64(500)) if nstructs.IsErrUnknownAllocation(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } return code, err @@ -269,11 +269,11 @@ func (a *Allocations) execImpl(encoder *codec.Encoder, decoder *codec.Decoder, e // Check that the task is there taskState := allocState.TaskStates[req.Task] if taskState == nil { - return helper.Int64ToPtr(400), fmt.Errorf("unknown task name %q", req.Task) + return pointer.Of(int64(400)), fmt.Errorf("unknown task name %q", req.Task) } if taskState.StartedAt.IsZero() { - return helper.Int64ToPtr(404), fmt.Errorf("task %q not started yet.", req.Task) + return pointer.Of(int64(404)), fmt.Errorf("task %q not started yet.", req.Task) } ctx, cancel := context.WithCancel(context.Background()) @@ -281,12 +281,12 @@ func (a *Allocations) execImpl(encoder *codec.Encoder, decoder *codec.Decoder, e h := ar.GetTaskExecHandler(req.Task) if h == nil { - return helper.Int64ToPtr(404), fmt.Errorf("task %q is not running.", req.Task) + return pointer.Of(int64(404)), fmt.Errorf("task %q is not running.", req.Task) } err = h(ctx, req.Cmd, req.Tty, newExecStream(decoder, encoder)) if err != nil { - code := helper.Int64ToPtr(500) + code := pointer.Of(int64(500)) return code, err } diff --git a/client/allocrunner/alloc_runner.go b/client/allocrunner/alloc_runner.go index 624c3145e..c50b2b98d 100644 --- a/client/allocrunner/alloc_runner.go +++ b/client/allocrunner/alloc_runner.go @@ -28,7 +28,7 @@ import ( cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/client/vaultclient" agentconsul "github.com/hashicorp/nomad/command/agent/consul" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/device" "github.com/hashicorp/nomad/plugins/drivers" @@ -727,7 +727,7 @@ func (ar *allocRunner) clientAlloc(taskStates map[string]*structs.TaskState) *st if a.ClientStatus == structs.AllocClientStatusFailed && alloc.DeploymentID != "" && !a.DeploymentStatus.HasHealth() { a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } } diff --git a/client/allocrunner/csi_hook_test.go b/client/allocrunner/csi_hook_test.go index 21d3fc91d..bb5362b95 100644 --- a/client/allocrunner/csi_hook_test.go +++ b/client/allocrunner/csi_hook_test.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/nomad/client/pluginmanager" 
"github.com/hashicorp/nomad/client/pluginmanager/csimanager" cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -193,8 +193,8 @@ func TestCSIHook(t *testing.T) { rpcer := mockRPCer{ alloc: alloc, callCounts: callCounts, - hasExistingClaim: helper.BoolToPtr(tc.startsWithClaims), - schedulable: helper.BoolToPtr(!tc.startsUnschedulable), + hasExistingClaim: pointer.Of(tc.startsWithClaims), + schedulable: pointer.Of(!tc.startsUnschedulable), } ar := mockAllocRunner{ res: &cstructs.AllocHookResources{}, @@ -298,8 +298,8 @@ func TestCSIHook_claimVolumesFromAlloc_Validation(t *testing.T) { rpcer := mockRPCer{ alloc: alloc, callCounts: callCounts, - hasExistingClaim: helper.BoolToPtr(false), - schedulable: helper.BoolToPtr(true), + hasExistingClaim: pointer.Of(false), + schedulable: pointer.Of(true), } ar := mockAllocRunner{ diff --git a/client/allocrunner/group_service_hook_test.go b/client/allocrunner/group_service_hook_test.go index 822ae04d7..e05df8cbc 100644 --- a/client/allocrunner/group_service_hook_test.go +++ b/client/allocrunner/group_service_hook_test.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/nomad/client/serviceregistration/wrapper" "github.com/hashicorp/nomad/client/taskenv" agentconsul "github.com/hashicorp/nomad/command/agent/consul" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -74,7 +74,7 @@ func TestGroupServiceHook_ShutdownDelayUpdate(t *testing.T) { ci.Parallel(t) alloc := mock.Alloc() - alloc.Job.TaskGroups[0].ShutdownDelay = helper.TimeToPtr(10 * time.Second) + alloc.Job.TaskGroups[0].ShutdownDelay = pointer.Of(10 * time.Second) logger := testlog.HCLogger(t) consulMockClient := regMock.NewServiceRegistrationHandler(logger) @@ -95,7 +95,7 @@ func TestGroupServiceHook_ShutdownDelayUpdate(t *testing.T) { require.NoError(t, h.Prerun()) // Incease shutdown Delay - alloc.Job.TaskGroups[0].ShutdownDelay = helper.TimeToPtr(15 * time.Second) + alloc.Job.TaskGroups[0].ShutdownDelay = pointer.Of(15 * time.Second) req := &interfaces.RunnerUpdateRequest{Alloc: alloc} require.NoError(t, h.Update(req)) diff --git a/client/allocrunner/taskrunner/connect_native_hook_test.go b/client/allocrunner/taskrunner/connect_native_hook_test.go index 0718db1c8..9f40bd442 100644 --- a/client/allocrunner/taskrunner/connect_native_hook_test.go +++ b/client/allocrunner/taskrunner/connect_native_hook_test.go @@ -14,7 +14,7 @@ import ( "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/client/testutil" agentconsul "github.com/hashicorp/nomad/command/agent/consul" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -478,8 +478,8 @@ func TestTaskRunner_ConnectNativeHook_shareTLS(t *testing.T) { // TLS config consumed by native application ShareSSL: shareSSL, - EnableSSL: helper.BoolToPtr(true), - VerifySSL: helper.BoolToPtr(true), + EnableSSL: pointer.Of(true), + VerifySSL: pointer.Of(true), CAFile: fakeCert, CertFile: fakeCert, KeyFile: fakeCert, @@ -528,7 +528,7 @@ func TestTaskRunner_ConnectNativeHook_shareTLS(t *testing.T) { // so make sure an 
unset value turns the feature on. t.Run("share_ssl is true", func(t *testing.T) { - try(t, helper.BoolToPtr(true)) + try(t, pointer.Of(true)) }) t.Run("share_ssl is nil", func(t *testing.T) { @@ -596,9 +596,9 @@ func TestTaskRunner_ConnectNativeHook_shareTLS_override(t *testing.T) { Addr: consulConfig.Address, // TLS config consumed by native application - ShareSSL: helper.BoolToPtr(true), - EnableSSL: helper.BoolToPtr(true), - VerifySSL: helper.BoolToPtr(true), + ShareSSL: pointer.Of(true), + EnableSSL: pointer.Of(true), + VerifySSL: pointer.Of(true), CAFile: fakeCert, CertFile: fakeCert, KeyFile: fakeCert, diff --git a/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go b/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go index c729db41a..70ebe47b5 100644 --- a/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go +++ b/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go @@ -23,8 +23,8 @@ import ( "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/client/testutil" agentconsul "github.com/hashicorp/nomad/command/agent/consul" - "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/args" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -95,8 +95,8 @@ func TestEnvoyBootstrapHook_decodeTriState(t *testing.T) { ci.Parallel(t) require.Equal(t, "", decodeTriState(nil)) - require.Equal(t, "true", decodeTriState(helper.BoolToPtr(true))) - require.Equal(t, "false", decodeTriState(helper.BoolToPtr(false))) + require.Equal(t, "true", decodeTriState(pointer.Of(true))) + require.Equal(t, "false", decodeTriState(pointer.Of(false))) } var ( diff --git a/client/allocrunner/taskrunner/task_runner_test.go b/client/allocrunner/taskrunner/task_runner_test.go index 845ae4e1c..35a8903bc 100644 --- a/client/allocrunner/taskrunner/task_runner_test.go +++ b/client/allocrunner/taskrunner/task_runner_test.go @@ -31,7 +31,7 @@ import ( agentconsul "github.com/hashicorp/nomad/command/agent/consul" mockdriver "github.com/hashicorp/nomad/drivers/mock" "github.com/hashicorp/nomad/drivers/rawexec" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -1065,7 +1065,7 @@ func TestTaskRunner_NoShutdownDelay(t *testing.T) { maxTimeToFailDuration := time.Duration(testutil.TestMultiplier()) * time.Second alloc := mock.Alloc() - alloc.DesiredTransition = structs.DesiredTransition{NoShutdownDelay: helper.BoolToPtr(true)} + alloc.DesiredTransition = structs.DesiredTransition{NoShutdownDelay: pointer.Of(true)} task := alloc.Job.TaskGroups[0].Tasks[0] task.Services[0].Tags = []string{"tag1"} task.Services = task.Services[:1] // only need 1 for this test diff --git a/client/allocrunner/taskrunner/template/template.go b/client/allocrunner/taskrunner/template/template.go index 3eed8034e..9e307ff01 100644 --- a/client/allocrunner/taskrunner/template/template.go +++ b/client/allocrunner/taskrunner/template/template.go @@ -20,7 +20,7 @@ import ( "github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/taskenv" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" ) @@ -611,7 +611,7 @@ func parseTemplateConfigs(config 
*TaskTemplateManagerConfig) (map[*ctconf.Templa } ct.Wait = &ctconf.WaitConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Min: tmpl.Wait.Min, Max: tmpl.Wait.Max, } @@ -723,7 +723,7 @@ func newRunnerConfig(config *TaskTemplateManagerConfig, if cc.ConsulConfig.EnableSSL != nil && *cc.ConsulConfig.EnableSSL { verify := cc.ConsulConfig.VerifySSL != nil && *cc.ConsulConfig.VerifySSL conf.Consul.SSL = &ctconf.SSLConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Verify: &verify, Cert: &cc.ConsulConfig.CertFile, Key: &cc.ConsulConfig.KeyFile, @@ -738,7 +738,7 @@ func newRunnerConfig(config *TaskTemplateManagerConfig, } conf.Consul.Auth = &ctconf.AuthConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Username: &parts[0], Password: &parts[1], } @@ -767,7 +767,7 @@ func newRunnerConfig(config *TaskTemplateManagerConfig, // Set up the Vault config // Always set these to ensure nothing is picked up from the environment emptyStr := "" - conf.Vault.RenewToken = helper.BoolToPtr(false) + conf.Vault.RenewToken = pointer.Of(false) conf.Vault.Token = &emptyStr if cc.VaultConfig != nil && cc.VaultConfig.IsEnabled() { conf.Vault.Address = &cc.VaultConfig.Addr @@ -786,7 +786,7 @@ func newRunnerConfig(config *TaskTemplateManagerConfig, skipVerify := cc.VaultConfig.TLSSkipVerify != nil && *cc.VaultConfig.TLSSkipVerify verify := !skipVerify conf.Vault.SSL = &ctconf.SSLConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Verify: &verify, Cert: &cc.VaultConfig.TLSCertFile, Key: &cc.VaultConfig.TLSKeyFile, @@ -796,8 +796,8 @@ func newRunnerConfig(config *TaskTemplateManagerConfig, } } else { conf.Vault.SSL = &ctconf.SSLConfig{ - Enabled: helper.BoolToPtr(false), - Verify: helper.BoolToPtr(false), + Enabled: pointer.Of(false), + Verify: pointer.Of(false), Cert: &emptyStr, Key: &emptyStr, CaCert: &emptyStr, diff --git a/client/allocrunner/taskrunner/template/template_test.go b/client/allocrunner/taskrunner/template/template_test.go index b22fe7e18..467882234 100644 --- a/client/allocrunner/taskrunner/template/template_test.go +++ b/client/allocrunner/taskrunner/template/template_test.go @@ -26,7 +26,7 @@ import ( "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/taskenv" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -156,7 +156,7 @@ func newTestHarness(t *testing.T, templates []*structs.Template, consul, vault b TemplateConfig: &config.ClientTemplateConfig{ FunctionDenylist: config.DefaultTemplateFunctionDenylist, DisableSandbox: false, - ConsulRetry: &config.RetryConfig{Backoff: helper.TimeToPtr(10 * time.Millisecond)}, + ConsulRetry: &config.RetryConfig{Backoff: pointer.Of(10 * time.Millisecond)}, }}, emitRate: DefaultMaxTemplateEventRate, } @@ -1489,7 +1489,7 @@ func TestTaskTemplateManager_Config_ServerName(t *testing.T) { c := config.DefaultConfig() c.Node = mock.Node() c.VaultConfig = &sconfig.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Addr: "https://localhost/", TLSServerName: "notlocalhost", } @@ -1517,7 +1517,7 @@ func TestTaskTemplateManager_Config_VaultNamespace(t *testing.T) { c := config.DefaultConfig() c.Node = mock.Node() c.VaultConfig = &sconfig.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Addr: "https://localhost/", 
TLSServerName: "notlocalhost", Namespace: testNS, @@ -1548,7 +1548,7 @@ func TestTaskTemplateManager_Config_VaultNamespace_TaskOverride(t *testing.T) { c := config.DefaultConfig() c.Node = mock.Node() c.VaultConfig = &sconfig.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Addr: "https://localhost/", TLSServerName: "notlocalhost", Namespace: testNS, @@ -1934,7 +1934,7 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { clientConfig.Node = mock.Node() clientConfig.VaultConfig = &sconfig.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Namespace: testNS, } @@ -1944,18 +1944,18 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { // helper to reduce boilerplate waitConfig := &config.WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), } // helper to reduce boilerplate retryConfig := &config.RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(5 * time.Second), - MaxBackoff: helper.TimeToPtr(20 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(5 * time.Second), + MaxBackoff: pointer.Of(20 * time.Second), } - clientConfig.TemplateConfig.MaxStale = helper.TimeToPtr(5 * time.Second) - clientConfig.TemplateConfig.BlockQueryWaitTime = helper.TimeToPtr(60 * time.Second) + clientConfig.TemplateConfig.MaxStale = pointer.Of(5 * time.Second) + clientConfig.TemplateConfig.BlockQueryWaitTime = pointer.Of(60 * time.Second) clientConfig.TemplateConfig.Wait = waitConfig.Copy() clientConfig.TemplateConfig.ConsulRetry = retryConfig.Copy() clientConfig.TemplateConfig.VaultRetry = retryConfig.Copy() @@ -1966,8 +1966,8 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { allocWithOverride.Job.TaskGroups[0].Tasks[0].Templates = []*structs.Template{ { Wait: &structs.WaitConfig{ - Min: helper.TimeToPtr(2 * time.Second), - Max: helper.TimeToPtr(12 * time.Second), + Min: pointer.Of(2 * time.Second), + Max: pointer.Of(12 * time.Second), }, }, } @@ -1982,8 +1982,8 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { { "basic-wait-config", &config.ClientTemplateConfig{ - MaxStale: helper.TimeToPtr(5 * time.Second), - BlockQueryWaitTime: helper.TimeToPtr(60 * time.Second), + MaxStale: pointer.Of(5 * time.Second), + BlockQueryWaitTime: pointer.Of(60 * time.Second), Wait: waitConfig.Copy(), ConsulRetry: retryConfig.Copy(), VaultRetry: retryConfig.Copy(), @@ -1996,8 +1996,8 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { }, &config.Config{ TemplateConfig: &config.ClientTemplateConfig{ - MaxStale: helper.TimeToPtr(5 * time.Second), - BlockQueryWaitTime: helper.TimeToPtr(60 * time.Second), + MaxStale: pointer.Of(5 * time.Second), + BlockQueryWaitTime: pointer.Of(60 * time.Second), Wait: waitConfig.Copy(), ConsulRetry: retryConfig.Copy(), VaultRetry: retryConfig.Copy(), @@ -2006,17 +2006,17 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { }, &templateconfig.TemplateConfig{ Wait: &templateconfig.WaitConfig{ - Enabled: helper.BoolToPtr(true), - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Enabled: pointer.Of(true), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, }, { "template-override", &config.ClientTemplateConfig{ - MaxStale: helper.TimeToPtr(5 * time.Second), - BlockQueryWaitTime: helper.TimeToPtr(60 * time.Second), + 
MaxStale: pointer.Of(5 * time.Second), + BlockQueryWaitTime: pointer.Of(60 * time.Second), Wait: waitConfig.Copy(), ConsulRetry: retryConfig.Copy(), VaultRetry: retryConfig.Copy(), @@ -2029,8 +2029,8 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { }, &config.Config{ TemplateConfig: &config.ClientTemplateConfig{ - MaxStale: helper.TimeToPtr(5 * time.Second), - BlockQueryWaitTime: helper.TimeToPtr(60 * time.Second), + MaxStale: pointer.Of(5 * time.Second), + BlockQueryWaitTime: pointer.Of(60 * time.Second), Wait: waitConfig.Copy(), ConsulRetry: retryConfig.Copy(), VaultRetry: retryConfig.Copy(), @@ -2039,21 +2039,21 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { }, &templateconfig.TemplateConfig{ Wait: &templateconfig.WaitConfig{ - Enabled: helper.BoolToPtr(true), - Min: helper.TimeToPtr(2 * time.Second), - Max: helper.TimeToPtr(12 * time.Second), + Enabled: pointer.Of(true), + Min: pointer.Of(2 * time.Second), + Max: pointer.Of(12 * time.Second), }, }, }, { "bounds-override", &config.ClientTemplateConfig{ - MaxStale: helper.TimeToPtr(5 * time.Second), - BlockQueryWaitTime: helper.TimeToPtr(60 * time.Second), + MaxStale: pointer.Of(5 * time.Second), + BlockQueryWaitTime: pointer.Of(60 * time.Second), Wait: waitConfig.Copy(), WaitBounds: &config.WaitConfig{ - Min: helper.TimeToPtr(3 * time.Second), - Max: helper.TimeToPtr(11 * time.Second), + Min: pointer.Of(3 * time.Second), + Max: pointer.Of(11 * time.Second), }, ConsulRetry: retryConfig.Copy(), VaultRetry: retryConfig.Copy(), @@ -2066,20 +2066,20 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { Templates: []*structs.Template{ { Wait: &structs.WaitConfig{ - Min: helper.TimeToPtr(2 * time.Second), - Max: helper.TimeToPtr(12 * time.Second), + Min: pointer.Of(2 * time.Second), + Max: pointer.Of(12 * time.Second), }, }, }, }, &config.Config{ TemplateConfig: &config.ClientTemplateConfig{ - MaxStale: helper.TimeToPtr(5 * time.Second), - BlockQueryWaitTime: helper.TimeToPtr(60 * time.Second), + MaxStale: pointer.Of(5 * time.Second), + BlockQueryWaitTime: pointer.Of(60 * time.Second), Wait: waitConfig.Copy(), WaitBounds: &config.WaitConfig{ - Min: helper.TimeToPtr(3 * time.Second), - Max: helper.TimeToPtr(11 * time.Second), + Min: pointer.Of(3 * time.Second), + Max: pointer.Of(11 * time.Second), }, ConsulRetry: retryConfig.Copy(), VaultRetry: retryConfig.Copy(), @@ -2088,9 +2088,9 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { }, &templateconfig.TemplateConfig{ Wait: &templateconfig.WaitConfig{ - Enabled: helper.BoolToPtr(true), - Min: helper.TimeToPtr(3 * time.Second), - Max: helper.TimeToPtr(11 * time.Second), + Enabled: pointer.Of(true), + Min: pointer.Of(3 * time.Second), + Max: pointer.Of(11 * time.Second), }, }, }, @@ -2159,8 +2159,8 @@ func TestTaskTemplateManager_Template_Wait_Set(t *testing.T) { Templates: []*structs.Template{ { Wait: &structs.WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, }, diff --git a/client/client.go b/client/client.go index 9c10ff535..6a7c29cc1 100644 --- a/client/client.go +++ b/client/client.go @@ -47,6 +47,7 @@ import ( "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/envoy" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/pool" hstats "github.com/hashicorp/nomad/helper/stats" 
"github.com/hashicorp/nomad/helper/tlsutil" @@ -2386,7 +2387,7 @@ func makeFailedAlloc(add *structs.Allocation, err error) *structs.Allocation { stripped.DeploymentStatus = add.DeploymentStatus.Copy() } else { stripped.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), Timestamp: failTime, } } diff --git a/client/config/artifact_test.go b/client/config/artifact_test.go index 0b296f8f8..a79b4b2b7 100644 --- a/client/config/artifact_test.go +++ b/client/config/artifact_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs/config" "github.com/stretchr/testify/require" ) @@ -34,72 +34,72 @@ func TestArtifactConfigFromAgent(t *testing.T) { { name: "invalid http read timeout", config: &config.ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("invalid"), - HTTPMaxSize: helper.StringToPtr("100GB"), - GCSTimeout: helper.StringToPtr("30m"), - GitTimeout: helper.StringToPtr("30m"), - HgTimeout: helper.StringToPtr("30m"), - S3Timeout: helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("invalid"), + HTTPMaxSize: pointer.Of("100GB"), + GCSTimeout: pointer.Of("30m"), + GitTimeout: pointer.Of("30m"), + HgTimeout: pointer.Of("30m"), + S3Timeout: pointer.Of("30m"), }, expectedError: "error parsing HTTPReadTimeout", }, { name: "invalid http max size", config: &config.ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("30m"), - HTTPMaxSize: helper.StringToPtr("invalid"), - GCSTimeout: helper.StringToPtr("30m"), - GitTimeout: helper.StringToPtr("30m"), - HgTimeout: helper.StringToPtr("30m"), - S3Timeout: helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("30m"), + HTTPMaxSize: pointer.Of("invalid"), + GCSTimeout: pointer.Of("30m"), + GitTimeout: pointer.Of("30m"), + HgTimeout: pointer.Of("30m"), + S3Timeout: pointer.Of("30m"), }, expectedError: "error parsing HTTPMaxSize", }, { name: "invalid gcs timeout", config: &config.ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("30m"), - HTTPMaxSize: helper.StringToPtr("100GB"), - GCSTimeout: helper.StringToPtr("invalid"), - GitTimeout: helper.StringToPtr("30m"), - HgTimeout: helper.StringToPtr("30m"), - S3Timeout: helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("30m"), + HTTPMaxSize: pointer.Of("100GB"), + GCSTimeout: pointer.Of("invalid"), + GitTimeout: pointer.Of("30m"), + HgTimeout: pointer.Of("30m"), + S3Timeout: pointer.Of("30m"), }, expectedError: "error parsing GCSTimeout", }, { name: "invalid git timeout", config: &config.ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("30m"), - HTTPMaxSize: helper.StringToPtr("100GB"), - GCSTimeout: helper.StringToPtr("30m"), - GitTimeout: helper.StringToPtr("invalid"), - HgTimeout: helper.StringToPtr("30m"), - S3Timeout: helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("30m"), + HTTPMaxSize: pointer.Of("100GB"), + GCSTimeout: pointer.Of("30m"), + GitTimeout: pointer.Of("invalid"), + HgTimeout: pointer.Of("30m"), + S3Timeout: pointer.Of("30m"), }, expectedError: "error parsing GitTimeout", }, { name: "invalid hg timeout", config: &config.ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("30m"), - HTTPMaxSize: helper.StringToPtr("100GB"), - GCSTimeout: helper.StringToPtr("30m"), - GitTimeout: helper.StringToPtr("30m"), - HgTimeout: helper.StringToPtr("invalid"), - S3Timeout: helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("30m"), + HTTPMaxSize: 
pointer.Of("100GB"), + GCSTimeout: pointer.Of("30m"), + GitTimeout: pointer.Of("30m"), + HgTimeout: pointer.Of("invalid"), + S3Timeout: pointer.Of("30m"), }, expectedError: "error parsing HgTimeout", }, { name: "invalid s3 timeout", config: &config.ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("30m"), - HTTPMaxSize: helper.StringToPtr("100GB"), - GCSTimeout: helper.StringToPtr("30m"), - GitTimeout: helper.StringToPtr("30m"), - HgTimeout: helper.StringToPtr("30m"), - S3Timeout: helper.StringToPtr("invalid"), + HTTPReadTimeout: pointer.Of("30m"), + HTTPMaxSize: pointer.Of("100GB"), + GCSTimeout: pointer.Of("30m"), + GitTimeout: pointer.Of("30m"), + HgTimeout: pointer.Of("30m"), + S3Timeout: pointer.Of("invalid"), }, expectedError: "error parsing S3Timeout", }, diff --git a/client/config/config.go b/client/config/config.go index 4f840064d..ec0cd28d5 100644 --- a/client/config/config.go +++ b/client/config/config.go @@ -537,7 +537,7 @@ func (wc *WaitConfig) ToConsulTemplate() (*config.WaitConfig, error) { return nil, err } - result := &config.WaitConfig{Enabled: helper.BoolToPtr(true)} + result := &config.WaitConfig{Enabled: pointer.Of(true)} if wc.Min != nil { result.Min = wc.Min @@ -680,7 +680,7 @@ func (rc *RetryConfig) ToConsulTemplate() (*config.RetryConfig, error) { return nil, err } - result := &config.RetryConfig{Enabled: helper.BoolToPtr(true)} + result := &config.RetryConfig{Enabled: pointer.Of(true)} if rc.Attempts != nil { result.Attempts = rc.Attempts @@ -736,11 +736,11 @@ func DefaultConfig() *Config { TemplateConfig: &ClientTemplateConfig{ FunctionDenylist: DefaultTemplateFunctionDenylist, DisableSandbox: false, - BlockQueryWaitTime: helper.TimeToPtr(5 * time.Minute), // match Consul default - MaxStale: helper.TimeToPtr(DefaultTemplateMaxStale), // match Consul default + BlockQueryWaitTime: pointer.Of(5 * time.Minute), // match Consul default + MaxStale: pointer.Of(DefaultTemplateMaxStale), // match Consul default Wait: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(4 * time.Minute), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(4 * time.Minute), }, ConsulRetry: &RetryConfig{ Attempts: pointer.Of[int](0), // unlimited diff --git a/client/config/config_test.go b/client/config/config_test.go index 88f5bd1b7..fada640e2 100644 --- a/client/config/config_test.go +++ b/client/config/config_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/consul-template/config" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/stretchr/testify/require" ) @@ -49,8 +49,8 @@ func TestConfigReadDefault(t *testing.T) { func mockWaitConfig() *WaitConfig { return &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), } } @@ -66,26 +66,26 @@ func TestWaitConfig_Copy(t *testing.T) { "fully-populated", mockWaitConfig(), &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, { "min-only", &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(5 * time.Second), }, &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(5 * time.Second), }, }, { "max-only", &WaitConfig{ - Max: helper.TimeToPtr(5 * time.Second), + Max: pointer.Of(5 * time.Second), }, &WaitConfig{ - Max: helper.TimeToPtr(5 * time.Second), + Max: 
pointer.Of(5 * time.Second), }, }, } @@ -122,7 +122,7 @@ func TestWaitConfig_IsEmpty(t *testing.T) { { "is-not-empty", &WaitConfig{ - Min: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(10 * time.Second), }, false, }, @@ -148,8 +148,8 @@ func TestWaitConfig_IsEqual(t *testing.T) { "are-equal", mockWaitConfig(), &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, true, }, @@ -157,8 +157,8 @@ func TestWaitConfig_IsEqual(t *testing.T) { "min-different", mockWaitConfig(), &WaitConfig{ - Min: helper.TimeToPtr(4 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(4 * time.Second), + Max: pointer.Of(10 * time.Second), }, false, }, @@ -166,8 +166,8 @@ func TestWaitConfig_IsEqual(t *testing.T) { "max-different", mockWaitConfig(), &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(9 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(9 * time.Second), }, false, }, @@ -191,8 +191,8 @@ func TestWaitConfig_IsValid(t *testing.T) { { "is-valid", &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, "", }, @@ -209,15 +209,15 @@ func TestWaitConfig_IsValid(t *testing.T) { { "min-greater-than-max", &WaitConfig{ - Min: helper.TimeToPtr(10 * time.Second), - Max: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(10 * time.Second), + Max: pointer.Of(5 * time.Second), }, "greater than", }, { "max-not-set", &WaitConfig{ - Min: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(10 * time.Second), }, "", }, @@ -248,36 +248,36 @@ func TestWaitConfig_Merge(t *testing.T) { "all-fields", mockWaitConfig(), &WaitConfig{ - Min: helper.TimeToPtr(4 * time.Second), - Max: helper.TimeToPtr(9 * time.Second), + Min: pointer.Of(4 * time.Second), + Max: pointer.Of(9 * time.Second), }, &WaitConfig{ - Min: helper.TimeToPtr(4 * time.Second), - Max: helper.TimeToPtr(9 * time.Second), + Min: pointer.Of(4 * time.Second), + Max: pointer.Of(9 * time.Second), }, }, { "min-only", mockWaitConfig(), &WaitConfig{ - Min: helper.TimeToPtr(4 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(4 * time.Second), + Max: pointer.Of(10 * time.Second), }, &WaitConfig{ - Min: helper.TimeToPtr(4 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(4 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, { "max-only", mockWaitConfig(), &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(9 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(9 * time.Second), }, &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(9 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(9 * time.Second), }, }, } @@ -298,14 +298,14 @@ func TestWaitConfig_ToConsulTemplate(t *testing.T) { ci.Parallel(t) expected := config.WaitConfig{ - Enabled: helper.BoolToPtr(true), - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Enabled: pointer.Of(true), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), } clientWaitConfig := &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), } actual, err := clientWaitConfig.ToConsulTemplate() @@ -316,10 +316,10 @@ func 
TestWaitConfig_ToConsulTemplate(t *testing.T) { func mockRetryConfig() *RetryConfig { return &RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(5 * time.Second), BackoffHCL: "5s", - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), MaxBackoffHCL: "10s", } } @@ -335,29 +335,29 @@ func TestRetryConfig_Copy(t *testing.T) { "fully-populated", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(5 * time.Second), BackoffHCL: "5s", - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), MaxBackoffHCL: "10s", }, }, { "attempts-only", &RetryConfig{ - Attempts: helper.IntToPtr(5), + Attempts: pointer.Of(5), }, &RetryConfig{ - Attempts: helper.IntToPtr(5), + Attempts: pointer.Of(5), }, }, { "backoff-only", &RetryConfig{ - Backoff: helper.TimeToPtr(5 * time.Second), + Backoff: pointer.Of(5 * time.Second), }, &RetryConfig{ - Backoff: helper.TimeToPtr(5 * time.Second), + Backoff: pointer.Of(5 * time.Second), }, }, { @@ -372,10 +372,10 @@ func TestRetryConfig_Copy(t *testing.T) { { "max-backoff-only", &RetryConfig{ - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), }, &RetryConfig{ - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), }, }, { @@ -421,7 +421,7 @@ func TestRetryConfig_IsEmpty(t *testing.T) { { "is-not-empty", &RetryConfig{ - Attempts: helper.IntToPtr(12), + Attempts: pointer.Of(12), }, false, }, @@ -447,10 +447,10 @@ func TestRetryConfig_IsEqual(t *testing.T) { "are-equal", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(5 * time.Second), BackoffHCL: "5s", - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), MaxBackoffHCL: "10s", }, true, @@ -459,10 +459,10 @@ func TestRetryConfig_IsEqual(t *testing.T) { "attempts-different", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(4), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(4), + Backoff: pointer.Of(5 * time.Second), BackoffHCL: "5s", - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), MaxBackoffHCL: "10s", }, false, @@ -471,10 +471,10 @@ func TestRetryConfig_IsEqual(t *testing.T) { "backoff-different", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(4 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(4 * time.Second), BackoffHCL: "5s", - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), MaxBackoffHCL: "10s", }, false, @@ -483,10 +483,10 @@ func TestRetryConfig_IsEqual(t *testing.T) { "backoff-hcl-different", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(5 * time.Second), BackoffHCL: "4s", - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), MaxBackoffHCL: "10s", }, false, @@ -495,10 +495,10 @@ func TestRetryConfig_IsEqual(t *testing.T) { "max-backoff-different", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(5), + Backoff: 
pointer.Of(5 * time.Second), BackoffHCL: "5s", - MaxBackoff: helper.TimeToPtr(9 * time.Second), + MaxBackoff: pointer.Of(9 * time.Second), MaxBackoffHCL: "10s", }, false, @@ -507,10 +507,10 @@ func TestRetryConfig_IsEqual(t *testing.T) { "max-backoff-hcl-different", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(5 * time.Second), BackoffHCL: "5s", - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), MaxBackoffHCL: "9s", }, false, @@ -535,8 +535,8 @@ func TestRetryConfig_IsValid(t *testing.T) { { "is-valid", &RetryConfig{ - Backoff: helper.TimeToPtr(5 * time.Second), - MaxBackoff: helper.TimeToPtr(10 * time.Second), + Backoff: pointer.Of(5 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), }, "", }, @@ -553,30 +553,30 @@ func TestRetryConfig_IsValid(t *testing.T) { { "backoff-greater-than-max-backoff", &RetryConfig{ - Backoff: helper.TimeToPtr(10 * time.Second), - MaxBackoff: helper.TimeToPtr(5 * time.Second), + Backoff: pointer.Of(10 * time.Second), + MaxBackoff: pointer.Of(5 * time.Second), }, "greater than max_backoff", }, { "backoff-not-set", &RetryConfig{ - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), }, "", }, { "max-backoff-not-set", &RetryConfig{ - Backoff: helper.TimeToPtr(2 * time.Minute), + Backoff: pointer.Of(2 * time.Minute), }, "greater than default", }, { "max-backoff-unbounded", &RetryConfig{ - Backoff: helper.TimeToPtr(10 * time.Second), - MaxBackoff: helper.TimeToPtr(0 * time.Second), + Backoff: pointer.Of(10 * time.Second), + MaxBackoff: pointer.Of(0 * time.Second), }, "", }, @@ -607,17 +607,17 @@ func TestRetryConfig_Merge(t *testing.T) { "all-fields", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(4), - Backoff: helper.TimeToPtr(4 * time.Second), + Attempts: pointer.Of(4), + Backoff: pointer.Of(4 * time.Second), BackoffHCL: "4s", - MaxBackoff: helper.TimeToPtr(9 * time.Second), + MaxBackoff: pointer.Of(9 * time.Second), MaxBackoffHCL: "9s", }, &RetryConfig{ - Attempts: helper.IntToPtr(4), - Backoff: helper.TimeToPtr(4 * time.Second), + Attempts: pointer.Of(4), + Backoff: pointer.Of(4 * time.Second), BackoffHCL: "4s", - MaxBackoff: helper.TimeToPtr(9 * time.Second), + MaxBackoff: pointer.Of(9 * time.Second), MaxBackoffHCL: "9s", }, }, @@ -625,17 +625,17 @@ func TestRetryConfig_Merge(t *testing.T) { "attempts-only", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(4), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(4), + Backoff: pointer.Of(5 * time.Second), BackoffHCL: "5s", - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), MaxBackoffHCL: "10s", }, &RetryConfig{ - Attempts: helper.IntToPtr(4), - Backoff: helper.TimeToPtr(5 * time.Second), + Attempts: pointer.Of(4), + Backoff: pointer.Of(5 * time.Second), BackoffHCL: "5s", - MaxBackoff: helper.TimeToPtr(10 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), MaxBackoffHCL: "10s", }, }, @@ -643,17 +643,17 @@ func TestRetryConfig_Merge(t *testing.T) { "multi-field", mockRetryConfig(), &RetryConfig{ - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(4 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(4 * time.Second), BackoffHCL: "4s", - MaxBackoff: helper.TimeToPtr(9 * time.Second), + MaxBackoff: pointer.Of(9 * time.Second), MaxBackoffHCL: "9s", }, &RetryConfig{ - Attempts: helper.IntToPtr(5), - 
Backoff: helper.TimeToPtr(4 * time.Second), + Attempts: pointer.Of(5), + Backoff: pointer.Of(4 * time.Second), BackoffHCL: "4s", - MaxBackoff: helper.TimeToPtr(9 * time.Second), + MaxBackoff: pointer.Of(9 * time.Second), MaxBackoffHCL: "9s", }, }, @@ -675,10 +675,10 @@ func TestRetryConfig_ToConsulTemplate(t *testing.T) { ci.Parallel(t) expected := config.RetryConfig{ - Enabled: helper.BoolToPtr(true), - Attempts: helper.IntToPtr(5), - Backoff: helper.TimeToPtr(5 * time.Second), - MaxBackoff: helper.TimeToPtr(10 * time.Second), + Enabled: pointer.Of(true), + Attempts: pointer.Of(5), + Backoff: pointer.Of(5 * time.Second), + MaxBackoff: pointer.Of(10 * time.Second), } actual := mockRetryConfig() diff --git a/client/config/testing.go b/client/config/testing.go index 0204073fb..511505044 100644 --- a/client/config/testing.go +++ b/client/config/testing.go @@ -7,7 +7,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" testing "github.com/mitchellh/go-testing-interface" @@ -59,7 +59,7 @@ func TestClientConfig(t testing.T) (*Config, func()) { // Helps make sure we are respecting configured parent conf.CgroupParent = "testing.slice" - conf.VaultConfig.Enabled = helper.BoolToPtr(false) + conf.VaultConfig.Enabled = pointer.Of(false) conf.DevMode = true // Loosen GC threshold diff --git a/client/devicemanager/manager_test.go b/client/devicemanager/manager_test.go index c3da419e2..337f5c795 100644 --- a/client/devicemanager/manager_test.go +++ b/client/devicemanager/manager_test.go @@ -11,8 +11,8 @@ import ( plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/state" - "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/pluginutils/loader" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" @@ -42,7 +42,7 @@ var ( }, Attributes: map[string]*psstructs.Attribute{ "memory": { - Int: helper.Int64ToPtr(4), + Int: pointer.Of(int64(4)), Unit: "GB", }, }, @@ -61,7 +61,7 @@ var ( }, Attributes: map[string]*psstructs.Attribute{ "memory": { - Int: helper.Int64ToPtr(2), + Int: pointer.Of(int64(2)), Unit: "GB", }, }, @@ -74,14 +74,14 @@ var ( InstanceStats: map[string]*device.DeviceStats{ nvidiaDevice0ID: { Summary: &psstructs.StatValue{ - IntNumeratorVal: helper.Int64ToPtr(212), + IntNumeratorVal: pointer.Of(int64(212)), Unit: "F", Desc: "Temperature", }, }, nvidiaDevice1ID: { Summary: &psstructs.StatValue{ - IntNumeratorVal: helper.Int64ToPtr(218), + IntNumeratorVal: pointer.Of(int64(218)), Unit: "F", Desc: "Temperature", }, @@ -96,7 +96,7 @@ var ( InstanceStats: map[string]*device.DeviceStats{ intelDeviceID: { Summary: &psstructs.StatValue{ - IntNumeratorVal: helper.Int64ToPtr(220), + IntNumeratorVal: pointer.Of(int64(220)), Unit: "F", Desc: "Temperature", }, diff --git a/client/fs_endpoint.go b/client/fs_endpoint.go index 2796c45c2..3d988520a 100644 --- a/client/fs_endpoint.go +++ b/client/fs_endpoint.go @@ -23,7 +23,7 @@ import ( "github.com/hashicorp/nomad/client/allocdir" sframer "github.com/hashicorp/nomad/client/lib/streamframer" cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" ) @@ -166,32 +166,32 @@ func (f 
*FileSystem) stream(conn io.ReadWriteCloser) { encoder := codec.NewEncoder(conn, structs.MsgpackHandle) if err := decoder.Decode(&req); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } if req.AllocID == "" { - handleStreamResultError(allocIDNotPresentErr, helper.Int64ToPtr(400), encoder) + handleStreamResultError(allocIDNotPresentErr, pointer.Of(int64(400)), encoder) return } alloc, err := f.c.GetAlloc(req.AllocID) if err != nil { - handleStreamResultError(structs.NewErrUnknownAllocation(req.AllocID), helper.Int64ToPtr(404), encoder) + handleStreamResultError(structs.NewErrUnknownAllocation(req.AllocID), pointer.Of(int64(404)), encoder) return } // Check read permissions if aclObj, err := f.c.ResolveToken(req.QueryOptions.AuthToken); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(403), encoder) + handleStreamResultError(err, pointer.Of(int64(403)), encoder) return } else if aclObj != nil && !aclObj.AllowNsOp(alloc.Namespace, acl.NamespaceCapabilityReadFS) { - handleStreamResultError(structs.ErrPermissionDenied, helper.Int64ToPtr(403), encoder) + handleStreamResultError(structs.ErrPermissionDenied, pointer.Of(int64(403)), encoder) return } // Validate the arguments if req.Path == "" { - handleStreamResultError(pathNotPresentErr, helper.Int64ToPtr(400), encoder) + handleStreamResultError(pathNotPresentErr, pointer.Of(int64(400)), encoder) return } switch req.Origin { @@ -199,15 +199,15 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) { case "": req.Origin = "start" default: - handleStreamResultError(invalidOrigin, helper.Int64ToPtr(400), encoder) + handleStreamResultError(invalidOrigin, pointer.Of(int64(400)), encoder) return } fs, err := f.c.GetAllocFS(req.AllocID) if err != nil { - code := helper.Int64ToPtr(500) + code := pointer.Of(int64(500)) if structs.IsErrUnknownAllocation(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } handleStreamResultError(err, code, encoder) @@ -217,13 +217,13 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) { // Calculate the offset fileInfo, err := fs.Stat(req.Path) if err != nil { - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + handleStreamResultError(err, pointer.Of(int64(400)), encoder) return } if fileInfo.IsDir { handleStreamResultError( fmt.Errorf("file %q is a directory", req.Path), - helper.Int64ToPtr(400), encoder) + pointer.Of(int64(400)), encoder) return } @@ -325,7 +325,7 @@ OUTER: } if streamErr != nil { - handleStreamResultError(streamErr, helper.Int64ToPtr(500), encoder) + handleStreamResultError(streamErr, pointer.Of(int64(500)), encoder) return } } @@ -341,17 +341,17 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { encoder := codec.NewEncoder(conn, structs.MsgpackHandle) if err := decoder.Decode(&req); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } if req.AllocID == "" { - handleStreamResultError(allocIDNotPresentErr, helper.Int64ToPtr(400), encoder) + handleStreamResultError(allocIDNotPresentErr, pointer.Of(int64(400)), encoder) return } alloc, err := f.c.GetAlloc(req.AllocID) if err != nil { - handleStreamResultError(structs.NewErrUnknownAllocation(req.AllocID), helper.Int64ToPtr(404), encoder) + handleStreamResultError(structs.NewErrUnknownAllocation(req.AllocID), pointer.Of(int64(404)), encoder) return } @@ -370,13 +370,13 @@ func (f *FileSystem) logs(conn 
io.ReadWriteCloser) { // Validate the arguments if req.Task == "" { - handleStreamResultError(taskNotPresentErr, helper.Int64ToPtr(400), encoder) + handleStreamResultError(taskNotPresentErr, pointer.Of(int64(400)), encoder) return } switch req.LogType { case "stdout", "stderr": default: - handleStreamResultError(logTypeNotPresentErr, helper.Int64ToPtr(400), encoder) + handleStreamResultError(logTypeNotPresentErr, pointer.Of(int64(400)), encoder) return } switch req.Origin { @@ -384,15 +384,15 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { case "": req.Origin = "start" default: - handleStreamResultError(invalidOrigin, helper.Int64ToPtr(400), encoder) + handleStreamResultError(invalidOrigin, pointer.Of(int64(400)), encoder) return } fs, err := f.c.GetAllocFS(req.AllocID) if err != nil { - code := helper.Int64ToPtr(500) + code := pointer.Of(int64(500)) if structs.IsErrUnknownAllocation(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } handleStreamResultError(err, code, encoder) @@ -401,9 +401,9 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { allocState, err := f.c.GetAllocState(req.AllocID) if err != nil { - code := helper.Int64ToPtr(500) + code := pointer.Of(int64(500)) if structs.IsErrUnknownAllocation(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } handleStreamResultError(err, code, encoder) @@ -415,7 +415,7 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { if taskState == nil { handleStreamResultError( fmt.Errorf("unknown task name %q", req.Task), - helper.Int64ToPtr(400), + pointer.Of(int64(400)), encoder) return } @@ -423,7 +423,7 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { if taskState.StartedAt.IsZero() { handleStreamResultError( fmt.Errorf("task %q not started yet. 
No logs available", req.Task), - helper.Int64ToPtr(404), + pointer.Of(int64(404)), encoder) return } diff --git a/client/taskenv/services_test.go b/client/taskenv/services_test.go index eadf49c50..525b6a84d 100644 --- a/client/taskenv/services_test.go +++ b/client/taskenv/services_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) @@ -282,17 +282,17 @@ func TestInterpolate_interpolateConnect(t *testing.T) { }}, }, Meta: map[string]string{"${meta1}": "${meta2}"}, - KillTimeout: helper.TimeToPtr(1 * time.Second), + KillTimeout: pointer.Of(1 * time.Second), LogConfig: &structs.LogConfig{ MaxFiles: 1, MaxFileSizeMB: 2, }, - ShutdownDelay: helper.TimeToPtr(2 * time.Second), + ShutdownDelay: pointer.Of(2 * time.Second), KillSignal: "${signal1}", }, Gateway: &structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(3 * time.Second), + ConnectTimeout: pointer.Of(3 * time.Second), EnvoyGatewayBindTaggedAddresses: true, EnvoyGatewayBindAddresses: map[string]*structs.ConsulGatewayBindAddress{ "${bind1}": { @@ -390,17 +390,17 @@ func TestInterpolate_interpolateConnect(t *testing.T) { }}, }, Meta: map[string]string{"_meta1": "_meta2"}, - KillTimeout: helper.TimeToPtr(1 * time.Second), + KillTimeout: pointer.Of(1 * time.Second), LogConfig: &structs.LogConfig{ MaxFiles: 1, MaxFileSizeMB: 2, }, - ShutdownDelay: helper.TimeToPtr(2 * time.Second), + ShutdownDelay: pointer.Of(2 * time.Second), KillSignal: "_signal1", }, Gateway: &structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(3 * time.Second), + ConnectTimeout: pointer.Of(3 * time.Second), EnvoyGatewayBindTaggedAddresses: true, EnvoyGatewayBindAddresses: map[string]*structs.ConsulGatewayBindAddress{ "_bind1": { diff --git a/command/agent/agent_endpoint_test.go b/command/agent/agent_endpoint_test.go index 85bc61d44..0572b84ac 100644 --- a/command/agent/agent_endpoint_test.go +++ b/command/agent/agent_endpoint_test.go @@ -22,7 +22,7 @@ import ( "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/pool" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -455,7 +455,7 @@ func TestHTTP_AgentMonitor(t *testing.T) { func TestAgent_PprofRequest_Permissions(t *testing.T) { ci.Parallel(t) - trueP, falseP := helper.BoolToPtr(true), helper.BoolToPtr(false) + trueP, falseP := pointer.Of(true), pointer.Of(false) cases := []struct { acl *bool debug *bool @@ -463,7 +463,7 @@ func TestAgent_PprofRequest_Permissions(t *testing.T) { }{ // manually set to false because test helpers // enable to true by default - // enableDebug: helper.BoolToPtr(false), + // enableDebug: pointer.Of(false), {debug: nil, ok: false}, {debug: trueP, ok: true}, {debug: falseP, ok: false}, diff --git a/command/agent/agent_test.go b/command/agent/agent_test.go index 0aaab6432..f01063124 100644 --- a/command/agent/agent_test.go +++ b/command/agent/agent_test.go @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/nomad/ci" cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" 
"github.com/hashicorp/nomad/nomad/structs/config" @@ -259,7 +259,7 @@ func TestAgent_ServerConfig_Limits_Error(t *testing.T) { expectedErr: "rpc_handshake_timeout must be >= 0", limits: config.Limits{ RPCHandshakeTimeout: "-5s", - RPCMaxConnsPerClient: helper.IntToPtr(100), + RPCMaxConnsPerClient: pointer.Of(100), }, }, { @@ -267,7 +267,7 @@ func TestAgent_ServerConfig_Limits_Error(t *testing.T) { expectedErr: "error parsing rpc_handshake_timeout", limits: config.Limits{ RPCHandshakeTimeout: "s", - RPCMaxConnsPerClient: helper.IntToPtr(100), + RPCMaxConnsPerClient: pointer.Of(100), }, }, { @@ -275,7 +275,7 @@ func TestAgent_ServerConfig_Limits_Error(t *testing.T) { expectedErr: "error parsing rpc_handshake_timeout", limits: config.Limits{ RPCHandshakeTimeout: "", - RPCMaxConnsPerClient: helper.IntToPtr(100), + RPCMaxConnsPerClient: pointer.Of(100), }, }, { @@ -283,7 +283,7 @@ func TestAgent_ServerConfig_Limits_Error(t *testing.T) { expectedErr: "rpc_max_conns_per_client must be > 25; found: -100", limits: config.Limits{ RPCHandshakeTimeout: "5s", - RPCMaxConnsPerClient: helper.IntToPtr(-100), + RPCMaxConnsPerClient: pointer.Of(-100), }, }, { @@ -291,7 +291,7 @@ func TestAgent_ServerConfig_Limits_Error(t *testing.T) { expectedErr: "rpc_max_conns_per_client must be > 25; found: 20", limits: config.Limits{ RPCHandshakeTimeout: "5s", - RPCMaxConnsPerClient: helper.IntToPtr(config.LimitsNonStreamingConnsPerClient), + RPCMaxConnsPerClient: pointer.Of(config.LimitsNonStreamingConnsPerClient), }, }, } @@ -335,21 +335,21 @@ func TestAgent_ServerConfig_Limits_OK(t *testing.T) { name: "Zeros are valid", limits: config.Limits{ RPCHandshakeTimeout: "0s", - RPCMaxConnsPerClient: helper.IntToPtr(0), + RPCMaxConnsPerClient: pointer.Of(0), }, }, { name: "Low limits are valid", limits: config.Limits{ RPCHandshakeTimeout: "1ms", - RPCMaxConnsPerClient: helper.IntToPtr(26), + RPCMaxConnsPerClient: pointer.Of(26), }, }, { name: "High limits are valid", limits: config.Limits{ RPCHandshakeTimeout: "5h", - RPCMaxConnsPerClient: helper.IntToPtr(100000), + RPCMaxConnsPerClient: pointer.Of(100000), }, }, } @@ -389,12 +389,12 @@ func TestAgent_ServerConfig_PlanRejectionTracker(t *testing.T) { { name: "valid config", trackerConfig: &PlanRejectionTracker{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), NodeThreshold: 123, NodeWindow: 17 * time.Minute, }, expectedConfig: &PlanRejectionTracker{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), NodeThreshold: 123, NodeWindow: 17 * time.Minute, }, @@ -466,7 +466,7 @@ func TestAgent_ServerConfig_RaftMultiplier_Ok(t *testing.T) { }, { - multiplier: helper.IntToPtr(0), + multiplier: pointer.Of(0), electionTimout: 1 * time.Second, heartbeatTimeout: 1 * time.Second, @@ -474,7 +474,7 @@ func TestAgent_ServerConfig_RaftMultiplier_Ok(t *testing.T) { commitTimeout: 50 * time.Millisecond, }, { - multiplier: helper.IntToPtr(1), + multiplier: pointer.Of(1), electionTimout: 1 * time.Second, heartbeatTimeout: 1 * time.Second, @@ -482,7 +482,7 @@ func TestAgent_ServerConfig_RaftMultiplier_Ok(t *testing.T) { commitTimeout: 50 * time.Millisecond, }, { - multiplier: helper.IntToPtr(5), + multiplier: pointer.Of(5), electionTimout: 5 * time.Second, heartbeatTimeout: 5 * time.Second, @@ -490,7 +490,7 @@ func TestAgent_ServerConfig_RaftMultiplier_Ok(t *testing.T) { commitTimeout: 250 * time.Millisecond, }, { - multiplier: helper.IntToPtr(6), + multiplier: pointer.Of(6), electionTimout: 6 * time.Second, heartbeatTimeout: 6 * time.Second, @@ -498,7 +498,7 @@ func 
TestAgent_ServerConfig_RaftMultiplier_Ok(t *testing.T) { commitTimeout: 300 * time.Millisecond, }, { - multiplier: helper.IntToPtr(10), + multiplier: pointer.Of(10), electionTimout: 10 * time.Second, heartbeatTimeout: 10 * time.Second, @@ -622,7 +622,7 @@ func TestAgent_ClientConfig(t *testing.T) { // Test the default, and then custom setting of the client service // discovery boolean. require.True(t, c.NomadServiceDiscovery) - conf.Client.NomadServiceDiscovery = helper.BoolToPtr(false) + conf.Client.NomadServiceDiscovery = pointer.Of(false) c, err = a.clientConfig() require.NoError(t, err) require.False(t, c.NomadServiceDiscovery) @@ -674,7 +674,7 @@ func TestAgent_HTTPCheck(t *testing.T) { AdvertiseAddrs: &AdvertiseAddrs{HTTP: "advertise:4646"}, normalizedAddrs: &NormalizedAddrs{HTTP: []string{"normalized:4646"}}, Consul: &config.ConsulConfig{ - ChecksUseAdvertise: helper.BoolToPtr(false), + ChecksUseAdvertise: pointer.Of(false), }, TLSConfig: &config.TLSConfig{EnableHTTP: false}, }, @@ -703,7 +703,7 @@ func TestAgent_HTTPCheck(t *testing.T) { t.Run("Plain HTTP + ChecksUseAdvertise", func(t *testing.T) { a := agent() - a.config.Consul.ChecksUseAdvertise = helper.BoolToPtr(true) + a.config.Consul.ChecksUseAdvertise = pointer.Of(true) check := a.agentHTTPCheck(false) if check == nil { t.Fatalf("expected non-nil check") diff --git a/command/agent/alloc_endpoint_test.go b/command/agent/alloc_endpoint_test.go index cfae04dd9..f85cf296f 100644 --- a/command/agent/alloc_endpoint_test.go +++ b/command/agent/alloc_endpoint_test.go @@ -17,7 +17,7 @@ import ( "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -724,7 +724,7 @@ func TestHTTP_AllocSnapshot_Atomic(t *testing.T) { ci.Parallel(t) httpTest(t, func(c *Config) { // Disable the schedulers - c.Server.NumSchedulers = helper.IntToPtr(0) + c.Server.NumSchedulers = pointer.Of(0) }, func(s *TestAgent) { // Create an alloc state := s.server.State() diff --git a/command/agent/command_test.go b/command/agent/command_test.go index 7d221966c..9ce1a0a5f 100644 --- a/command/agent/command_test.go +++ b/command/agent/command_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/mitchellh/cli" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -394,7 +394,7 @@ func TestIsValidConfig(t *testing.T) { Client: &ClientConfig{ Enabled: true, Artifact: &config.ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("-10m"), + HTTPReadTimeout: pointer.Of("-10m"), }, }, }, diff --git a/command/agent/config.go b/command/agent/config.go index 93084a1ea..67f748ae4 100644 --- a/command/agent/config.go +++ b/command/agent/config.go @@ -21,7 +21,7 @@ import ( "github.com/hashicorp/go-sockaddr/template" client "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/fingerprint" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/nomad/structs/config" @@ -974,7 +974,7 @@ func DevConfig(mode *devModeConfig) *Config { conf.Server.BootstrapExpect = 1 conf.EnableDebug = true conf.DisableAnonymousSignature = true - 
conf.Consul.AutoAdvertise = helper.BoolToPtr(true) + conf.Consul.AutoAdvertise = pointer.Of(true) conf.Client.NetworkInterface = mode.iface conf.Client.Options = map[string]string{ "driver.raw_exec.enable": "true", @@ -990,7 +990,7 @@ func DevConfig(mode *devModeConfig) *Config { } conf.Client.Options[fingerprint.TightenNetworkTimeoutsConfig] = "true" conf.Client.BindWildcardDefaultHostNetwork = true - conf.Client.NomadServiceDiscovery = helper.BoolToPtr(true) + conf.Client.NomadServiceDiscovery = pointer.Of(true) conf.Telemetry.PrometheusMetrics = true conf.Telemetry.PublishAllocationMetrics = true conf.Telemetry.PublishNodeMetrics = true @@ -1028,7 +1028,7 @@ func DefaultConfig() *Config { GCDiskUsageThreshold: 80, GCInodeUsageThreshold: 70, GCMaxAllocs: 50, - NoHostUUID: helper.BoolToPtr(true), + NoHostUUID: pointer.Of(true), DisableRemoteExec: false, ServerJoin: &ServerJoin{ RetryJoin: []string{}, @@ -1042,17 +1042,17 @@ func DefaultConfig() *Config { BindWildcardDefaultHostNetwork: true, CNIPath: "/opt/cni/bin", CNIConfigDir: "/opt/cni/config", - NomadServiceDiscovery: helper.BoolToPtr(true), + NomadServiceDiscovery: pointer.Of(true), Artifact: config.DefaultArtifactConfig(), }, Server: &ServerConfig{ Enabled: false, - EnableEventBroker: helper.BoolToPtr(true), - EventBufferSize: helper.IntToPtr(100), + EnableEventBroker: pointer.Of(true), + EventBufferSize: pointer.Of(100), RaftProtocol: 3, StartJoin: []string{}, PlanRejectionTracker: &PlanRejectionTracker{ - Enabled: helper.BoolToPtr(false), + Enabled: pointer.Of(false), NodeThreshold: 100, NodeWindow: 5 * time.Minute, }, @@ -1083,7 +1083,7 @@ func DefaultConfig() *Config { Version: version.GetVersion(), Autopilot: config.DefaultAutopilotConfig(), Audit: &config.AuditConfig{}, - DisableUpdateCheck: helper.BoolToPtr(false), + DisableUpdateCheck: pointer.Of(false), Limits: config.DefaultLimits(), } } @@ -1169,7 +1169,7 @@ func (c *Config) Merge(b *Config) *Config { result.SyslogFacility = b.SyslogFacility } if b.DisableUpdateCheck != nil { - result.DisableUpdateCheck = helper.BoolToPtr(*b.DisableUpdateCheck) + result.DisableUpdateCheck = pointer.Of(*b.DisableUpdateCheck) } if b.DisableAnonymousSignature { result.DisableAnonymousSignature = true @@ -1574,7 +1574,7 @@ func (s *ServerConfig) Merge(b *ServerConfig) *ServerConfig { result.RaftMultiplier = &c } if b.NumSchedulers != nil { - result.NumSchedulers = helper.IntToPtr(*b.NumSchedulers) + result.NumSchedulers = pointer.Of(*b.NumSchedulers) } if b.NodeGCThreshold != "" { result.NodeGCThreshold = b.NodeGCThreshold diff --git a/command/agent/config_parse_test.go b/command/agent/config_parse_test.go index 5e5a1399c..88d566787 100644 --- a/command/agent/config_parse_test.go +++ b/command/agent/config_parse_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/nomad/structs/config" "github.com/stretchr/testify/require" @@ -82,7 +82,7 @@ var basicConfig = &Config{ GCDiskUsageThreshold: 82, GCInodeUsageThreshold: 91, GCMaxAllocs: 50, - NoHostUUID: helper.BoolToPtr(false), + NoHostUUID: pointer.Of(false), DisableRemoteExec: true, HostVolumes: []*structs.ClientHostVolumeConfig{ {Name: "tmp", Path: "/tmp"}, @@ -97,8 +97,8 @@ var basicConfig = &Config{ BootstrapExpect: 5, DataDir: "/tmp/data", RaftProtocol: 3, - RaftMultiplier: helper.IntToPtr(4), - NumSchedulers: helper.IntToPtr(2), + RaftMultiplier: pointer.Of(4), + 
NumSchedulers: pointer.Of(2), EnabledSchedulers: []string{"test"}, NodeGCThreshold: "12h", EvalGCThreshold: "12h", @@ -124,10 +124,10 @@ var basicConfig = &Config{ RedundancyZone: "foo", UpgradeVersion: "0.8.0", EncryptKey: "abc", - EnableEventBroker: helper.BoolToPtr(false), - EventBufferSize: helper.IntToPtr(200), + EnableEventBroker: pointer.Of(false), + EventBufferSize: pointer.Of(200), PlanRejectionTracker: &PlanRejectionTracker{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), NodeThreshold: 100, NodeWindow: 41 * time.Minute, NodeWindowHCL: "41m", @@ -157,7 +157,7 @@ var basicConfig = &Config{ ReplicationToken: "foobar", }, Audit: &config.AuditConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Sinks: []*config.AuditSink{ { DeliveryGuarantee: "enforced", @@ -196,7 +196,7 @@ var basicConfig = &Config{ LeaveOnTerm: true, EnableSyslog: true, SyslogFacility: "LOCAL1", - DisableUpdateCheck: helper.BoolToPtr(true), + DisableUpdateCheck: pointer.Of(true), DisableAnonymousSignature: true, Consul: &config.ConsulConfig{ ServerServiceName: "nomad", @@ -639,7 +639,7 @@ var sample0 = &Config{ Enabled: true, }, Audit: &config.AuditConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Sinks: []*config.AuditSink{ { DeliveryGuarantee: "enforced", @@ -677,11 +677,11 @@ var sample0 = &Config{ SyslogFacility: "LOCAL0", Consul: &config.ConsulConfig{ Token: "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee", - ServerAutoJoin: helper.BoolToPtr(false), - ClientAutoJoin: helper.BoolToPtr(false), + ServerAutoJoin: pointer.Of(false), + ClientAutoJoin: pointer.Of(false), }, Vault: &config.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Role: "nomad-cluster", Addr: "http://host.example.com:8200", }, @@ -694,7 +694,7 @@ var sample0 = &Config{ KeyFile: "/opt/data/nomad/certs/server-key.pem", }, Autopilot: &config.AutopilotConfig{ - CleanupDeadServers: helper.BoolToPtr(true), + CleanupDeadServers: pointer.Of(true), }, } @@ -734,7 +734,7 @@ var sample1 = &Config{ Enabled: true, }, Audit: &config.AuditConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Sinks: []*config.AuditSink{ { Name: "file", @@ -771,13 +771,13 @@ var sample1 = &Config{ EnableSyslog: true, SyslogFacility: "LOCAL0", Consul: &config.ConsulConfig{ - EnableSSL: helper.BoolToPtr(true), + EnableSSL: pointer.Of(true), Token: "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee", - ServerAutoJoin: helper.BoolToPtr(false), - ClientAutoJoin: helper.BoolToPtr(false), + ServerAutoJoin: pointer.Of(false), + ClientAutoJoin: pointer.Of(false), }, Vault: &config.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Role: "nomad-cluster", Addr: "http://host.example.com:8200", }, @@ -790,7 +790,7 @@ var sample1 = &Config{ KeyFile: "/opt/data/nomad/certs/server-key.pem", }, Autopilot: &config.AutopilotConfig{ - CleanupDeadServers: helper.BoolToPtr(true), + CleanupDeadServers: pointer.Of(true), }, } diff --git a/command/agent/config_test.go b/command/agent/config_test.go index cfbb4699a..22a1747fd 100644 --- a/command/agent/config_test.go +++ b/command/agent/config_test.go @@ -16,8 +16,8 @@ import ( "github.com/hashicorp/nomad/ci" client "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/testutil" - "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/freeport" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/nomad/structs/config" 
"github.com/stretchr/testify/require" @@ -62,7 +62,7 @@ func TestConfig_Merge(t *testing.T) { LeaveOnTerm: false, EnableSyslog: false, SyslogFacility: "local0.info", - DisableUpdateCheck: helper.BoolToPtr(false), + DisableUpdateCheck: pointer.Of(false), DisableAnonymousSignature: false, BindAddr: "127.0.0.1", Telemetry: &Telemetry{ @@ -88,7 +88,7 @@ func TestConfig_Merge(t *testing.T) { PrefixFilter: []string{"filter1", "filter2"}, }, Audit: &config.AuditConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Sinks: []*config.AuditSink{ { DeliveryGuarantee: "enforced", @@ -129,7 +129,7 @@ func TestConfig_Merge(t *testing.T) { DiskMB: 10, ReservedPorts: "1,10-30,55", }, - NomadServiceDiscovery: helper.BoolToPtr(false), + NomadServiceDiscovery: pointer.Of(false), }, Server: &ServerConfig{ Enabled: false, @@ -138,18 +138,18 @@ func TestConfig_Merge(t *testing.T) { DataDir: "/tmp/data1", ProtocolVersion: 1, RaftProtocol: 1, - RaftMultiplier: helper.IntToPtr(5), - NumSchedulers: helper.IntToPtr(1), + RaftMultiplier: pointer.Of(5), + NumSchedulers: pointer.Of(1), NodeGCThreshold: "1h", HeartbeatGrace: 30 * time.Second, MinHeartbeatTTL: 30 * time.Second, MaxHeartbeatsPerSecond: 30.0, RedundancyZone: "foo", UpgradeVersion: "foo", - EnableEventBroker: helper.BoolToPtr(false), - EventBufferSize: helper.IntToPtr(0), + EnableEventBroker: pointer.Of(false), + EventBufferSize: pointer.Of(0), PlanRejectionTracker: &PlanRejectionTracker{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), NodeThreshold: 100, NodeWindow: 11 * time.Minute, }, @@ -241,11 +241,11 @@ func TestConfig_Merge(t *testing.T) { LeaveOnTerm: true, EnableSyslog: true, SyslogFacility: "local0.debug", - DisableUpdateCheck: helper.BoolToPtr(true), + DisableUpdateCheck: pointer.Of(true), DisableAnonymousSignature: true, BindAddr: "127.0.0.2", Audit: &config.AuditConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Sinks: []*config.AuditSink{ { DeliveryGuarantee: "enforced", @@ -284,7 +284,7 @@ func TestConfig_Merge(t *testing.T) { CirconusBrokerSelectTag: "dc:dc2", PrefixFilter: []string{"prefix1", "prefix2"}, DisableDispatchedJobSummaryMetrics: true, - FilterDefault: helper.BoolToPtr(false), + FilterDefault: pointer.Of(false), }, Client: &ClientConfig{ Enabled: true, @@ -323,7 +323,7 @@ func TestConfig_Merge(t *testing.T) { GCParallelDestroys: 6, GCDiskUsageThreshold: 71, GCInodeUsageThreshold: 86, - NomadServiceDiscovery: helper.BoolToPtr(false), + NomadServiceDiscovery: pointer.Of(false), }, Server: &ServerConfig{ Enabled: true, @@ -332,8 +332,8 @@ func TestConfig_Merge(t *testing.T) { DataDir: "/tmp/data2", ProtocolVersion: 2, RaftProtocol: 2, - RaftMultiplier: helper.IntToPtr(6), - NumSchedulers: helper.IntToPtr(2), + RaftMultiplier: pointer.Of(6), + NumSchedulers: pointer.Of(2), EnabledSchedulers: []string{structs.JobTypeBatch}, NodeGCThreshold: "12h", HeartbeatGrace: 2 * time.Minute, @@ -346,10 +346,10 @@ func TestConfig_Merge(t *testing.T) { NonVotingServer: true, RedundancyZone: "bar", UpgradeVersion: "bar", - EnableEventBroker: helper.BoolToPtr(true), - EventBufferSize: helper.IntToPtr(100), + EnableEventBroker: pointer.Of(true), + EventBufferSize: pointer.Of(100), PlanRejectionTracker: &PlanRejectionTracker{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), NodeThreshold: 100, NodeWindow: 11 * time.Minute, }, @@ -1364,8 +1364,8 @@ func TestEventBroker_Parse(t *testing.T) { require := require.New(t) { a := &ServerConfig{ - EnableEventBroker: helper.BoolToPtr(false), - 
EventBufferSize: helper.IntToPtr(0), + EnableEventBroker: pointer.Of(false), + EventBufferSize: pointer.Of(0), } b := DefaultConfig().Server b.EnableEventBroker = nil @@ -1378,8 +1378,8 @@ func TestEventBroker_Parse(t *testing.T) { { a := &ServerConfig{ - EnableEventBroker: helper.BoolToPtr(true), - EventBufferSize: helper.IntToPtr(5000), + EnableEventBroker: pointer.Of(true), + EventBufferSize: pointer.Of(5000), } b := DefaultConfig().Server b.EnableEventBroker = nil @@ -1392,12 +1392,12 @@ func TestEventBroker_Parse(t *testing.T) { { a := &ServerConfig{ - EnableEventBroker: helper.BoolToPtr(false), - EventBufferSize: helper.IntToPtr(0), + EnableEventBroker: pointer.Of(false), + EventBufferSize: pointer.Of(0), } b := DefaultConfig().Server - b.EnableEventBroker = helper.BoolToPtr(true) - b.EventBufferSize = helper.IntToPtr(20000) + b.EnableEventBroker = pointer.Of(true) + b.EventBufferSize = pointer.Of(20000) result := a.Merge(b) require.Equal(true, *result.EnableEventBroker) diff --git a/command/agent/consul/connect_test.go b/command/agent/consul/connect_test.go index 53a71f6ce..356587c6d 100644 --- a/command/agent/consul/connect_test.go +++ b/command/agent/consul/connect_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" @@ -501,7 +501,7 @@ func TestConnect_newConnectGateway(t *testing.T) { result := newConnectGateway(&structs.ConsulConnect{ Gateway: &structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), EnvoyGatewayBindTaggedAddresses: false, EnvoyGatewayBindAddresses: nil, EnvoyGatewayNoDefaultBind: false, @@ -531,7 +531,7 @@ func TestConnect_newConnectGateway(t *testing.T) { result := newConnectGateway(&structs.ConsulConnect{ Gateway: &structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), EnvoyGatewayBindTaggedAddresses: true, EnvoyGatewayBindAddresses: map[string]*structs.ConsulGatewayBindAddress{ "service1": { diff --git a/command/agent/http_test.go b/command/agent/http_test.go index 3801e1f75..36189b253 100644 --- a/command/agent/http_test.go +++ b/command/agent/http_test.go @@ -23,7 +23,7 @@ import ( "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -593,11 +593,11 @@ func TestParseBool(t *testing.T) { }, { Input: "true", - Expected: helper.BoolToPtr(true), + Expected: pointer.Of(true), }, { Input: "false", - Expected: helper.BoolToPtr(false), + Expected: pointer.Of(false), }, { Input: "1234", @@ -640,11 +640,11 @@ func Test_parseInt(t *testing.T) { }, { Input: "13", - Expected: helper.IntToPtr(13), + Expected: pointer.Of(13), }, { Input: "99", - Expected: helper.IntToPtr(99), + Expected: pointer.Of(99), }, { Input: "ten", @@ -979,13 +979,13 @@ func TestHTTPServer_Limits_Error(t *testing.T) { { tls: true, timeout: "5s", - limit: helper.IntToPtr(-1), + limit: pointer.Of(-1), expectedErr: "http_max_conns_per_client must be >= 0", }, { tls: false, timeout: "5s", - limit: 
helper.IntToPtr(-1), + limit: pointer.Of(-1), expectedErr: "http_max_conns_per_client must be >= 0", }, } @@ -1082,28 +1082,28 @@ func TestHTTPServer_Limits_OK(t *testing.T) { { tls: false, timeout: "0", - limit: helper.IntToPtr(2), + limit: pointer.Of(2), assertTimeout: false, assertLimit: true, }, { tls: true, timeout: "0", - limit: helper.IntToPtr(2), + limit: pointer.Of(2), assertTimeout: false, assertLimit: true, }, { tls: false, timeout: "5s", - limit: helper.IntToPtr(2), + limit: pointer.Of(2), assertTimeout: false, assertLimit: true, }, { tls: true, timeout: "5s", - limit: helper.IntToPtr(2), + limit: pointer.Of(2), assertTimeout: true, assertLimit: true, }, diff --git a/command/agent/job_endpoint_test.go b/command/agent/job_endpoint_test.go index cbf0a65a4..d10d94ace 100644 --- a/command/agent/job_endpoint_test.go +++ b/command/agent/job_endpoint_test.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/nomad/acl" api "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" @@ -641,8 +641,8 @@ func TestHTTP_jobUpdate_systemScaling(t *testing.T) { httpTest(t, nil, func(s *TestAgent) { // Create the job job := MockJob() - job.Type = helper.StringToPtr("system") - job.TaskGroups[0].Scaling = &api.ScalingPolicy{Enabled: helper.BoolToPtr(true)} + job.Type = pointer.Of("system") + job.TaskGroups[0].Scaling = &api.ScalingPolicy{Enabled: pointer.Of(true)} args := api.JobRegisterRequest{ Job: job, WriteRequest: api.WriteRequest{ @@ -1150,7 +1150,7 @@ func TestHTTP_Job_ScaleTaskGroup(t *testing.T) { newCount := job.TaskGroups[0].Count + 1 scaleReq := &api.ScalingRequest{ - Count: helper.Int64ToPtr(int64(newCount)), + Count: pointer.Of(int64(newCount)), Message: "testing", Target: map[string]string{ "Job": job.ID, @@ -2043,7 +2043,7 @@ func TestJobs_ParsingWriteRequest(t *testing.T) { srv.agent = &Agent{config: &Config{Region: agentRegion}} job := &api.Job{ - Region: helper.StringToPtr(tc.jobRegion), + Region: pointer.Of(tc.jobRegion), Multiregion: tc.multiregion, } @@ -2167,7 +2167,7 @@ func TestJobs_RegionForJob(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { job := &api.Job{ - Region: helper.StringToPtr(tc.jobRegion), + Region: pointer.Of(tc.jobRegion), Multiregion: tc.multiregion, } requestRegion, jobRegion := regionForJob( @@ -2361,15 +2361,15 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { ci.Parallel(t) apiJob := &api.Job{ - Stop: helper.BoolToPtr(true), - Region: helper.StringToPtr("global"), - Namespace: helper.StringToPtr("foo"), - ID: helper.StringToPtr("foo"), - ParentID: helper.StringToPtr("lol"), - Name: helper.StringToPtr("name"), - Type: helper.StringToPtr("service"), - Priority: helper.IntToPtr(50), - AllAtOnce: helper.BoolToPtr(true), + Stop: pointer.Of(true), + Region: pointer.Of("global"), + Namespace: pointer.Of("foo"), + ID: pointer.Of("foo"), + ParentID: pointer.Of("lol"), + Name: pointer.Of("name"), + Type: pointer.Of("service"), + Priority: pointer.Of(50), + AllAtOnce: pointer.Of(true), Datacenters: []string{"dc1", "dc2"}, Constraints: []*api.Constraint{ { @@ -2383,23 +2383,23 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { LTarget: "a", RTarget: "b", Operand: "c", - Weight: helper.Int8ToPtr(50), + Weight: pointer.Of(int8(50)), }, }, Update: &api.UpdateStrategy{ - Stagger: helper.TimeToPtr(1 * time.Second), - MaxParallel: 
helper.IntToPtr(5), - HealthCheck: helper.StringToPtr(structs.UpdateStrategyHealthCheck_Manual), - MinHealthyTime: helper.TimeToPtr(1 * time.Minute), - HealthyDeadline: helper.TimeToPtr(3 * time.Minute), - ProgressDeadline: helper.TimeToPtr(3 * time.Minute), - AutoRevert: helper.BoolToPtr(false), - Canary: helper.IntToPtr(1), + Stagger: pointer.Of(1 * time.Second), + MaxParallel: pointer.Of(5), + HealthCheck: pointer.Of(structs.UpdateStrategyHealthCheck_Manual), + MinHealthyTime: pointer.Of(1 * time.Minute), + HealthyDeadline: pointer.Of(3 * time.Minute), + ProgressDeadline: pointer.Of(3 * time.Minute), + AutoRevert: pointer.Of(false), + Canary: pointer.Of(1), }, Spreads: []*api.Spread{ { Attribute: "${meta.rack}", - Weight: helper.Int8ToPtr(100), + Weight: pointer.Of(int8(100)), SpreadTarget: []*api.SpreadTarget{ { Value: "r1", @@ -2409,11 +2409,11 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { }, }, Periodic: &api.PeriodicConfig{ - Enabled: helper.BoolToPtr(true), - Spec: helper.StringToPtr("spec"), - SpecType: helper.StringToPtr("cron"), - ProhibitOverlap: helper.BoolToPtr(true), - TimeZone: helper.StringToPtr("test zone"), + Enabled: pointer.Of(true), + Spec: pointer.Of("spec"), + SpecType: pointer.Of("cron"), + ProhibitOverlap: pointer.Of(true), + TimeZone: pointer.Of("test zone"), }, ParameterizedJob: &api.ParameterizedJobConfig{ Payload: "payload", @@ -2426,13 +2426,13 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { }, Multiregion: &api.Multiregion{ Strategy: &api.MultiregionStrategy{ - MaxParallel: helper.IntToPtr(2), - OnFailure: helper.StringToPtr("fail_all"), + MaxParallel: pointer.Of(2), + OnFailure: pointer.Of("fail_all"), }, Regions: []*api.MultiregionRegion{ { Name: "west", - Count: helper.IntToPtr(1), + Count: pointer.Of(1), Datacenters: []string{"dc1", "dc2"}, Meta: map[string]string{"region_code": "W"}, }, @@ -2440,8 +2440,8 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { }, TaskGroups: []*api.TaskGroup{ { - Name: helper.StringToPtr("group1"), - Count: helper.IntToPtr(5), + Name: pointer.Of("group1"), + Count: pointer.Of(5), Constraints: []*api.Constraint{ { LTarget: "x", @@ -2454,33 +2454,33 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { LTarget: "x", RTarget: "y", Operand: "z", - Weight: helper.Int8ToPtr(100), + Weight: pointer.Of(int8(100)), }, }, RestartPolicy: &api.RestartPolicy{ - Interval: helper.TimeToPtr(1 * time.Second), - Attempts: helper.IntToPtr(5), - Delay: helper.TimeToPtr(10 * time.Second), - Mode: helper.StringToPtr("delay"), + Interval: pointer.Of(1 * time.Second), + Attempts: pointer.Of(5), + Delay: pointer.Of(10 * time.Second), + Mode: pointer.Of("delay"), }, ReschedulePolicy: &api.ReschedulePolicy{ - Interval: helper.TimeToPtr(12 * time.Hour), - Attempts: helper.IntToPtr(5), - DelayFunction: helper.StringToPtr("constant"), - Delay: helper.TimeToPtr(30 * time.Second), - Unlimited: helper.BoolToPtr(true), - MaxDelay: helper.TimeToPtr(20 * time.Minute), + Interval: pointer.Of(12 * time.Hour), + Attempts: pointer.Of(5), + DelayFunction: pointer.Of("constant"), + Delay: pointer.Of(30 * time.Second), + Unlimited: pointer.Of(true), + MaxDelay: pointer.Of(20 * time.Minute), }, Migrate: &api.MigrateStrategy{ - MaxParallel: helper.IntToPtr(12), - HealthCheck: helper.StringToPtr("task_events"), - MinHealthyTime: helper.TimeToPtr(12 * time.Hour), - HealthyDeadline: helper.TimeToPtr(12 * time.Hour), + MaxParallel: pointer.Of(12), + HealthCheck: pointer.Of("task_events"), + MinHealthyTime: pointer.Of(12 * time.Hour), + HealthyDeadline: 
pointer.Of(12 * time.Hour), }, Spreads: []*api.Spread{ { Attribute: "${node.datacenter}", - Weight: helper.Int8ToPtr(100), + Weight: pointer.Of(int8(100)), SpreadTarget: []*api.SpreadTarget{ { Value: "dc1", @@ -2490,16 +2490,16 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { }, }, EphemeralDisk: &api.EphemeralDisk{ - SizeMB: helper.IntToPtr(100), - Sticky: helper.BoolToPtr(true), - Migrate: helper.BoolToPtr(true), + SizeMB: pointer.Of(100), + Sticky: pointer.Of(true), + Migrate: pointer.Of(true), }, Update: &api.UpdateStrategy{ - HealthCheck: helper.StringToPtr(structs.UpdateStrategyHealthCheck_Checks), - MinHealthyTime: helper.TimeToPtr(2 * time.Minute), - HealthyDeadline: helper.TimeToPtr(5 * time.Minute), - ProgressDeadline: helper.TimeToPtr(5 * time.Minute), - AutoRevert: helper.BoolToPtr(true), + HealthCheck: pointer.Of(structs.UpdateStrategyHealthCheck_Checks), + MinHealthyTime: pointer.Of(2 * time.Minute), + HealthyDeadline: pointer.Of(5 * time.Minute), + ProgressDeadline: pointer.Of(5 * time.Minute), + AutoRevert: pointer.Of(true), }, Meta: map[string]string{ "key": "value", @@ -2523,7 +2523,7 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { }, CheckRestart: &api.CheckRestart{ Limit: 4, - Grace: helper.TimeToPtr(11 * time.Second), + Grace: pointer.Of(11 * time.Second), }, Checks: []api.ServiceCheck{ { @@ -2561,7 +2561,7 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { }, }, }, - MaxClientDisconnect: helper.TimeToPtr(30 * time.Second), + MaxClientDisconnect: pointer.Of(30 * time.Second), Tasks: []*api.Task{ { Name: "task1", @@ -2586,22 +2586,22 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { LTarget: "a", RTarget: "b", Operand: "c", - Weight: helper.Int8ToPtr(50), + Weight: pointer.Of(int8(50)), }, }, VolumeMounts: []*api.VolumeMount{ { - Volume: helper.StringToPtr("vol"), - Destination: helper.StringToPtr("dest"), - ReadOnly: helper.BoolToPtr(false), - PropagationMode: helper.StringToPtr("a"), + Volume: pointer.Of("vol"), + Destination: pointer.Of("dest"), + ReadOnly: pointer.Of(false), + PropagationMode: pointer.Of("a"), }, }, RestartPolicy: &api.RestartPolicy{ - Interval: helper.TimeToPtr(2 * time.Second), - Attempts: helper.IntToPtr(10), - Delay: helper.TimeToPtr(20 * time.Second), - Mode: helper.StringToPtr("delay"), + Interval: pointer.Of(2 * time.Second), + Attempts: pointer.Of(10), + Delay: pointer.Of(20 * time.Second), + Mode: pointer.Of("delay"), }, Services: []*api.Service{ { @@ -2616,7 +2616,7 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { }, CheckRestart: &api.CheckRestart{ Limit: 4, - Grace: helper.TimeToPtr(11 * time.Second), + Grace: pointer.Of(11 * time.Second), }, Checks: []api.ServiceCheck{ { @@ -2651,12 +2651,12 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { }, }, Resources: &api.Resources{ - CPU: helper.IntToPtr(100), - MemoryMB: helper.IntToPtr(10), + CPU: pointer.Of(100), + MemoryMB: pointer.Of(10), Networks: []*api.NetworkResource{ { IP: "10.10.11.1", - MBits: helper.IntToPtr(10), + MBits: pointer.Of(10), Hostname: "foobar", ReservedPorts: []api.Port{ { @@ -2675,7 +2675,7 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { Devices: []*api.RequestedDevice{ { Name: "nvidia/gpu", - Count: helper.Uint64ToPtr(4), + Count: pointer.Of(uint64(4)), Constraints: []*api.Constraint{ { LTarget: "x", @@ -2688,7 +2688,7 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { LTarget: "a", RTarget: "b", Operand: "c", - Weight: helper.Int8ToPtr(50), + Weight: pointer.Of(int8(50)), }, }, }, @@ -2701,46 +2701,46 @@ func TestJobs_ApiJobToStructsJob(t 
*testing.T) { Meta: map[string]string{ "lol": "code", }, - KillTimeout: helper.TimeToPtr(10 * time.Second), + KillTimeout: pointer.Of(10 * time.Second), KillSignal: "SIGQUIT", LogConfig: &api.LogConfig{ - MaxFiles: helper.IntToPtr(10), - MaxFileSizeMB: helper.IntToPtr(100), + MaxFiles: pointer.Of(10), + MaxFileSizeMB: pointer.Of(100), }, Artifacts: []*api.TaskArtifact{ { - GetterSource: helper.StringToPtr("source"), + GetterSource: pointer.Of("source"), GetterOptions: map[string]string{ "a": "b", }, - GetterMode: helper.StringToPtr("dir"), - RelativeDest: helper.StringToPtr("dest"), + GetterMode: pointer.Of("dir"), + RelativeDest: pointer.Of("dest"), }, }, Vault: &api.Vault{ - Namespace: helper.StringToPtr("ns1"), + Namespace: pointer.Of("ns1"), Policies: []string{"a", "b", "c"}, - Env: helper.BoolToPtr(true), - ChangeMode: helper.StringToPtr("c"), - ChangeSignal: helper.StringToPtr("sighup"), + Env: pointer.Of(true), + ChangeMode: pointer.Of("c"), + ChangeSignal: pointer.Of("sighup"), }, Templates: []*api.Template{ { - SourcePath: helper.StringToPtr("source"), - DestPath: helper.StringToPtr("dest"), - EmbeddedTmpl: helper.StringToPtr("embedded"), - ChangeMode: helper.StringToPtr("change"), - ChangeSignal: helper.StringToPtr("signal"), - Splay: helper.TimeToPtr(1 * time.Minute), - Perms: helper.StringToPtr("666"), - Uid: helper.IntToPtr(1000), - Gid: helper.IntToPtr(1000), - LeftDelim: helper.StringToPtr("abc"), - RightDelim: helper.StringToPtr("def"), - Envvars: helper.BoolToPtr(true), + SourcePath: pointer.Of("source"), + DestPath: pointer.Of("dest"), + EmbeddedTmpl: pointer.Of("embedded"), + ChangeMode: pointer.Of("change"), + ChangeSignal: pointer.Of("signal"), + Splay: pointer.Of(1 * time.Minute), + Perms: pointer.Of("666"), + Uid: pointer.Of(1000), + Gid: pointer.Of(1000), + LeftDelim: pointer.Of("abc"), + RightDelim: pointer.Of("def"), + Envvars: pointer.Of(true), Wait: &api.WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, }, @@ -2751,15 +2751,15 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { }, }, }, - ConsulToken: helper.StringToPtr("abc123"), - VaultToken: helper.StringToPtr("def456"), - VaultNamespace: helper.StringToPtr("ghi789"), - Status: helper.StringToPtr("status"), - StatusDescription: helper.StringToPtr("status_desc"), - Version: helper.Uint64ToPtr(10), - CreateIndex: helper.Uint64ToPtr(1), - ModifyIndex: helper.Uint64ToPtr(3), - JobModifyIndex: helper.Uint64ToPtr(5), + ConsulToken: pointer.Of("abc123"), + VaultToken: pointer.Of("def456"), + VaultNamespace: pointer.Of("ghi789"), + Status: pointer.Of("status"), + StatusDescription: pointer.Of("status_desc"), + Version: pointer.Of(uint64(10)), + CreateIndex: pointer.Of(uint64(1)), + ModifyIndex: pointer.Of(uint64(3)), + JobModifyIndex: pointer.Of(uint64(5)), } expected := &structs.Job{ @@ -2962,7 +2962,7 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { }, }, }, - MaxClientDisconnect: helper.TimeToPtr(30 * time.Second), + MaxClientDisconnect: pointer.Of(30 * time.Second), Tasks: []*structs.Task{ { Name: "task1", @@ -3146,8 +3146,8 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { RightDelim: "def", Envvars: true, Wait: &structs.WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, }, @@ -3168,15 +3168,15 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { 
require.Equal(t, expected, structsJob) systemAPIJob := &api.Job{ - Stop: helper.BoolToPtr(true), - Region: helper.StringToPtr("global"), - Namespace: helper.StringToPtr("foo"), - ID: helper.StringToPtr("foo"), - ParentID: helper.StringToPtr("lol"), - Name: helper.StringToPtr("name"), - Type: helper.StringToPtr("system"), - Priority: helper.IntToPtr(50), - AllAtOnce: helper.BoolToPtr(true), + Stop: pointer.Of(true), + Region: pointer.Of("global"), + Namespace: pointer.Of("foo"), + ID: pointer.Of("foo"), + ParentID: pointer.Of("lol"), + Name: pointer.Of("name"), + Type: pointer.Of("system"), + Priority: pointer.Of(50), + AllAtOnce: pointer.Of(true), Datacenters: []string{"dc1", "dc2"}, Constraints: []*api.Constraint{ { @@ -3187,8 +3187,8 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { }, TaskGroups: []*api.TaskGroup{ { - Name: helper.StringToPtr("group1"), - Count: helper.IntToPtr(5), + Name: pointer.Of("group1"), + Count: pointer.Of(5), Constraints: []*api.Constraint{ { LTarget: "x", @@ -3197,15 +3197,15 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { }, }, RestartPolicy: &api.RestartPolicy{ - Interval: helper.TimeToPtr(1 * time.Second), - Attempts: helper.IntToPtr(5), - Delay: helper.TimeToPtr(10 * time.Second), - Mode: helper.StringToPtr("delay"), + Interval: pointer.Of(1 * time.Second), + Attempts: pointer.Of(5), + Delay: pointer.Of(10 * time.Second), + Mode: pointer.Of("delay"), }, EphemeralDisk: &api.EphemeralDisk{ - SizeMB: helper.IntToPtr(100), - Sticky: helper.BoolToPtr(true), - Migrate: helper.BoolToPtr(true), + SizeMB: pointer.Of(100), + Sticky: pointer.Of(true), + Migrate: pointer.Of(true), }, Meta: map[string]string{ "key": "value", @@ -3233,12 +3233,12 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { }, }, Resources: &api.Resources{ - CPU: helper.IntToPtr(100), - MemoryMB: helper.IntToPtr(10), + CPU: pointer.Of(100), + MemoryMB: pointer.Of(10), Networks: []*api.NetworkResource{ { IP: "10.10.11.1", - MBits: helper.IntToPtr(10), + MBits: pointer.Of(10), ReservedPorts: []api.Port{ { Label: "http", @@ -3257,19 +3257,19 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { Meta: map[string]string{ "lol": "code", }, - KillTimeout: helper.TimeToPtr(10 * time.Second), + KillTimeout: pointer.Of(10 * time.Second), KillSignal: "SIGQUIT", LogConfig: &api.LogConfig{ - MaxFiles: helper.IntToPtr(10), - MaxFileSizeMB: helper.IntToPtr(100), + MaxFiles: pointer.Of(10), + MaxFileSizeMB: pointer.Of(100), }, Artifacts: []*api.TaskArtifact{ { - GetterSource: helper.StringToPtr("source"), + GetterSource: pointer.Of("source"), GetterOptions: map[string]string{"a": "b"}, GetterHeaders: map[string]string{"User-Agent": "nomad"}, - GetterMode: helper.StringToPtr("dir"), - RelativeDest: helper.StringToPtr("dest"), + GetterMode: pointer.Of("dir"), + RelativeDest: pointer.Of("dest"), }, }, DispatchPayload: &api.DispatchPayloadConfig{ @@ -3279,12 +3279,12 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { }, }, }, - Status: helper.StringToPtr("status"), - StatusDescription: helper.StringToPtr("status_desc"), - Version: helper.Uint64ToPtr(10), - CreateIndex: helper.Uint64ToPtr(1), - ModifyIndex: helper.Uint64ToPtr(3), - JobModifyIndex: helper.Uint64ToPtr(5), + Status: pointer.Of("status"), + StatusDescription: pointer.Of("status_desc"), + Version: pointer.Of(uint64(10)), + CreateIndex: pointer.Of(uint64(1)), + ModifyIndex: pointer.Of(uint64(3)), + JobModifyIndex: pointer.Of(uint64(5)), } expectedSystemJob := &structs.Job{ @@ -3415,26 +3415,26 @@ func TestJobs_ApiJobToStructsJobUpdate(t *testing.T) { 
apiJob := &api.Job{ Update: &api.UpdateStrategy{ - Stagger: helper.TimeToPtr(1 * time.Second), - MaxParallel: helper.IntToPtr(5), - HealthCheck: helper.StringToPtr(structs.UpdateStrategyHealthCheck_Manual), - MinHealthyTime: helper.TimeToPtr(1 * time.Minute), - HealthyDeadline: helper.TimeToPtr(3 * time.Minute), - ProgressDeadline: helper.TimeToPtr(3 * time.Minute), - AutoRevert: helper.BoolToPtr(false), + Stagger: pointer.Of(1 * time.Second), + MaxParallel: pointer.Of(5), + HealthCheck: pointer.Of(structs.UpdateStrategyHealthCheck_Manual), + MinHealthyTime: pointer.Of(1 * time.Minute), + HealthyDeadline: pointer.Of(3 * time.Minute), + ProgressDeadline: pointer.Of(3 * time.Minute), + AutoRevert: pointer.Of(false), AutoPromote: nil, - Canary: helper.IntToPtr(1), + Canary: pointer.Of(1), }, TaskGroups: []*api.TaskGroup{ { Update: &api.UpdateStrategy{ - Canary: helper.IntToPtr(2), - AutoRevert: helper.BoolToPtr(true), + Canary: pointer.Of(2), + AutoRevert: pointer.Of(true), }, }, { Update: &api.UpdateStrategy{ - Canary: helper.IntToPtr(3), - AutoPromote: helper.BoolToPtr(true), + Canary: pointer.Of(3), + AutoPromote: pointer.Of(true), }, }, }, @@ -3510,16 +3510,16 @@ func TestHTTP_JobValidate_SystemMigrate(t *testing.T) { httpTest(t, nil, func(s *TestAgent) { // Create the job job := &api.Job{ - Region: helper.StringToPtr("global"), + Region: pointer.Of("global"), Datacenters: []string{"dc1"}, - ID: helper.StringToPtr("systemmigrate"), - Name: helper.StringToPtr("systemmigrate"), + ID: pointer.Of("systemmigrate"), + Name: pointer.Of("systemmigrate"), TaskGroups: []*api.TaskGroup{ - {Name: helper.StringToPtr("web")}, + {Name: pointer.Of("web")}, }, // System job... - Type: helper.StringToPtr("system"), + Type: pointer.Of("system"), // ...with an empty migrate stanza Migrate: &api.MigrateStrategy{}, @@ -3549,7 +3549,7 @@ func TestHTTP_JobValidate_SystemMigrate(t *testing.T) { func TestConversion_dereferenceInt(t *testing.T) { ci.Parallel(t) require.Equal(t, 0, dereferenceInt(nil)) - require.Equal(t, 42, dereferenceInt(helper.IntToPtr(42))) + require.Equal(t, 42, dereferenceInt(pointer.Of(42))) } func TestConversion_apiLogConfigToStructs(t *testing.T) { @@ -3559,8 +3559,8 @@ func TestConversion_apiLogConfigToStructs(t *testing.T) { MaxFiles: 2, MaxFileSizeMB: 8, }, apiLogConfigToStructs(&api.LogConfig{ - MaxFiles: helper.IntToPtr(2), - MaxFileSizeMB: helper.IntToPtr(8), + MaxFiles: pointer.Of(2), + MaxFileSizeMB: pointer.Of(8), })) } @@ -3580,8 +3580,8 @@ func TestConversion_apiResourcesToStructs(t *testing.T) { { "plain", &api.Resources{ - CPU: helper.IntToPtr(100), - MemoryMB: helper.IntToPtr(200), + CPU: pointer.Of(100), + MemoryMB: pointer.Of(200), }, &structs.Resources{ CPU: 100, @@ -3591,9 +3591,9 @@ func TestConversion_apiResourcesToStructs(t *testing.T) { { "with memory max", &api.Resources{ - CPU: helper.IntToPtr(100), - MemoryMB: helper.IntToPtr(200), - MemoryMaxMB: helper.IntToPtr(300), + CPU: pointer.Of(100), + MemoryMB: pointer.Of(200), + MemoryMaxMB: pointer.Of(300), }, &structs.Resources{ CPU: 100, @@ -3644,14 +3644,14 @@ func TestConversion_apiConnectSidecarTaskToStructs(t *testing.T) { Config: config, Env: env, Resources: &api.Resources{ - CPU: helper.IntToPtr(1), - MemoryMB: helper.IntToPtr(128), + CPU: pointer.Of(1), + MemoryMB: pointer.Of(128), }, Meta: meta, KillTimeout: &timeout, LogConfig: &api.LogConfig{ - MaxFiles: helper.IntToPtr(2), - MaxFileSizeMB: helper.IntToPtr(8), + MaxFiles: pointer.Of(2), + MaxFileSizeMB: pointer.Of(8), }, ShutdownDelay: &delay, KillSignal: 
"SIGTERM", @@ -3783,7 +3783,7 @@ func TestConversion_ApiConsulConnectToStructs(t *testing.T) { require.Equal(t, &structs.ConsulConnect{ Gateway: &structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(3 * time.Second), + ConnectTimeout: pointer.Of(3 * time.Second), EnvoyGatewayBindTaggedAddresses: true, EnvoyGatewayBindAddresses: map[string]*structs.ConsulGatewayBindAddress{ "service": { @@ -3800,7 +3800,7 @@ func TestConversion_ApiConsulConnectToStructs(t *testing.T) { }, ApiConsulConnectToStructs(&api.ConsulConnect{ Gateway: &api.ConsulGateway{ Proxy: &api.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(3 * time.Second), + ConnectTimeout: pointer.Of(3 * time.Second), EnvoyGatewayBindTaggedAddresses: true, EnvoyGatewayBindAddresses: map[string]*api.ConsulGatewayBindAddress{ "service": { diff --git a/command/agent/testingutils_test.go b/command/agent/testingutils_test.go index e553028cf..152effae3 100644 --- a/command/agent/testingutils_test.go +++ b/command/agent/testingutils_test.go @@ -4,18 +4,18 @@ import ( "time" "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" ) func MockJob() *api.Job { job := &api.Job{ - Region: helper.StringToPtr("global"), - ID: helper.StringToPtr(uuid.Generate()), - Name: helper.StringToPtr("my-job"), - Type: helper.StringToPtr("service"), - Priority: helper.IntToPtr(50), - AllAtOnce: helper.BoolToPtr(false), + Region: pointer.Of("global"), + ID: pointer.Of(uuid.Generate()), + Name: pointer.Of("my-job"), + Type: pointer.Of("service"), + Priority: pointer.Of(50), + AllAtOnce: pointer.Of(false), Datacenters: []string{"dc1"}, Constraints: []*api.Constraint{ { @@ -26,16 +26,16 @@ func MockJob() *api.Job { }, TaskGroups: []*api.TaskGroup{ { - Name: helper.StringToPtr("web"), - Count: helper.IntToPtr(10), + Name: pointer.Of("web"), + Count: pointer.Of(10), EphemeralDisk: &api.EphemeralDisk{ - SizeMB: helper.IntToPtr(150), + SizeMB: pointer.Of(150), }, RestartPolicy: &api.RestartPolicy{ - Attempts: helper.IntToPtr(3), - Interval: helper.TimeToPtr(10 * time.Minute), - Delay: helper.TimeToPtr(1 * time.Minute), - Mode: helper.StringToPtr("delay"), + Attempts: pointer.Of(3), + Interval: pointer.Of(10 * time.Minute), + Delay: pointer.Of(1 * time.Minute), + Mode: pointer.Of("delay"), }, Networks: []*api.NetworkResource{ { @@ -76,8 +76,8 @@ func MockJob() *api.Job { }, LogConfig: api.DefaultLogConfig(), Resources: &api.Resources{ - CPU: helper.IntToPtr(500), - MemoryMB: helper.IntToPtr(256), + CPU: pointer.Of(500), + MemoryMB: pointer.Of(256), }, Meta: map[string]string{ "foo": "bar", @@ -101,6 +101,6 @@ func MockJob() *api.Job { func MockRegionalJob() *api.Job { j := MockJob() - j.Region = helper.StringToPtr("north-america") + j.Region = pointer.Of("north-america") return j } diff --git a/command/helper_devices_test.go b/command/helper_devices_test.go index 062a4c1b1..17577d094 100644 --- a/command/helper_devices_test.go +++ b/command/helper_devices_test.go @@ -5,7 +5,7 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -31,12 +31,12 @@ func TestBuildDeviceStatsSummaryMap(t *testing.T) { InstanceStats: map[string]*api.DeviceStats{ "id1": { Summary: &api.StatValue{ - StringVal: helper.StringToPtr("stat1"), + StringVal: 
pointer.Of("stat1"), }, }, "id2": { Summary: &api.StatValue{ - IntNumeratorVal: helper.Int64ToPtr(2), + IntNumeratorVal: pointer.Of(int64(2)), }, }, }, @@ -47,12 +47,12 @@ func TestBuildDeviceStatsSummaryMap(t *testing.T) { InstanceStats: map[string]*api.DeviceStats{ "id1": { Summary: &api.StatValue{ - StringVal: helper.StringToPtr("stat3"), + StringVal: pointer.Of("stat3"), }, }, "id2": { Summary: &api.StatValue{ - IntNumeratorVal: helper.Int64ToPtr(4), + IntNumeratorVal: pointer.Of(int64(4)), }, }, }, @@ -61,16 +61,16 @@ func TestBuildDeviceStatsSummaryMap(t *testing.T) { expected := map[string]*api.StatValue{ "vendor1/type1/name1[id1]": { - StringVal: helper.StringToPtr("stat1"), + StringVal: pointer.Of("stat1"), }, "vendor1/type1/name1[id2]": { - IntNumeratorVal: helper.Int64ToPtr(2), + IntNumeratorVal: pointer.Of(int64(2)), }, "vendor2/type2[id1]": { - StringVal: helper.StringToPtr("stat3"), + StringVal: pointer.Of("stat3"), }, "vendor2/type2[id2]": { - IntNumeratorVal: helper.Int64ToPtr(4), + IntNumeratorVal: pointer.Of(int64(4)), }, } @@ -82,7 +82,7 @@ func TestFormatDeviceStats(t *testing.T) { statValue := func(v string) *api.StatValue { return &api.StatValue{ - StringVal: helper.StringToPtr(v), + StringVal: pointer.Of(v), } } @@ -145,12 +145,12 @@ func TestNodeStatusCommand_GetDeviceResourcesForNode(t *testing.T) { InstanceStats: map[string]*api.DeviceStats{ "id1": { Summary: &api.StatValue{ - StringVal: helper.StringToPtr("stat1"), + StringVal: pointer.Of("stat1"), }, }, "id2": { Summary: &api.StatValue{ - IntNumeratorVal: helper.Int64ToPtr(2), + IntNumeratorVal: pointer.Of(int64(2)), }, }, }, @@ -161,12 +161,12 @@ func TestNodeStatusCommand_GetDeviceResourcesForNode(t *testing.T) { InstanceStats: map[string]*api.DeviceStats{ "id1": { Summary: &api.StatValue{ - StringVal: helper.StringToPtr("stat3"), + StringVal: pointer.Of("stat3"), }, }, "id2": { Summary: &api.StatValue{ - IntNumeratorVal: helper.Int64ToPtr(4), + IntNumeratorVal: pointer.Of(int64(4)), }, }, }, @@ -219,12 +219,12 @@ func TestNodeStatusCommand_GetDeviceResources(t *testing.T) { InstanceStats: map[string]*api.DeviceStats{ "id1": { Summary: &api.StatValue{ - StringVal: helper.StringToPtr("stat1"), + StringVal: pointer.Of("stat1"), }, }, "id2": { Summary: &api.StatValue{ - IntNumeratorVal: helper.Int64ToPtr(2), + IntNumeratorVal: pointer.Of(int64(2)), }, }, }, @@ -235,12 +235,12 @@ func TestNodeStatusCommand_GetDeviceResources(t *testing.T) { InstanceStats: map[string]*api.DeviceStats{ "id1": { Summary: &api.StatValue{ - StringVal: helper.StringToPtr("stat3"), + StringVal: pointer.Of("stat3"), }, }, "id2": { Summary: &api.StatValue{ - IntNumeratorVal: helper.Int64ToPtr(4), + IntNumeratorVal: pointer.Of(int64(4)), }, }, }, @@ -267,11 +267,11 @@ func TestGetDeviceAttributes(t *testing.T) { Attributes: map[string]*api.Attribute{ "utilization": { - FloatVal: helper.Float64ToPtr(0.78), + FloatVal: pointer.Of(float64(0.78)), Unit: "%", }, "filesystem": { - StringVal: helper.StringToPtr("ext4"), + StringVal: pointer.Of("ext4"), }, }, } diff --git a/command/helpers_test.go b/command/helpers_test.go index 1c19f2dd1..f1e3b7436 100644 --- a/command/helpers_test.go +++ b/command/helpers_test.go @@ -14,8 +14,8 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/flatmap" + "github.com/hashicorp/nomad/helper/pointer" "github.com/kr/pretty" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" @@ -229,18 +229,18 @@ const ( 
var ( expectedApiJob = &api.Job{ - ID: helper.StringToPtr("job1"), - Name: helper.StringToPtr("job1"), - Type: helper.StringToPtr("service"), + ID: pointer.Of("job1"), + Name: pointer.Of("job1"), + Type: pointer.Of("service"), Datacenters: []string{"dc1"}, TaskGroups: []*api.TaskGroup{ { - Name: helper.StringToPtr("group1"), - Count: helper.IntToPtr(1), + Name: pointer.Of("group1"), + Count: pointer.Of(1), RestartPolicy: &api.RestartPolicy{ - Attempts: helper.IntToPtr(10), - Interval: helper.TimeToPtr(15 * time.Second), - Mode: helper.StringToPtr("delay"), + Attempts: pointer.Of(10), + Interval: pointer.Of(15 * time.Second), + Mode: pointer.Of("delay"), }, Tasks: []*api.Task{ diff --git a/command/job_periodic_force_test.go b/command/job_periodic_force_test.go index b7ffc19e0..bbae868b2 100644 --- a/command/job_periodic_force_test.go +++ b/command/job_periodic_force_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -136,10 +136,10 @@ func TestJobPeriodicForceCommand_SuccessfulPeriodicForceDetach(t *testing.T) { // Register a job j := testJob("job1_is_periodic") j.Periodic = &api.PeriodicConfig{ - SpecType: helper.StringToPtr(api.PeriodicSpecCron), - Spec: helper.StringToPtr("*/15 * * * * *"), - ProhibitOverlap: helper.BoolToPtr(true), - TimeZone: helper.StringToPtr("Europe/Minsk"), + SpecType: pointer.Of(api.PeriodicSpecCron), + Spec: pointer.Of("*/15 * * * * *"), + ProhibitOverlap: pointer.Of(true), + TimeZone: pointer.Of("Europe/Minsk"), } ui := cli.NewMockUi() @@ -178,10 +178,10 @@ func TestJobPeriodicForceCommand_SuccessfulPeriodicForce(t *testing.T) { // Register a job j := testJob("job2_is_periodic") j.Periodic = &api.PeriodicConfig{ - SpecType: helper.StringToPtr(api.PeriodicSpecCron), - Spec: helper.StringToPtr("*/15 * * * * *"), - ProhibitOverlap: helper.BoolToPtr(true), - TimeZone: helper.StringToPtr("Europe/Minsk"), + SpecType: pointer.Of(api.PeriodicSpecCron), + Spec: pointer.Of("*/15 * * * * *"), + ProhibitOverlap: pointer.Of(true), + TimeZone: pointer.Of("Europe/Minsk"), } ui := cli.NewMockUi() diff --git a/command/job_run.go b/command/job_run.go index 805680f84..425479029 100644 --- a/command/job_run.go +++ b/command/job_run.go @@ -10,7 +10,7 @@ import ( "time" "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/posener/complete" ) @@ -264,11 +264,11 @@ func (c *JobRunCommand) Run(args []string) int { } if consulToken != "" { - job.ConsulToken = helper.StringToPtr(consulToken) + job.ConsulToken = pointer.Of(consulToken) } if consulNamespace != "" { - job.ConsulNamespace = helper.StringToPtr(consulNamespace) + job.ConsulNamespace = pointer.Of(consulNamespace) } // Parse the Vault token @@ -278,11 +278,11 @@ func (c *JobRunCommand) Run(args []string) int { } if vaultToken != "" { - job.VaultToken = helper.StringToPtr(vaultToken) + job.VaultToken = pointer.Of(vaultToken) } if vaultNamespace != "" { - job.VaultNamespace = helper.StringToPtr(vaultNamespace) + job.VaultNamespace = pointer.Of(vaultNamespace) } if output { diff --git a/command/job_scale_test.go b/command/job_scale_test.go index d3726c55c..a7fd2a461 100644 --- a/command/job_scale_test.go +++ b/command/job_scale_test.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/nomad/api" 
"github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" ) @@ -83,17 +83,17 @@ func TestJobScaleCommand_MultiGroup(t *testing.T) { SetConfig("run_for", "5s"). SetConfig("exit_code", 0). Require(&api.Resources{ - MemoryMB: helper.IntToPtr(256), - CPU: helper.IntToPtr(100), + MemoryMB: pointer.Of(256), + CPU: pointer.Of(100), }). SetLogConfig(&api.LogConfig{ - MaxFiles: helper.IntToPtr(1), - MaxFileSizeMB: helper.IntToPtr(2), + MaxFiles: pointer.Of(1), + MaxFileSizeMB: pointer.Of(2), }) group2 := api.NewTaskGroup("group2", 1). AddTask(task). RequireDisk(&api.EphemeralDisk{ - SizeMB: helper.IntToPtr(20), + SizeMB: pointer.Of(20), }) job.AddTaskGroup(group2) diff --git a/command/job_scaling_events_test.go b/command/job_scaling_events_test.go index 530021280..eb062d277 100644 --- a/command/job_scaling_events_test.go +++ b/command/job_scaling_events_test.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" ) @@ -62,7 +62,7 @@ func TestJobScalingEventsCommand_Run(t *testing.T) { // Perform a scaling action to generate an event. _, _, err = client.Jobs().Scale( "scale_events_test_job", - "group1", helper.IntToPtr(2), + "group1", pointer.Of(2), "searchable custom test message", false, nil, nil) if err != nil { t.Fatalf("err: %s", err) diff --git a/command/job_validate.go b/command/job_validate.go index 1ddafee3f..258d1ee32 100644 --- a/command/job_validate.go +++ b/command/job_validate.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/command/agent" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/posener/complete" ) @@ -161,11 +161,11 @@ func (c *JobValidateCommand) Run(args []string) int { } if vaultToken != "" { - job.VaultToken = helper.StringToPtr(vaultToken) + job.VaultToken = pointer.Of(vaultToken) } if vaultNamespace != "" { - job.VaultNamespace = helper.StringToPtr(vaultNamespace) + job.VaultNamespace = pointer.Of(vaultNamespace) } // Check that the job is valid diff --git a/command/node_drain_test.go b/command/node_drain_test.go index 02b3f11e0..26eae57d7 100644 --- a/command/node_drain_test.go +++ b/command/node_drain_test.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" "github.com/posener/complete" @@ -49,12 +49,12 @@ func TestNodeDrainCommand_Detach(t *testing.T) { // Register a job to create an alloc to drain that will block draining job := &api.Job{ - ID: helper.StringToPtr("mock_service"), - Name: helper.StringToPtr("mock_service"), + ID: pointer.Of("mock_service"), + Name: pointer.Of("mock_service"), Datacenters: []string{"dc1"}, TaskGroups: []*api.TaskGroup{ { - Name: helper.StringToPtr("mock_group"), + Name: pointer.Of("mock_group"), Tasks: []*api.Task{ { Name: "mock_task", @@ -126,19 +126,19 @@ func TestNodeDrainCommand_Monitor(t *testing.T) { // Register a service job to create allocs to drain serviceCount := 3 job := &api.Job{ - ID: helper.StringToPtr("mock_service"), - Name: helper.StringToPtr("mock_service"), + ID: 
pointer.Of("mock_service"), + Name: pointer.Of("mock_service"), Datacenters: []string{"dc1"}, - Type: helper.StringToPtr("service"), + Type: pointer.Of("service"), TaskGroups: []*api.TaskGroup{ { - Name: helper.StringToPtr("mock_group"), + Name: pointer.Of("mock_group"), Count: &serviceCount, Migrate: &api.MigrateStrategy{ - MaxParallel: helper.IntToPtr(1), - HealthCheck: helper.StringToPtr("task_states"), - MinHealthyTime: helper.TimeToPtr(10 * time.Millisecond), - HealthyDeadline: helper.TimeToPtr(5 * time.Minute), + MaxParallel: pointer.Of(1), + HealthCheck: pointer.Of("task_states"), + MinHealthyTime: pointer.Of(10 * time.Millisecond), + HealthyDeadline: pointer.Of(5 * time.Minute), }, Tasks: []*api.Task{ { @@ -148,8 +148,8 @@ func TestNodeDrainCommand_Monitor(t *testing.T) { "run_for": "10m", }, Resources: &api.Resources{ - CPU: helper.IntToPtr(50), - MemoryMB: helper.IntToPtr(50), + CPU: pointer.Of(50), + MemoryMB: pointer.Of(50), }, }, }, @@ -162,14 +162,14 @@ func TestNodeDrainCommand_Monitor(t *testing.T) { // Register a system job to ensure it is ignored during draining sysjob := &api.Job{ - ID: helper.StringToPtr("mock_system"), - Name: helper.StringToPtr("mock_system"), + ID: pointer.Of("mock_system"), + Name: pointer.Of("mock_system"), Datacenters: []string{"dc1"}, - Type: helper.StringToPtr("system"), + Type: pointer.Of("system"), TaskGroups: []*api.TaskGroup{ { - Name: helper.StringToPtr("mock_sysgroup"), - Count: helper.IntToPtr(1), + Name: pointer.Of("mock_sysgroup"), + Count: pointer.Of(1), Tasks: []*api.Task{ { Name: "mock_systask", @@ -178,8 +178,8 @@ func TestNodeDrainCommand_Monitor(t *testing.T) { "run_for": "10m", }, Resources: &api.Resources{ - CPU: helper.IntToPtr(50), - MemoryMB: helper.IntToPtr(50), + CPU: pointer.Of(50), + MemoryMB: pointer.Of(50), }, }, }, diff --git a/command/node_status.go b/command/node_status.go index bb6702125..f212235ca 100644 --- a/command/node_status.go +++ b/command/node_status.go @@ -12,7 +12,7 @@ import ( humanize "github.com/dustin/go-humanize" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/api/contexts" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/posener/complete" ) @@ -942,9 +942,9 @@ func computeNodeTotalResources(node *api.Node) api.Resources { if res == nil { res = &api.Resources{} } - total.CPU = helper.IntToPtr(*r.CPU - *res.CPU) - total.MemoryMB = helper.IntToPtr(*r.MemoryMB - *res.MemoryMB) - total.DiskMB = helper.IntToPtr(*r.DiskMB - *res.DiskMB) + total.CPU = pointer.Of(*r.CPU - *res.CPU) + total.MemoryMB = pointer.Of(*r.MemoryMB - *res.MemoryMB) + total.DiskMB = pointer.Of(*r.DiskMB - *res.DiskMB) return total } diff --git a/command/operator_debug_test.go b/command/operator_debug_test.go index 04471ed7c..d986ac26a 100644 --- a/command/operator_debug_test.go +++ b/command/operator_debug_test.go @@ -19,6 +19,7 @@ import ( clienttest "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" @@ -884,7 +885,7 @@ func testServerWithoutLeader(t *testing.T, runClient bool, cb func(*agent.Config a := agent.NewTestAgent(t, t.Name(), func(config *agent.Config) { config.Client.Enabled = runClient config.Server.Enabled = true - config.Server.NumSchedulers = helper.IntToPtr(0) + config.Server.NumSchedulers = pointer.Of(0) config.Server.BootstrapExpect = 3 
if cb != nil { diff --git a/command/quota_delete_test.go b/command/quota_delete_test.go index f8afb7079..69cb2f4d2 100644 --- a/command/quota_delete_test.go +++ b/command/quota_delete_test.go @@ -99,7 +99,7 @@ func testQuotaSpec() *api.QuotaSpec { { Region: "global", RegionLimit: &api.Resources{ - CPU: helper.IntToPtr(100), + CPU: pointer.Of(100), }, }, }, diff --git a/command/scaling_policy_info_test.go b/command/scaling_policy_info_test.go index 962460bb7..8271477f2 100644 --- a/command/scaling_policy_info_test.go +++ b/command/scaling_policy_info_test.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" ) @@ -64,9 +64,9 @@ func TestScalingPolicyInfoCommand_Run(t *testing.T) { // Generate an example scaling policy. job.TaskGroups[0].Scaling = &api.ScalingPolicy{ - Enabled: helper.BoolToPtr(true), - Min: helper.Int64ToPtr(1), - Max: helper.Int64ToPtr(1), + Enabled: pointer.Of(true), + Min: pointer.Of(int64(1)), + Max: pointer.Of(int64(1)), } // Register the job. diff --git a/command/scaling_policy_list_test.go b/command/scaling_policy_list_test.go index 20439379d..36ffa400c 100644 --- a/command/scaling_policy_list_test.go +++ b/command/scaling_policy_list_test.go @@ -5,7 +5,7 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) @@ -31,9 +31,9 @@ func TestScalingPolicyListCommand_Run(t *testing.T) { // Generate an example scaling policy. scalingPolicy := api.ScalingPolicy{ Type: api.ScalingPolicyTypeHorizontal, - Enabled: helper.BoolToPtr(true), - Min: helper.Int64ToPtr(1), - Max: helper.Int64ToPtr(1), + Enabled: pointer.Of(true), + Min: pointer.Of(int64(1)), + Max: pointer.Of(int64(1)), } // Iterate the jobs, add the scaling policy and register. diff --git a/command/util_test.go b/command/util_test.go index 474b0ded2..03efe6bb0 100644 --- a/command/util_test.go +++ b/command/util_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/command/agent" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/testutil" ) @@ -49,18 +49,18 @@ func testJob(jobID string) *api.Job { SetConfig("run_for", "5s"). SetConfig("exit_code", 0). Require(&api.Resources{ - MemoryMB: helper.IntToPtr(256), - CPU: helper.IntToPtr(100), + MemoryMB: pointer.Of(256), + CPU: pointer.Of(100), }). SetLogConfig(&api.LogConfig{ - MaxFiles: helper.IntToPtr(1), - MaxFileSizeMB: helper.IntToPtr(2), + MaxFiles: pointer.Of(1), + MaxFileSizeMB: pointer.Of(2), }) group := api.NewTaskGroup("group1", 1). AddTask(task). RequireDisk(&api.EphemeralDisk{ - SizeMB: helper.IntToPtr(20), + SizeMB: pointer.Of(20), }) job := api.NewBatchJob(jobID, jobID, "global", 1). @@ -76,18 +76,18 @@ func testMultiRegionJob(jobID, region, datacenter string) *api.Job { SetConfig("run_for", "15s"). SetConfig("exit_code", 0). Require(&api.Resources{ - MemoryMB: helper.IntToPtr(256), - CPU: helper.IntToPtr(100), + MemoryMB: pointer.Of(256), + CPU: pointer.Of(100), }). SetLogConfig(&api.LogConfig{ - MaxFiles: helper.IntToPtr(1), - MaxFileSizeMB: helper.IntToPtr(2), + MaxFiles: pointer.Of(1), + MaxFileSizeMB: pointer.Of(2), }) group := api.NewTaskGroup("group1", 1). AddTask(task). 
RequireDisk(&api.EphemeralDisk{ - SizeMB: helper.IntToPtr(20), + SizeMB: pointer.Of(20), }) job := api.NewServiceJob(jobID, jobID, region, 1).AddDatacenter(datacenter).AddTaskGroup(group) diff --git a/drivers/docker/driver_linux_test.go b/drivers/docker/driver_linux_test.go index e6c51a1fc..a00291e81 100644 --- a/drivers/docker/driver_linux_test.go +++ b/drivers/docker/driver_linux_test.go @@ -12,8 +12,8 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/testutil" - "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/freeport" + "github.com/hashicorp/nomad/helper/pointer" tu "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/require" ) @@ -69,7 +69,7 @@ func TestDockerDriver_PluginConfig_PidsLimit(t *testing.T) { cfg.PidsLimit = 3 opts, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") require.NoError(t, err) - require.Equal(t, helper.Int64ToPtr(3), opts.HostConfig.PidsLimit) + require.Equal(t, pointer.Of(int64(3)), opts.HostConfig.PidsLimit) } func TestDockerDriver_PidsLimit(t *testing.T) { diff --git a/drivers/docker/fingerprint.go b/drivers/docker/fingerprint.go index f37a16978..d6b260d38 100644 --- a/drivers/docker/fingerprint.go +++ b/drivers/docker/fingerprint.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/plugins/drivers" pstructs "github.com/hashicorp/nomad/plugins/shared/structs" ) @@ -40,14 +40,14 @@ func (d *Driver) setDetected(detected bool) { // setFingerprintSuccess marks the driver as having fingerprinted successfully func (d *Driver) setFingerprintSuccess() { d.fingerprintLock.Lock() - d.fingerprintSuccess = helper.BoolToPtr(true) + d.fingerprintSuccess = pointer.Of(true) d.fingerprintLock.Unlock() } // setFingerprintFailure marks the driver as having failed fingerprinting func (d *Driver) setFingerprintFailure() { d.fingerprintLock.Lock() - d.fingerprintSuccess = helper.BoolToPtr(false) + d.fingerprintSuccess = pointer.Of(false) d.fingerprintLock.Unlock() } diff --git a/drivers/exec/driver.go b/drivers/exec/driver.go index eff9627ea..01e449592 100644 --- a/drivers/exec/driver.go +++ b/drivers/exec/driver.go @@ -16,8 +16,8 @@ import ( "github.com/hashicorp/nomad/drivers/shared/eventer" "github.com/hashicorp/nomad/drivers/shared/executor" "github.com/hashicorp/nomad/drivers/shared/resolvconf" - "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/pluginutils/loader" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/drivers" "github.com/hashicorp/nomad/plugins/drivers/utils" @@ -247,14 +247,14 @@ func NewExecDriver(ctx context.Context, logger hclog.Logger) drivers.DriverPlugi // setFingerprintSuccess marks the driver as having fingerprinted successfully func (d *Driver) setFingerprintSuccess() { d.fingerprintLock.Lock() - d.fingerprintSuccess = helper.BoolToPtr(true) + d.fingerprintSuccess = pointer.Of(true) d.fingerprintLock.Unlock() } // setFingerprintFailure marks the driver as having failed fingerprinting func (d *Driver) setFingerprintFailure() { d.fingerprintLock.Lock() - d.fingerprintSuccess = helper.BoolToPtr(false) + d.fingerprintSuccess = pointer.Of(false) d.fingerprintLock.Unlock() } diff --git a/e2e/e2eutil/utils.go b/e2e/e2eutil/utils.go index 75c616060..b98beb9eb 100644 --- a/e2e/e2eutil/utils.go +++ b/e2e/e2eutil/utils.go @@ -7,7 +7,7 @@ import ( "time" api 
"github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/jobspec2" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -58,7 +58,7 @@ func stringToPtrOrNil(s string) *string { if s == "" { return nil } - return helper.StringToPtr(s) + return pointer.Of(s) } func Parse2(t *testing.T, jobFile string) (*api.Job, error) { @@ -74,7 +74,7 @@ func RegisterAllocs(t *testing.T, nomadClient *api.Client, jobFile, jobID, cToke require.NoError(t, err) // Set custom job ID (distinguish among tests) - job.ID = helper.StringToPtr(jobID) + job.ID = pointer.Of(jobID) // Set a Consul "operator" token for the job, if provided. job.ConsulToken = stringToPtrOrNil(cToken) diff --git a/e2e/scaling/scaling.go b/e2e/scaling/scaling.go index cb046ae9c..cb231bed6 100644 --- a/e2e/scaling/scaling.go +++ b/e2e/scaling/scaling.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/e2e/e2eutil" "github.com/hashicorp/nomad/e2e/framework" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" ) @@ -74,7 +74,7 @@ func (tc *ScalingE2ETest) TestScalingBasic(f *framework.F) { // Simple scaling action. testMeta := map[string]interface{}{"scaling-e2e-test": "value"} scaleResp, _, err := tc.Nomad().Jobs().Scale( - jobID, "horizontally_scalable", helper.IntToPtr(3), + jobID, "horizontally_scalable", pointer.Of(3), "Nomad e2e testing", false, testMeta, nil) f.NoError(err) f.NotEmpty(scaleResp.EvalID) @@ -87,11 +87,11 @@ func (tc *ScalingE2ETest) TestScalingBasic(f *framework.F) { // Attempt break break the policy min/max parameters. _, _, err = tc.Nomad().Jobs().Scale( - jobID, "horizontally_scalable", helper.IntToPtr(4), + jobID, "horizontally_scalable", pointer.Of(4), "Nomad e2e testing", false, nil, nil) f.Error(err) _, _, err = tc.Nomad().Jobs().Scale( - jobID, "horizontally_scalable", helper.IntToPtr(1), + jobID, "horizontally_scalable", pointer.Of(1), "Nomad e2e testing", false, nil, nil) f.Error(err) @@ -143,22 +143,22 @@ func (tc *ScalingE2ETest) TestScalingNamespaces(f *framework.F) { // We shouldn't be able to trigger scaling across the namespace boundary. _, _, err = tc.Nomad().Jobs().Scale( - defaultJobID, "horizontally_scalable", helper.IntToPtr(3), + defaultJobID, "horizontally_scalable", pointer.Of(3), "Nomad e2e testing", false, nil, &aWriteOpts) f.Error(err) _, _, err = tc.Nomad().Jobs().Scale( - aJobID, "horizontally_scalable", helper.IntToPtr(3), + aJobID, "horizontally_scalable", pointer.Of(3), "Nomad e2e testing", false, nil, &defaultWriteOpts) f.Error(err) // We should be able to trigger scaling when using the correct namespace, // duh. 
_, _, err = tc.Nomad().Jobs().Scale( - defaultJobID, "horizontally_scalable", helper.IntToPtr(3), + defaultJobID, "horizontally_scalable", pointer.Of(3), "Nomad e2e testing", false, nil, &defaultWriteOpts) f.NoError(err) _, _, err = tc.Nomad().Jobs().Scale( - aJobID, "horizontally_scalable", helper.IntToPtr(3), + aJobID, "horizontally_scalable", pointer.Of(3), "Nomad e2e testing", false, nil, &aWriteOpts) f.NoError(err) } diff --git a/e2e/vaultcompat/consts_test.go b/e2e/vaultcompat/consts_test.go index cd09b8d2e..12e2558c6 100644 --- a/e2e/vaultcompat/consts_test.go +++ b/e2e/vaultcompat/consts_test.go @@ -2,7 +2,7 @@ package vaultcompat import ( "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" ) const ( @@ -45,12 +45,12 @@ var ( // job is a test job that is used to request a Vault token and cat the token // out before exiting. job = &api.Job{ - ID: helper.StringToPtr("test"), - Type: helper.StringToPtr("batch"), + ID: pointer.Of("test"), + Type: pointer.Of("batch"), Datacenters: []string{"dc1"}, TaskGroups: []*api.TaskGroup{ { - Name: helper.StringToPtr("test"), + Name: pointer.Of("test"), Tasks: []*api.Task{ { Name: "test", @@ -65,8 +65,8 @@ var ( }, }, RestartPolicy: &api.RestartPolicy{ - Attempts: helper.IntToPtr(0), - Mode: helper.StringToPtr("fail"), + Attempts: pointer.Of(0), + Mode: pointer.Of("fail"), }, }, }, diff --git a/e2e/vaultcompat/vault_test.go b/e2e/vaultcompat/vault_test.go index 27446ebbd..0fcb25ffd 100644 --- a/e2e/vaultcompat/vault_test.go +++ b/e2e/vaultcompat/vault_test.go @@ -18,7 +18,7 @@ import ( "github.com/hashicorp/go-version" "github.com/hashicorp/nomad/command/agent" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs/config" "github.com/hashicorp/nomad/testutil" vapi "github.com/hashicorp/vault/api" @@ -331,10 +331,10 @@ func testVaultCompatibility(t *testing.T, vault string, version string) { if c.Vault == nil { c.Vault = &config.VaultConfig{} } - c.Vault.Enabled = helper.BoolToPtr(true) + c.Vault.Enabled = pointer.Of(true) c.Vault.Token = token c.Vault.Role = "nomad-cluster" - c.Vault.AllowUnauthenticated = helper.BoolToPtr(true) + c.Vault.AllowUnauthenticated = pointer.Of(true) c.Vault.Addr = v.HTTPAddr }) defer nomad.Shutdown() diff --git a/helper/funcs.go b/helper/funcs.go index 8b5137cad..6325dcb91 100644 --- a/helper/funcs.go +++ b/helper/funcs.go @@ -70,67 +70,6 @@ func HashUUID(input string) (output string, hashed bool) { return output, true } -// BoolToPtr returns the pointer to a boolean. -// -// Deprecated; use pointer.Of instead. -func BoolToPtr(b bool) *bool { - return &b -} - -// IntToPtr returns the pointer to an int. -// -// Deprecated; use pointer.Of instead. -func IntToPtr(i int) *int { - return &i -} - -// Int8ToPtr returns the pointer to an int8. -// -// Deprecated; use pointer.Of instead. -func Int8ToPtr(i int8) *int8 { - return &i -} - -// Int32ToPtr returns the pointer to an int32. -// -// Deprecated; use pointer.Of instead. -func Int32ToPtr(i int32) *int32 { - return &i -} - -// Int64ToPtr returns the pointer to an int64. -// -// Deprecated; use pointer.Of instead. -func Int64ToPtr(i int64) *int64 { - return &i -} - -// Uint64ToPtr returns the pointer to an uint64. -// -// Deprecated; use pointer.Of instead. -func Uint64ToPtr(u uint64) *uint64 { - return &u -} - -// UintToPtr returns the pointer to an uint. -// -// Deprecated; use pointer.Of instead. 
-func UintToPtr(u uint) *uint { - return &u -} - -// StringToPtr returns the pointer to a string. -// -// Deprecated; use pointer.Of instead. -func StringToPtr(str string) *string { - return &str -} - -// TimeToPtr returns the pointer to a time.Duration. -func TimeToPtr(t time.Duration) *time.Duration { - return &t -} - // CompareTimePtrs return true if a is the same as b. func CompareTimePtrs(a, b *time.Duration) bool { if a == nil || b == nil { @@ -139,13 +78,6 @@ func CompareTimePtrs(a, b *time.Duration) bool { return *a == *b } -// Float64ToPtr returns the pointer to an float64. -// -// Deprecated; use pointer.Of instead. -func Float64ToPtr(f float64) *float64 { - return &f -} - // Min returns the minimum of a and b. func Min[T constraints.Ordered](a, b T) T { if a < b { @@ -162,36 +94,6 @@ func Max[T constraints.Ordered](a, b T) T { return b } -// IntMin returns the minimum of a and b. -// -// Deprecated; use Min instead. -func IntMin(a, b int) int { - if a < b { - return a - } - return b -} - -// IntMax returns the maximum of a and b. -// -// Deprecated; use Max instead. -func IntMax(a, b int) int { - if a > b { - return a - } - return b -} - -// Uint64Max returns the maximum of a and b. -// -// Deprecated; use Max instead. -func Uint64Max(a, b uint64) uint64 { - if a > b { - return a - } - return b -} - // MapStringStringSliceValueSet returns the set of values in a map[string][]string func MapStringStringSliceValueSet(m map[string][]string) []string { set := make(map[string]struct{}) diff --git a/helper/funcs_test.go b/helper/funcs_test.go index 977454bcd..94afa3c67 100644 --- a/helper/funcs_test.go +++ b/helper/funcs_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/helper/pointer" "github.com/shoenig/test/must" "github.com/stretchr/testify/require" ) @@ -136,15 +137,15 @@ func TestCompareTimePtrs(t *testing.T) { a := (*time.Duration)(nil) b := (*time.Duration)(nil) require.True(t, CompareTimePtrs(a, b)) - c := TimeToPtr(3 * time.Second) + c := pointer.Of(3 * time.Second) require.False(t, CompareTimePtrs(a, c)) require.False(t, CompareTimePtrs(c, a)) }) t.Run("not nil", func(t *testing.T) { - a := TimeToPtr(1 * time.Second) - b := TimeToPtr(1 * time.Second) - c := TimeToPtr(2 * time.Second) + a := pointer.Of(1 * time.Second) + b := pointer.Of(1 * time.Second) + c := pointer.Of(2 * time.Second) require.True(t, CompareTimePtrs(a, b)) require.False(t, CompareTimePtrs(a, c)) }) diff --git a/helper/pluginutils/loader/testing.go b/helper/pluginutils/loader/testing.go index f811804ec..75d2432fe 100644 --- a/helper/pluginutils/loader/testing.go +++ b/helper/pluginutils/loader/testing.go @@ -6,7 +6,7 @@ import ( log "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/plugins/base" ) @@ -51,7 +51,7 @@ func (m *MockInstance) ApiVersion() string { return // passed inst as the plugin func MockBasicExternalPlugin(inst interface{}, apiVersion string) *MockInstance { var killedLock sync.Mutex - killed := helper.BoolToPtr(false) + killed := pointer.Of(false) return &MockInstance{ InternalPlugin: false, KillF: func() { diff --git a/internal/testing/apitests/tasks_test.go b/internal/testing/apitests/tasks_test.go index b4cbe8cf9..847fb4cd5 100644 --- a/internal/testing/apitests/tasks_test.go +++ b/internal/testing/apitests/tasks_test.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" + 
"github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" ) @@ -27,130 +28,130 @@ func TestTaskGroup_Canonicalize_ReschedulePolicy(t *testing.T) { jobReschedulePolicy: nil, taskReschedulePolicy: nil, expected: &api.ReschedulePolicy{ - Attempts: intToPtr(structs.DefaultBatchJobReschedulePolicy.Attempts), - Interval: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval), - Delay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay), - DelayFunction: stringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction), - MaxDelay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay), - Unlimited: boolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited), + Attempts: pointer.Of(structs.DefaultBatchJobReschedulePolicy.Attempts), + Interval: pointer.Of(structs.DefaultBatchJobReschedulePolicy.Interval), + Delay: pointer.Of(structs.DefaultBatchJobReschedulePolicy.Delay), + DelayFunction: pointer.Of(structs.DefaultBatchJobReschedulePolicy.DelayFunction), + MaxDelay: pointer.Of(structs.DefaultBatchJobReschedulePolicy.MaxDelay), + Unlimited: pointer.Of(structs.DefaultBatchJobReschedulePolicy.Unlimited), }, }, { desc: "Empty job reschedule policy", jobReschedulePolicy: &api.ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - Delay: timeToPtr(0), - MaxDelay: timeToPtr(0), - DelayFunction: stringToPtr(""), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(0), + Interval: pointer.Of(time.Duration(0)), + Delay: pointer.Of(time.Duration(0)), + MaxDelay: pointer.Of(time.Duration(0)), + DelayFunction: pointer.Of(""), + Unlimited: pointer.Of(false), }, taskReschedulePolicy: nil, expected: &api.ReschedulePolicy{ - Attempts: intToPtr(0), - Interval: timeToPtr(0), - Delay: timeToPtr(0), - MaxDelay: timeToPtr(0), - DelayFunction: stringToPtr(""), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(0), + Interval: pointer.Of(time.Duration(0)), + Delay: pointer.Of(time.Duration(0)), + MaxDelay: pointer.Of(time.Duration(0)), + DelayFunction: pointer.Of(""), + Unlimited: pointer.Of(false), }, }, { desc: "Inherit from job", jobReschedulePolicy: &api.ReschedulePolicy{ - Attempts: intToPtr(1), - Interval: timeToPtr(20 * time.Second), - Delay: timeToPtr(20 * time.Second), - MaxDelay: timeToPtr(10 * time.Minute), - DelayFunction: stringToPtr("constant"), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(1), + Interval: pointer.Of(20 * time.Second), + Delay: pointer.Of(20 * time.Second), + MaxDelay: pointer.Of(10 * time.Minute), + DelayFunction: pointer.Of("constant"), + Unlimited: pointer.Of(false), }, taskReschedulePolicy: nil, expected: &api.ReschedulePolicy{ - Attempts: intToPtr(1), - Interval: timeToPtr(20 * time.Second), - Delay: timeToPtr(20 * time.Second), - MaxDelay: timeToPtr(10 * time.Minute), - DelayFunction: stringToPtr("constant"), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(1), + Interval: pointer.Of(20 * time.Second), + Delay: pointer.Of(20 * time.Second), + MaxDelay: pointer.Of(10 * time.Minute), + DelayFunction: pointer.Of("constant"), + Unlimited: pointer.Of(false), }, }, { desc: "Set in task", jobReschedulePolicy: nil, taskReschedulePolicy: &api.ReschedulePolicy{ - Attempts: intToPtr(5), - Interval: timeToPtr(2 * time.Minute), - Delay: timeToPtr(20 * time.Second), - MaxDelay: timeToPtr(10 * time.Minute), - DelayFunction: stringToPtr("constant"), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(5), + Interval: pointer.Of(2 * time.Minute), + Delay: pointer.Of(20 * time.Second), + 
MaxDelay: pointer.Of(10 * time.Minute), + DelayFunction: pointer.Of("constant"), + Unlimited: pointer.Of(false), }, expected: &api.ReschedulePolicy{ - Attempts: intToPtr(5), - Interval: timeToPtr(2 * time.Minute), - Delay: timeToPtr(20 * time.Second), - MaxDelay: timeToPtr(10 * time.Minute), - DelayFunction: stringToPtr("constant"), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(5), + Interval: pointer.Of(2 * time.Minute), + Delay: pointer.Of(20 * time.Second), + MaxDelay: pointer.Of(10 * time.Minute), + DelayFunction: pointer.Of("constant"), + Unlimited: pointer.Of(false), }, }, { desc: "Merge from job", jobReschedulePolicy: &api.ReschedulePolicy{ - Attempts: intToPtr(1), - Delay: timeToPtr(20 * time.Second), - MaxDelay: timeToPtr(10 * time.Minute), + Attempts: pointer.Of(1), + Delay: pointer.Of(20 * time.Second), + MaxDelay: pointer.Of(10 * time.Minute), }, taskReschedulePolicy: &api.ReschedulePolicy{ - Interval: timeToPtr(5 * time.Minute), - DelayFunction: stringToPtr("constant"), - Unlimited: boolToPtr(false), + Interval: pointer.Of(5 * time.Minute), + DelayFunction: pointer.Of("constant"), + Unlimited: pointer.Of(false), }, expected: &api.ReschedulePolicy{ - Attempts: intToPtr(1), - Interval: timeToPtr(5 * time.Minute), - Delay: timeToPtr(20 * time.Second), - MaxDelay: timeToPtr(10 * time.Minute), - DelayFunction: stringToPtr("constant"), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(1), + Interval: pointer.Of(5 * time.Minute), + Delay: pointer.Of(20 * time.Second), + MaxDelay: pointer.Of(10 * time.Minute), + DelayFunction: pointer.Of("constant"), + Unlimited: pointer.Of(false), }, }, { desc: "Override from group", jobReschedulePolicy: &api.ReschedulePolicy{ - Attempts: intToPtr(1), - MaxDelay: timeToPtr(10 * time.Second), + Attempts: pointer.Of(1), + MaxDelay: pointer.Of(10 * time.Second), }, taskReschedulePolicy: &api.ReschedulePolicy{ - Attempts: intToPtr(5), - Delay: timeToPtr(20 * time.Second), - MaxDelay: timeToPtr(20 * time.Minute), - DelayFunction: stringToPtr("constant"), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(5), + Delay: pointer.Of(20 * time.Second), + MaxDelay: pointer.Of(20 * time.Minute), + DelayFunction: pointer.Of("constant"), + Unlimited: pointer.Of(false), }, expected: &api.ReschedulePolicy{ - Attempts: intToPtr(5), - Interval: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval), - Delay: timeToPtr(20 * time.Second), - MaxDelay: timeToPtr(20 * time.Minute), - DelayFunction: stringToPtr("constant"), - Unlimited: boolToPtr(false), + Attempts: pointer.Of(5), + Interval: pointer.Of(structs.DefaultBatchJobReschedulePolicy.Interval), + Delay: pointer.Of(20 * time.Second), + MaxDelay: pointer.Of(20 * time.Minute), + DelayFunction: pointer.Of("constant"), + Unlimited: pointer.Of(false), }, }, { desc: "Attempts from job, default interval", jobReschedulePolicy: &api.ReschedulePolicy{ - Attempts: intToPtr(1), + Attempts: pointer.Of(1), }, taskReschedulePolicy: nil, expected: &api.ReschedulePolicy{ - Attempts: intToPtr(1), - Interval: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval), - Delay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay), - DelayFunction: stringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction), - MaxDelay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay), - Unlimited: boolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited), + Attempts: pointer.Of(1), + Interval: pointer.Of(structs.DefaultBatchJobReschedulePolicy.Interval), + Delay: 
pointer.Of(structs.DefaultBatchJobReschedulePolicy.Delay), + DelayFunction: pointer.Of(structs.DefaultBatchJobReschedulePolicy.DelayFunction), + MaxDelay: pointer.Of(structs.DefaultBatchJobReschedulePolicy.MaxDelay), + Unlimited: pointer.Of(structs.DefaultBatchJobReschedulePolicy.Unlimited), }, }, } @@ -158,13 +159,13 @@ func TestTaskGroup_Canonicalize_ReschedulePolicy(t *testing.T) { for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { job := &api.Job{ - ID: stringToPtr("test"), + ID: pointer.Of("test"), Reschedule: tc.jobReschedulePolicy, - Type: stringToPtr(api.JobTypeBatch), + Type: pointer.Of(api.JobTypeBatch), } job.Canonicalize() tg := &api.TaskGroup{ - Name: stringToPtr("foo"), + Name: pointer.Of("foo"), ReschedulePolicy: tc.taskReschedulePolicy, } tg.Canonicalize(job) diff --git a/internal/testing/apitests/util_test.go b/internal/testing/apitests/util_test.go index d751c04eb..d6046a2e6 100644 --- a/internal/testing/apitests/util_test.go +++ b/internal/testing/apitests/util_test.go @@ -2,31 +2,11 @@ package apitests import ( "testing" - "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/helper/pointer" ) -// boolToPtr returns the pointer to a boolean -func boolToPtr(b bool) *bool { - return &b -} - -// intToPtr returns the pointer to an int -func intToPtr(i int) *int { - return &i -} - -// timeToPtr returns the pointer to a time stamp -func timeToPtr(t time.Duration) *time.Duration { - return &t -} - -// stringToPtr returns the pointer to a string -func stringToPtr(str string) *string { - return &str -} - func assertQueryMeta(t *testing.T, qm *api.QueryMeta) { t.Helper() if qm.LastIndex == 0 { @@ -48,18 +28,18 @@ func testJob() *api.Job { task := api.NewTask("task1", "exec"). SetConfig("command", "/bin/sleep"). Require(&api.Resources{ - CPU: intToPtr(100), - MemoryMB: intToPtr(256), + CPU: pointer.Of(100), + MemoryMB: pointer.Of(256), }). SetLogConfig(&api.LogConfig{ - MaxFiles: intToPtr(1), - MaxFileSizeMB: intToPtr(2), + MaxFiles: pointer.Of(1), + MaxFileSizeMB: pointer.Of(2), }) group := api.NewTaskGroup("group1", 1). AddTask(task). RequireDisk(&api.EphemeralDisk{ - SizeMB: intToPtr(25), + SizeMB: pointer.Of(25), }) job := api.NewBatchJob("job1", "redis", "global", 1). 
diff --git a/nomad/alloc_endpoint.go b/nomad/alloc_endpoint.go index 30957db09..79745bb29 100644 --- a/nomad/alloc_endpoint.go +++ b/nomad/alloc_endpoint.go @@ -11,7 +11,7 @@ import ( multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/acl" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/state/paginator" @@ -311,8 +311,8 @@ func (a *Alloc) Stop(args *structs.AllocStopRequest, reply *structs.AllocStopRes Evals: []*structs.Evaluation{eval}, Allocs: map[string]*structs.DesiredTransition{ args.AllocID: { - Migrate: helper.BoolToPtr(true), - NoShutdownDelay: helper.BoolToPtr(args.NoShutdownDelay), + Migrate: pointer.Of(true), + NoShutdownDelay: pointer.Of(args.NoShutdownDelay), }, }, } diff --git a/nomad/alloc_endpoint_test.go b/nomad/alloc_endpoint_test.go index 63a43774d..3ed12965b 100644 --- a/nomad/alloc_endpoint_test.go +++ b/nomad/alloc_endpoint_test.go @@ -8,7 +8,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -1047,7 +1047,7 @@ func TestAllocEndpoint_UpdateDesiredTransition(t *testing.T) { require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc, alloc2})) t1 := &structs.DesiredTransition{ - Migrate: helper.BoolToPtr(true), + Migrate: pointer.Of(true), } // Update the allocs desired status diff --git a/nomad/blocked_evals.go b/nomad/blocked_evals.go index 8aab48eec..0943ac0b9 100644 --- a/nomad/blocked_evals.go +++ b/nomad/blocked_evals.go @@ -290,7 +290,7 @@ func latestEvalIndex(eval *structs.Evaluation) uint64 { return 0 } - return helper.Uint64Max(eval.CreateIndex, eval.SnapshotIndex) + return helper.Max(eval.CreateIndex, eval.SnapshotIndex) } // missedUnblock returns whether an evaluation missed an unblock while it was in diff --git a/nomad/client_agent_endpoint.go b/nomad/client_agent_endpoint.go index 4d61b625b..8a2f29e01 100644 --- a/nomad/client_agent_endpoint.go +++ b/nomad/client_agent_endpoint.go @@ -16,7 +16,7 @@ import ( "github.com/hashicorp/nomad/command/agent/host" "github.com/hashicorp/nomad/command/agent/monitor" "github.com/hashicorp/nomad/command/agent/pprof" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/go-msgpack/codec" @@ -121,7 +121,7 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { encoder := codec.NewEncoder(conn, structs.MsgpackHandle) if err := decoder.Decode(&args); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } @@ -130,7 +130,7 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { handleStreamResultError(err, nil, encoder) return } else if aclObj != nil && !aclObj.AllowAgentRead() { - handleStreamResultError(structs.ErrPermissionDenied, helper.Int64ToPtr(403), encoder) + handleStreamResultError(structs.ErrPermissionDenied, pointer.Of(int64(403)), encoder) return } @@ -140,7 +140,7 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { } if logLevel == log.NoLevel { - handleStreamResultError(errors.New("Unknown log level"), helper.Int64ToPtr(400), encoder) 
+ handleStreamResultError(errors.New("Unknown log level"), pointer.Of(int64(400)), encoder) return } @@ -153,7 +153,7 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { region := args.RequestRegion() if region == "" { - handleStreamResultError(fmt.Errorf("missing target RPC"), helper.Int64ToPtr(400), encoder) + handleStreamResultError(fmt.Errorf("missing target RPC"), pointer.Of(int64(400)), encoder) return } if region != a.srv.config.Region { @@ -165,7 +165,7 @@ func (a *Agent) monitor(conn io.ReadWriteCloser) { if args.ServerID != "" { serverToFwd, err := a.forwardFor(args.ServerID, region) if err != nil { - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + handleStreamResultError(err, pointer.Of(int64(400)), encoder) return } if serverToFwd != nil { @@ -268,7 +268,7 @@ OUTER: } if streamErr != nil { - handleStreamResultError(streamErr, helper.Int64ToPtr(500), encoder) + handleStreamResultError(streamErr, pointer.Of(int64(500)), encoder) return } } @@ -317,7 +317,7 @@ func (a *Agent) forwardMonitorClient(conn io.ReadWriteCloser, args cstructs.Moni state, srv, err := a.findClientConn(args.NodeID) if err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } @@ -357,7 +357,7 @@ func (a *Agent) forwardMonitorServer(conn io.ReadWriteCloser, server *serverPart serverConn, err := a.srv.streamingRpc(server, "Agent.Monitor") if err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } defer serverConn.Close() @@ -365,7 +365,7 @@ func (a *Agent) forwardMonitorServer(conn io.ReadWriteCloser, server *serverPart // Send the Request outEncoder := codec.NewEncoder(serverConn, structs.MsgpackHandle) if err := outEncoder.Encode(args); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } diff --git a/nomad/client_alloc_endpoint.go b/nomad/client_alloc_endpoint.go index ee57e1373..644304909 100644 --- a/nomad/client_alloc_endpoint.go +++ b/nomad/client_alloc_endpoint.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" cstructs "github.com/hashicorp/nomad/client/structs" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" ) @@ -282,7 +282,7 @@ func (a *ClientAllocations) exec(conn io.ReadWriteCloser) { encoder := codec.NewEncoder(conn, structs.MsgpackHandle) if err := decoder.Decode(&args); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } @@ -295,7 +295,7 @@ func (a *ClientAllocations) exec(conn io.ReadWriteCloser) { // Verify the arguments. 
if args.AllocID == "" { - handleStreamResultError(errors.New("missing AllocID"), helper.Int64ToPtr(400), encoder) + handleStreamResultError(errors.New("missing AllocID"), pointer.Of(int64(400)), encoder) return } @@ -308,7 +308,7 @@ func (a *ClientAllocations) exec(conn io.ReadWriteCloser) { alloc, err := getAlloc(snap, args.AllocID) if structs.IsErrUnknownAllocation(err) { - handleStreamResultError(err, helper.Int64ToPtr(404), encoder) + handleStreamResultError(err, pointer.Of(int64(404)), encoder) return } if err != nil { @@ -331,18 +331,18 @@ func (a *ClientAllocations) exec(conn io.ReadWriteCloser) { // Make sure Node is valid and new enough to support RPC node, err := snap.NodeByID(nil, nodeID) if err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } if node == nil { err := fmt.Errorf("Unknown node %q", nodeID) - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + handleStreamResultError(err, pointer.Of(int64(400)), encoder) return } if err := nodeSupportsRpc(node); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + handleStreamResultError(err, pointer.Of(int64(400)), encoder) return } @@ -356,7 +356,7 @@ func (a *ClientAllocations) exec(conn io.ReadWriteCloser) { if err != nil { var code *int64 if structs.IsErrNoNodeConn(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } handleStreamResultError(err, code, encoder) return diff --git a/nomad/client_fs_endpoint.go b/nomad/client_fs_endpoint.go index 6612bfdce..9e73fd60e 100644 --- a/nomad/client_fs_endpoint.go +++ b/nomad/client_fs_endpoint.go @@ -11,10 +11,10 @@ import ( metrics "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" - "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" ) @@ -62,7 +62,7 @@ func forwardRegionStreamingRpc(fsrv *Server, conn io.ReadWriteCloser, } if allocResp.Alloc == nil { - handleStreamResultError(structs.NewErrUnknownAllocation(allocID), helper.Int64ToPtr(404), encoder) + handleStreamResultError(structs.NewErrUnknownAllocation(allocID), pointer.Of(int64(404)), encoder) return } @@ -71,7 +71,7 @@ func forwardRegionStreamingRpc(fsrv *Server, conn io.ReadWriteCloser, if err != nil { var code *int64 if structs.IsErrNoNodeConn(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } handleStreamResultError(err, code, encoder) return @@ -213,7 +213,7 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) { encoder := codec.NewEncoder(conn, structs.MsgpackHandle) if err := decoder.Decode(&args); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } @@ -226,7 +226,7 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) { // Verify the arguments. 
if args.AllocID == "" { - handleStreamResultError(errors.New("missing AllocID"), helper.Int64ToPtr(400), encoder) + handleStreamResultError(errors.New("missing AllocID"), pointer.Of(int64(400)), encoder) return } @@ -239,7 +239,7 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) { alloc, err := getAlloc(snap, args.AllocID) if structs.IsErrUnknownAllocation(err) { - handleStreamResultError(structs.NewErrUnknownAllocation(args.AllocID), helper.Int64ToPtr(404), encoder) + handleStreamResultError(structs.NewErrUnknownAllocation(args.AllocID), pointer.Of(int64(404)), encoder) return } if err != nil { @@ -261,18 +261,18 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) { // Make sure Node is valid and new enough to support RPC node, err := snap.NodeByID(nil, nodeID) if err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } if node == nil { err := fmt.Errorf("Unknown node %q", nodeID) - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + handleStreamResultError(err, pointer.Of(int64(400)), encoder) return } if err := nodeSupportsRpc(node); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + handleStreamResultError(err, pointer.Of(int64(400)), encoder) return } @@ -286,7 +286,7 @@ func (f *FileSystem) stream(conn io.ReadWriteCloser) { if err != nil { var code *int64 if structs.IsErrNoNodeConn(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } handleStreamResultError(err, code, encoder) return @@ -331,7 +331,7 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { encoder := codec.NewEncoder(conn, structs.MsgpackHandle) if err := decoder.Decode(&args); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } @@ -344,7 +344,7 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { // Verify the arguments. 
if args.AllocID == "" { - handleStreamResultError(structs.ErrMissingAllocID, helper.Int64ToPtr(400), encoder) + handleStreamResultError(structs.ErrMissingAllocID, pointer.Of(int64(400)), encoder) return } @@ -357,7 +357,7 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { alloc, err := getAlloc(snap, args.AllocID) if structs.IsErrUnknownAllocation(err) { - handleStreamResultError(structs.NewErrUnknownAllocation(args.AllocID), helper.Int64ToPtr(404), encoder) + handleStreamResultError(structs.NewErrUnknownAllocation(args.AllocID), pointer.Of(int64(404)), encoder) return } if err != nil { @@ -382,18 +382,18 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { // Make sure Node is valid and new enough to support RPC node, err := snap.NodeByID(nil, nodeID) if err != nil { - handleStreamResultError(err, helper.Int64ToPtr(500), encoder) + handleStreamResultError(err, pointer.Of(int64(500)), encoder) return } if node == nil { err := fmt.Errorf("Unknown node %q", nodeID) - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + handleStreamResultError(err, pointer.Of(int64(400)), encoder) return } if err := nodeSupportsRpc(node); err != nil { - handleStreamResultError(err, helper.Int64ToPtr(400), encoder) + handleStreamResultError(err, pointer.Of(int64(400)), encoder) return } @@ -407,7 +407,7 @@ func (f *FileSystem) logs(conn io.ReadWriteCloser) { if err != nil { var code *int64 if structs.IsErrNoNodeConn(err) { - code = helper.Int64ToPtr(404) + code = pointer.Of(int64(404)) } handleStreamResultError(err, code, encoder) return diff --git a/nomad/deployment_endpoint_test.go b/nomad/deployment_endpoint_test.go index e58c62da3..17d4b9980 100644 --- a/nomad/deployment_endpoint_test.go +++ b/nomad/deployment_endpoint_test.go @@ -8,7 +8,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -492,7 +492,7 @@ func TestDeploymentEndpoint_Promote(t *testing.T) { d.TaskGroups[a.TaskGroup].PlacedCanaries = []string{a.ID} a.DeploymentID = d.ID a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } state := s1.fsm.State() @@ -557,7 +557,7 @@ func TestDeploymentEndpoint_Promote_ACL(t *testing.T) { d.TaskGroups[a.TaskGroup].PlacedCanaries = []string{a.ID} a.DeploymentID = d.ID a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } state := s1.fsm.State() diff --git a/nomad/deploymentwatcher/deployment_watcher.go b/nomad/deploymentwatcher/deployment_watcher.go index 32648c081..36a8f4d7c 100644 --- a/nomad/deploymentwatcher/deployment_watcher.go +++ b/nomad/deploymentwatcher/deployment_watcher.go @@ -8,7 +8,7 @@ import ( log "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" @@ -26,7 +26,7 @@ var ( // allocations part of a deployment to be rescheduled. We create a one off // variable to avoid creating a new object for every request. 
allowRescheduleTransition = &structs.DesiredTransition{ - Reschedule: helper.BoolToPtr(true), + Reschedule: pointer.Of(true), } ) @@ -233,7 +233,7 @@ func (w *deploymentWatcher) SetAllocHealth( resp.DeploymentModifyIndex = index resp.Index = index if j != nil { - resp.RevertedJobVersion = helper.Uint64ToPtr(j.Version) + resp.RevertedJobVersion = pointer.Of(j.Version) } return nil } @@ -394,7 +394,7 @@ func (w *deploymentWatcher) FailDeployment( resp.DeploymentModifyIndex = i resp.Index = i if rollbackJob != nil { - resp.RevertedJobVersion = helper.Uint64ToPtr(rollbackJob.Version) + resp.RevertedJobVersion = pointer.Of(rollbackJob.Version) } return nil } diff --git a/nomad/deploymentwatcher/deployments_watcher_test.go b/nomad/deploymentwatcher/deployments_watcher_test.go index ca2cfd7c3..73277ac27 100644 --- a/nomad/deploymentwatcher/deployments_watcher_test.go +++ b/nomad/deploymentwatcher/deployments_watcher_test.go @@ -7,7 +7,7 @@ import ( memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -326,7 +326,7 @@ func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { Status: structs.DeploymentStatusFailed, StatusDescription: structs.DeploymentStatusDescriptionFailedAllocations, }, - JobVersion: helper.Uint64ToPtr(0), + JobVersion: pointer.Of(uint64(0)), } matcher := matchDeploymentAllocHealthRequest(matchConfig) m.On("UpdateDeploymentAllocHealth", mocker.MatchedBy(matcher)).Return(nil) @@ -436,7 +436,7 @@ func TestWatcher_PromoteDeployment_HealthyCanaries(t *testing.T) { d.TaskGroups[a.TaskGroup].DesiredCanaries = 1 d.TaskGroups[a.TaskGroup].PlacedCanaries = []string{a.ID} a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } a.DeploymentID = d.ID require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") @@ -1077,7 +1077,7 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { DeploymentID: d.ID, Status: structs.DeploymentStatusFailed, StatusDescription: structs.DeploymentStatusDescriptionRollback(structs.DeploymentStatusDescriptionFailedAllocations, 0), - JobVersion: helper.Uint64ToPtr(0), + JobVersion: pointer.Of(uint64(0)), Eval: true, } m2 := matchDeploymentStatusUpdateRequest(c) @@ -1150,7 +1150,7 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { DeploymentID: d.ID, Status: structs.DeploymentStatusFailed, StatusDescription: structs.DeploymentStatusDescriptionRollback(structs.DeploymentStatusDescriptionFailedAllocations, 0), - JobVersion: helper.Uint64ToPtr(2), + JobVersion: pointer.Of(uint64(2)), Eval: true, } m3 := matchDeploymentStatusUpdateRequest(c2) @@ -1199,7 +1199,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline(t *testing.T) { // Update the alloc to be unhealthy and require that nothing happens. 
a2 := a.Copy() a2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), Timestamp: now, } require.Nil(m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 100, []*structs.Allocation{a2})) @@ -1303,7 +1303,7 @@ func TestDeploymentWatcher_ProgressCutoff(t *testing.T) { // Update the first allocation to be healthy a3 := a.Copy() - a3.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: helper.BoolToPtr(true)} + a3.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)} require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a3}), "UpsertAllocs") // Get the updated deployment @@ -1322,7 +1322,7 @@ func TestDeploymentWatcher_ProgressCutoff(t *testing.T) { // Update the second allocation to be healthy a4 := a2.Copy() - a4.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: helper.BoolToPtr(true)} + a4.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)} require.Nil(m.state.UpsertAllocs(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a4}), "UpsertAllocs") // Get the updated deployment @@ -1383,7 +1383,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline_Canaries(t *testing.T) { // Update the alloc to be unhealthy and require that nothing happens. a2 := a.Copy() a2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: now, } require.Nil(m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a2})) @@ -1456,7 +1456,7 @@ func TestDeploymentWatcher_PromotedCanary_UpdatedAllocs(t *testing.T) { a.ModifyTime = now.UnixNano() a.DeploymentID = d.ID a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: now, } require.Nil(m.state.UpsertJob(structs.MsgTypeTestSetup, m.nextIndex(), j), "UpsertJob") @@ -1477,7 +1477,7 @@ func TestDeploymentWatcher_PromotedCanary_UpdatedAllocs(t *testing.T) { a2.CreateTime = now.UnixNano() a2.ModifyTime = now.UnixNano() a2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: now, } d.TaskGroups["web"].RequireProgressBy = time.Now().Add(2 * time.Second) @@ -1605,7 +1605,7 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { canary2.ModifyTime = now.UnixNano() canary2.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: true, - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: now, } @@ -1624,7 +1624,7 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { canary1.ModifyTime = now.UnixNano() canary1.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: true, - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: now, } @@ -1680,7 +1680,7 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { alloc1a.ModifyTime = now.UnixNano() alloc1a.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: false, - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: now, } @@ -1689,7 +1689,7 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { alloc1b.ModifyTime = now.UnixNano() alloc1b.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: false, - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: now, } @@ -1762,7 +1762,7 @@ func 
TestDeploymentWatcher_Watch_StartWithoutProgressDeadline(t *testing.T) { // Update the alloc to be unhealthy a2 := a.Copy() a2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), Timestamp: time.Now(), } require.Nil(m.state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, m.nextIndex(), []*structs.Allocation{a2})) diff --git a/nomad/drainer/drainer.go b/nomad/drainer/drainer.go index 2e4e8528f..f610a0bce 100644 --- a/nomad/drainer/drainer.go +++ b/nomad/drainer/drainer.go @@ -7,7 +7,7 @@ import ( log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" @@ -396,7 +396,7 @@ func (n *NodeDrainer) drainAllocs(future *structs.BatchFuture, allocs []*structs transitions := make(map[string]*structs.DesiredTransition, len(allocs)) for _, alloc := range allocs { transitions[alloc.ID] = &structs.DesiredTransition{ - Migrate: helper.BoolToPtr(true), + Migrate: pointer.Of(true), } jobs[alloc.JobNamespacedID()] = alloc } diff --git a/nomad/drainer/watch_jobs.go b/nomad/drainer/watch_jobs.go index b3dbc842e..36d301cc3 100644 --- a/nomad/drainer/watch_jobs.go +++ b/nomad/drainer/watch_jobs.go @@ -411,7 +411,7 @@ func handleTaskGroup(snap *state.StateSnapshot, batch bool, tg *structs.TaskGrou // Determine how many we can drain thresholdCount := tg.Count - tg.Migrate.MaxParallel numToDrain := healthy - thresholdCount - numToDrain = helper.IntMin(len(drainable), numToDrain) + numToDrain = helper.Min(len(drainable), numToDrain) if numToDrain <= 0 { return nil } diff --git a/nomad/drainer/watch_jobs_test.go b/nomad/drainer/watch_jobs_test.go index e8ccd77e6..9f6f637dd 100644 --- a/nomad/drainer/watch_jobs_test.go +++ b/nomad/drainer/watch_jobs_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -138,7 +138,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { for i := 0; i < count; i++ { a := newAlloc(drainingNode, job) a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } allocs = append(allocs, a) } @@ -160,7 +160,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { // the old ones drainedAllocs := make([]*structs.Allocation, len(drains.Allocs)) for i, a := range drains.Allocs { - a.DesiredTransition.Migrate = helper.BoolToPtr(true) + a.DesiredTransition.Migrate = pointer.Of(true) // create a copy so we can reuse this slice drainedAllocs[i] = a.Copy() @@ -203,7 +203,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { for _, a := range replacements { a.ClientStatus = structs.AllocClientStatusRunning a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } } require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, index, replacements)) @@ -217,7 +217,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { // Fake migrations once more to finish the drain drainedAllocs = make([]*structs.Allocation, len(drains.Allocs)) for i, a := range drains.Allocs { - a.DesiredTransition.Migrate = helper.BoolToPtr(true) + a.DesiredTransition.Migrate = pointer.Of(true) // create a copy so we can reuse this 
slice drainedAllocs[i] = a.Copy() @@ -246,7 +246,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { for _, a := range replacements { a.ClientStatus = structs.AllocClientStatusRunning a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } } require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, index, replacements)) @@ -260,7 +260,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { // Fake migrations once more to finish the drain drainedAllocs = make([]*structs.Allocation, len(drains.Allocs)) for i, a := range drains.Allocs { - a.DesiredTransition.Migrate = helper.BoolToPtr(true) + a.DesiredTransition.Migrate = pointer.Of(true) // create a copy so we can reuse this slice drainedAllocs[i] = a.Copy() @@ -289,7 +289,7 @@ func TestDrainingJobWatcher_DrainJobs(t *testing.T) { for _, a := range replacements { a.ClientStatus = structs.AllocClientStatusRunning a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } } require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, index, replacements)) @@ -382,7 +382,7 @@ func TestHandeTaskGroup_Table(t *testing.T) { ExpectedDone: false, AddAlloc: func(i int, a *structs.Allocation, drainingID, runningID string) { if i == 0 { - a.DesiredTransition.Migrate = helper.BoolToPtr(true) + a.DesiredTransition.Migrate = pointer.Of(true) return } a.NodeID = runningID @@ -583,7 +583,7 @@ func testHandleTaskGroup(t *testing.T, tc handleTaskGroupTestCase) { // Default to being healthy on the draining node a.NodeID = drainingNode.ID a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } if tc.AddAlloc != nil { tc.AddAlloc(i, a, drainingNode.ID, runningNode.ID) @@ -630,7 +630,7 @@ func TestHandleTaskGroup_Migrations(t *testing.T) { a.TaskGroup = job.TaskGroups[0].Name a.NodeID = n.ID a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } if i%2 == 0 { @@ -699,7 +699,7 @@ func TestHandleTaskGroup_GarbageCollectedNode(t *testing.T) { a.TaskGroup = job.TaskGroups[0].Name a.NodeID = n.ID a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } if i%2 == 0 { diff --git a/nomad/drainer_int_test.go b/nomad/drainer_int_test.go index 5c23ac891..33abf4812 100644 --- a/nomad/drainer_int_test.go +++ b/nomad/drainer_int_test.go @@ -12,7 +12,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/drainer" "github.com/hashicorp/nomad/nomad/mock" @@ -52,7 +52,7 @@ func allocPromoter(errCh chan<- error, ctx context.Context, } newAlloc := alloc.Copy() newAlloc.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: now, } updates = append(updates, newAlloc) diff --git a/nomad/event_endpoint.go b/nomad/event_endpoint.go index 522d130e1..2628b11a1 100644 --- a/nomad/event_endpoint.go +++ b/nomad/event_endpoint.go @@ -7,7 +7,7 @@ import ( "time" "github.com/hashicorp/go-msgpack/codec" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/stream" "github.com/hashicorp/nomad/nomad/structs" ) @@ -28,7 +28,7 @@ func (e *Event) 
stream(conn io.ReadWriteCloser) { encoder := codec.NewEncoder(conn, structs.MsgpackHandle) if err := decoder.Decode(&args); err != nil { - handleJsonResultError(err, helper.Int64ToPtr(500), encoder) + handleJsonResultError(err, pointer.Of(int64(500)), encoder) return } @@ -36,7 +36,7 @@ func (e *Event) stream(conn io.ReadWriteCloser) { if args.Region != e.srv.config.Region { err := e.forwardStreamingRPC(args.Region, "Event.Stream", args, conn) if err != nil { - handleJsonResultError(err, helper.Int64ToPtr(500), encoder) + handleJsonResultError(err, pointer.Of(int64(500)), encoder) } return } @@ -52,7 +52,7 @@ func (e *Event) stream(conn io.ReadWriteCloser) { // Get the servers broker and subscribe publisher, err := e.srv.State().EventBroker() if err != nil { - handleJsonResultError(err, helper.Int64ToPtr(500), encoder) + handleJsonResultError(err, pointer.Of(int64(500)), encoder) return } @@ -66,7 +66,7 @@ func (e *Event) stream(conn io.ReadWriteCloser) { subscription, subErr = publisher.Subscribe(subReq) } if subErr != nil { - handleJsonResultError(subErr, helper.Int64ToPtr(500), encoder) + handleJsonResultError(subErr, pointer.Of(int64(500)), encoder) return } defer subscription.Unsubscribe() @@ -141,7 +141,7 @@ OUTER: } if streamErr != nil { - handleJsonResultError(streamErr, helper.Int64ToPtr(500), encoder) + handleJsonResultError(streamErr, pointer.Of(int64(500)), encoder) return } diff --git a/nomad/fsm.go b/nomad/fsm.go index 2c0dc6b6f..9e84ad83b 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-msgpack/codec" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" @@ -700,7 +700,7 @@ func (n *nomadFSM) handleJobDeregister(index uint64, jobID, namespace string, pu if err != nil { return err } - transition := &structs.DesiredTransition{NoShutdownDelay: helper.BoolToPtr(true)} + transition := &structs.DesiredTransition{NoShutdownDelay: pointer.Of(true)} for _, alloc := range allocs { err := n.state.UpdateAllocDesiredTransitionTxn(tx, index, alloc.ID, transition) if err != nil { diff --git a/nomad/fsm_test.go b/nomad/fsm_test.go index 0d9a5fe3a..b851300dc 100644 --- a/nomad/fsm_test.go +++ b/nomad/fsm_test.go @@ -12,7 +12,7 @@ import ( "github.com/google/go-cmp/cmp" memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -1581,7 +1581,7 @@ func TestFSM_UpdateAllocDesiredTransition(t *testing.T) { state.UpsertAllocs(structs.MsgTypeTestSetup, 10, []*structs.Allocation{alloc, alloc2}) t1 := &structs.DesiredTransition{ - Migrate: helper.BoolToPtr(true), + Migrate: pointer.Of(true), } eval := &structs.Evaluation{ @@ -2082,7 +2082,7 @@ func TestFSM_DeploymentPromotion(t *testing.T) { c1.DeploymentID = d.ID d.TaskGroups[c1.TaskGroup].PlacedCanaries = append(d.TaskGroups[c1.TaskGroup].PlacedCanaries, c1.ID) c1.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } c2 := mock.Alloc() c2.JobID = j.ID @@ -2090,7 +2090,7 @@ func TestFSM_DeploymentPromotion(t *testing.T) { d.TaskGroups[c2.TaskGroup].PlacedCanaries = 
append(d.TaskGroups[c2.TaskGroup].PlacedCanaries, c2.ID) c2.TaskGroup = tg2.Name c2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{c1, c2}); err != nil { diff --git a/nomad/heartbeat_test.go b/nomad/heartbeat_test.go index 17a5ce08e..58879cf9a 100644 --- a/nomad/heartbeat_test.go +++ b/nomad/heartbeat_test.go @@ -8,7 +8,7 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -301,18 +301,18 @@ func TestHeartbeat_InvalidateHeartbeat_DisconnectedClient(t *testing.T) { { name: "has-pending-reconnects", now: time.Now().UTC(), - maxClientDisconnect: helper.TimeToPtr(5 * time.Second), + maxClientDisconnect: pointer.Of(5 * time.Second), expectedNodeStatus: structs.NodeStatusDisconnected, }, { name: "has-expired-reconnects", - maxClientDisconnect: helper.TimeToPtr(5 * time.Second), + maxClientDisconnect: pointer.Of(5 * time.Second), now: time.Now().UTC().Add(-10 * time.Second), expectedNodeStatus: structs.NodeStatusDown, }, { name: "has-expired-reconnects-equal-timestamp", - maxClientDisconnect: helper.TimeToPtr(5 * time.Second), + maxClientDisconnect: pointer.Of(5 * time.Second), now: time.Now().UTC().Add(-5 * time.Second), expectedNodeStatus: structs.NodeStatusDown, }, diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go index a066e82d5..cf93971e6 100644 --- a/nomad/job_endpoint.go +++ b/nomad/job_endpoint.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/state/paginator" @@ -41,7 +42,7 @@ var ( // allocations to be force rescheduled. We create a one off // variable to avoid creating a new object for every request. 
allowForceRescheduleTransition = &structs.DesiredTransition{ - ForceReschedule: helper.BoolToPtr(true), + ForceReschedule: pointer.Of(true), } ) @@ -1389,7 +1390,7 @@ func (j *Job) List(args *structs.JobListRequest, reply *structs.JobListResponse) if err != nil { return err } - reply.Index = helper.Uint64Max(jindex, sindex) + reply.Index = helper.Max(jindex, sindex) // Set the query response j.srv.setQueryMeta(&reply.QueryMeta) diff --git a/nomad/job_endpoint_hook_connect.go b/nomad/job_endpoint_hook_connect.go index 0cba5c895..2af2d7199 100644 --- a/nomad/job_endpoint_hook_connect.go +++ b/nomad/job_endpoint_hook_connect.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/envoy" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" ) @@ -391,7 +392,7 @@ func gatewayProxy(gateway *structs.ConsulGateway, mode string) *structs.ConsulGa // set default connect timeout if not set if proxy.ConnectTimeout == nil { - proxy.ConnectTimeout = helper.TimeToPtr(defaultConnectTimeout) + proxy.ConnectTimeout = pointer.Of(defaultConnectTimeout) } if mode == "bridge" { diff --git a/nomad/job_endpoint_hook_connect_test.go b/nomad/job_endpoint_hook_connect_test.go index b665205ca..4e5bd7fc9 100644 --- a/nomad/job_endpoint_hook_connect_test.go +++ b/nomad/job_endpoint_hook_connect_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -705,7 +705,7 @@ func TestJobEndpointConnect_gatewayProxyIsDefault(t *testing.T) { t.Run("unrelated fields set", func(t *testing.T) { result := gatewayProxyIsDefault(&structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), Config: map[string]interface{}{"foo": 1}, }) require.True(t, result) @@ -826,7 +826,7 @@ func TestJobEndpointConnect_gatewayProxy(t *testing.T) { }, }, "bridge") require.Equal(t, &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(defaultConnectTimeout), + ConnectTimeout: pointer.Of(defaultConnectTimeout), EnvoyGatewayNoDefaultBind: true, EnvoyGatewayBindTaggedAddresses: false, EnvoyGatewayBindAddresses: map[string]*structs.ConsulGatewayBindAddress{ @@ -840,7 +840,7 @@ func TestJobEndpointConnect_gatewayProxy(t *testing.T) { t.Run("ingress set defaults", func(t *testing.T) { result := gatewayProxy(&structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), Config: map[string]interface{}{"foo": 1}, }, Ingress: &structs.ConsulIngressConfigEntry{ @@ -854,7 +854,7 @@ func TestJobEndpointConnect_gatewayProxy(t *testing.T) { }, }, "bridge") require.Equal(t, &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), Config: map[string]interface{}{"foo": 1}, EnvoyGatewayNoDefaultBind: true, EnvoyGatewayBindTaggedAddresses: false, @@ -894,7 +894,7 @@ func TestJobEndpointConnect_gatewayProxy(t *testing.T) { t.Run("terminating set defaults", func(t *testing.T) { result := gatewayProxy(&structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * 
time.Second), EnvoyDNSDiscoveryType: "STRICT_DNS", }, Terminating: &structs.ConsulTerminatingConfigEntry{ @@ -908,7 +908,7 @@ func TestJobEndpointConnect_gatewayProxy(t *testing.T) { }, }, "bridge") require.Equal(t, &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), EnvoyGatewayNoDefaultBind: true, EnvoyGatewayBindTaggedAddresses: false, EnvoyDNSDiscoveryType: "STRICT_DNS", @@ -945,14 +945,14 @@ func TestJobEndpointConnect_gatewayProxy(t *testing.T) { t.Run("mesh set defaults in bridge", func(t *testing.T) { result := gatewayProxy(&structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), }, Mesh: &structs.ConsulMeshConfigEntry{ // nothing }, }, "bridge") require.Equal(t, &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), EnvoyGatewayNoDefaultBind: true, EnvoyGatewayBindTaggedAddresses: false, EnvoyGatewayBindAddresses: map[string]*structs.ConsulGatewayBindAddress{ @@ -971,14 +971,14 @@ func TestJobEndpointConnect_gatewayProxy(t *testing.T) { t.Run("mesh set defaults in host", func(t *testing.T) { result := gatewayProxy(&structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), }, Mesh: &structs.ConsulMeshConfigEntry{ // nothing }, }, "host") require.Equal(t, &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), }, result) }) diff --git a/nomad/job_endpoint_oss_test.go b/nomad/job_endpoint_oss_test.go index 304422d81..446abe5d1 100644 --- a/nomad/job_endpoint_oss_test.go +++ b/nomad/job_endpoint_oss_test.go @@ -10,7 +10,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent/consul" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -26,7 +26,7 @@ func TestJobEndpoint_Register_Connect_AllowUnauthenticatedFalse_oss(t *testing.T s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue - c.ConsulConfig.AllowUnauthenticated = helper.BoolToPtr(false) + c.ConsulConfig.AllowUnauthenticated = pointer.Of(false) }) defer cleanupS1() codec := rpcClient(t, s1) diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go index dae304208..684c17a3b 100644 --- a/nomad/job_endpoint_test.go +++ b/nomad/job_endpoint_test.go @@ -12,7 +12,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -361,7 +361,7 @@ func TestJobEndpoint_Register_ConnectIngressGateway_full(t *testing.T) { job.TaskGroups[0].Services[0].Connect = &structs.ConsulConnect{ Gateway: &structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), EnvoyGatewayBindTaggedAddresses: true, EnvoyGatewayBindAddresses: 
map[string]*structs.ConsulGatewayBindAddress{ "service1": { @@ -2393,7 +2393,7 @@ func TestJobEndpoint_Revert(t *testing.T) { revertReq := &structs.JobRevertRequest{ JobID: job.ID, JobVersion: 0, - EnforcePriorVersion: helper.Uint64ToPtr(10), + EnforcePriorVersion: pointer.Of(uint64(10)), WriteRequest: structs.WriteRequest{ Region: "global", Namespace: job.Namespace, @@ -2426,7 +2426,7 @@ func TestJobEndpoint_Revert(t *testing.T) { revertReq = &structs.JobRevertRequest{ JobID: job.ID, JobVersion: 0, - EnforcePriorVersion: helper.Uint64ToPtr(1), + EnforcePriorVersion: pointer.Of(uint64(1)), WriteRequest: structs.WriteRequest{ Region: "global", Namespace: job.Namespace, @@ -2594,7 +2594,7 @@ func TestJobEndpoint_Revert_Vault_NoToken(t *testing.T) { revertReq = &structs.JobRevertRequest{ JobID: job.ID, JobVersion: 0, - EnforcePriorVersion: helper.Uint64ToPtr(1), + EnforcePriorVersion: pointer.Of(uint64(1)), WriteRequest: structs.WriteRequest{ Region: "global", Namespace: job.Namespace, @@ -7103,7 +7103,7 @@ func TestJobEndpoint_Scale(t *testing.T) { Target: map[string]string{ structs.ScalingTargetGroup: groupName, }, - Count: helper.Int64ToPtr(int64(originalCount + 1)), + Count: pointer.Of(int64(originalCount + 1)), Message: "because of the load", Meta: map[string]interface{}{ "metrics": map[string]string{ @@ -7188,7 +7188,7 @@ func TestJobEndpoint_Scale_DeploymentBlocking(t *testing.T) { }, Meta: scalingMetadata, Message: scalingMessage, - Count: helper.Int64ToPtr(newCount), + Count: pointer.Of(newCount), WriteRequest: structs.WriteRequest{ Region: "global", Namespace: job.Namespace, @@ -7480,7 +7480,7 @@ func TestJobEndpoint_Scale_Invalid(t *testing.T) { Target: map[string]string{ structs.ScalingTargetGroup: job.TaskGroups[0].Name, }, - Count: helper.Int64ToPtr(int64(count) + 1), + Count: pointer.Of(int64(count) + 1), Message: "this should fail", Meta: map[string]interface{}{ "metrics": map[string]string{ @@ -7504,7 +7504,7 @@ func TestJobEndpoint_Scale_Invalid(t *testing.T) { err = state.UpsertJob(structs.MsgTypeTestSetup, 1000, job) require.Nil(err) - scale.Count = helper.Int64ToPtr(10) + scale.Count = pointer.Of(int64(10)) scale.Message = "error message" scale.Error = true err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp) @@ -7537,7 +7537,7 @@ func TestJobEndpoint_Scale_OutOfBounds(t *testing.T) { Target: map[string]string{ structs.ScalingTargetGroup: job.TaskGroups[0].Name, }, - Count: helper.Int64ToPtr(pol.Max + 1), + Count: pointer.Of(pol.Max + 1), Message: "out of bounds", PolicyOverride: false, WriteRequest: structs.WriteRequest{ @@ -7549,7 +7549,7 @@ func TestJobEndpoint_Scale_OutOfBounds(t *testing.T) { require.Error(err) require.Contains(err.Error(), "group count was greater than scaling policy maximum: 11 > 10") - scale.Count = helper.Int64ToPtr(2) + scale.Count = pointer.Of(int64(2)) err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp) require.Error(err) require.Contains(err.Error(), "group count was less than scaling policy minimum: 2 < 3") @@ -7643,7 +7643,7 @@ func TestJobEndpoint_Scale_Priority(t *testing.T) { Target: map[string]string{ structs.ScalingTargetGroup: groupName, }, - Count: helper.Int64ToPtr(int64(originalCount + 1)), + Count: pointer.Of(int64(originalCount + 1)), Message: "scotty, we need more power", PolicyOverride: false, WriteRequest: structs.WriteRequest{ @@ -7689,7 +7689,7 @@ func TestJobEndpoint_InvalidCount(t *testing.T) { Target: map[string]string{ structs.ScalingTargetGroup: job.TaskGroups[0].Name, }, - Count: 
helper.Int64ToPtr(int64(-1)), + Count: pointer.Of(int64(-1)), WriteRequest: structs.WriteRequest{ Region: "global", Namespace: job.Namespace, @@ -7743,7 +7743,7 @@ func TestJobEndpoint_GetScaleStatus(t *testing.T) { a1.ClientStatus = structs.AllocClientStatusRunning // healthy a1.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } a2 := mock.Alloc() a2.Job = jobV2 @@ -7752,7 +7752,7 @@ func TestJobEndpoint_GetScaleStatus(t *testing.T) { a2.ClientStatus = structs.AllocClientStatusPending // unhealthy a2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } a3 := mock.Alloc() a3.Job = jobV2 @@ -7761,7 +7761,7 @@ func TestJobEndpoint_GetScaleStatus(t *testing.T) { a3.ClientStatus = structs.AllocClientStatusRunning // canary a3.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Canary: true, } // no health @@ -7775,7 +7775,7 @@ func TestJobEndpoint_GetScaleStatus(t *testing.T) { event := &structs.ScalingEvent{ Time: time.Now().Unix(), - Count: helper.Int64ToPtr(5), + Count: pointer.Of(int64(5)), Message: "message", Error: false, Meta: map[string]interface{}{ diff --git a/nomad/mock/mock.go b/nomad/mock/mock.go index 09c781296..004ce659c 100644 --- a/nomad/mock/mock.go +++ b/nomad/mock/mock.go @@ -8,8 +8,8 @@ import ( "time" fake "github.com/brianvoe/gofakeit/v6" - "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/envoy" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" psstructs "github.com/hashicorp/nomad/plugins/shared/structs" @@ -1198,7 +1198,7 @@ func ConnectIngressGatewayJob(mode string, inject bool) *structs.Job { Connect: &structs.ConsulConnect{ Gateway: &structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(3 * time.Second), + ConnectTimeout: pointer.Of(3 * time.Second), EnvoyGatewayBindAddresses: make(map[string]*structs.ConsulGatewayBindAddress), }, Ingress: &structs.ConsulIngressConfigEntry{ @@ -1249,7 +1249,7 @@ func ConnectTerminatingGatewayJob(mode string, inject bool) *structs.Job { Connect: &structs.ConsulConnect{ Gateway: &structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(3 * time.Second), + ConnectTimeout: pointer.Of(3 * time.Second), EnvoyGatewayBindAddresses: make(map[string]*structs.ConsulGatewayBindAddress), }, Terminating: &structs.ConsulTerminatingConfigEntry{ @@ -1300,7 +1300,7 @@ func ConnectMeshGatewayJob(mode string, inject bool) *structs.Job { Connect: &structs.ConsulConnect{ Gateway: &structs.ConsulGateway{ Proxy: &structs.ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(3 * time.Second), + ConnectTimeout: pointer.Of(3 * time.Second), EnvoyGatewayBindAddresses: make(map[string]*structs.ConsulGatewayBindAddress), }, Mesh: &structs.ConsulMeshConfigEntry{ diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go index d4f24153a..899f51470 100644 --- a/nomad/node_endpoint_test.go +++ b/nomad/node_endpoint_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" @@ -3637,7 +3638,7 @@ func 
TestClientEndpoint_DeriveSIToken(t *testing.T) { testutil.WaitForLeader(t, s1.RPC) // Set allow unauthenticated (no operator token required) - s1.config.ConsulConfig.AllowUnauthenticated = helper.BoolToPtr(true) + s1.config.ConsulConfig.AllowUnauthenticated = pointer.Of(true) // Create the node node := mock.Node() @@ -3689,7 +3690,7 @@ func TestClientEndpoint_DeriveSIToken_ConsulError(t *testing.T) { testutil.WaitForLeader(t, s1.RPC) // Set allow unauthenticated (no operator token required) - s1.config.ConsulConfig.AllowUnauthenticated = helper.BoolToPtr(true) + s1.config.ConsulConfig.AllowUnauthenticated = pointer.Of(true) // Create the node node := mock.Node() diff --git a/nomad/scaling_endpoint.go b/nomad/scaling_endpoint.go index dd8c5fe11..a93ddd5a1 100644 --- a/nomad/scaling_endpoint.go +++ b/nomad/scaling_endpoint.go @@ -130,7 +130,7 @@ func (p *Scaling) GetPolicy(args *structs.ScalingPolicySpecificRequest, if err != nil { return err } - reply.Index = helper.Uint64Max(1, index) + reply.Index = helper.Max(1, index) } return nil }} @@ -194,7 +194,7 @@ func (p *Scaling) listAllNamespaces(args *structs.ScalingPolicyListRequest, repl if err != nil { return err } - reply.Index = helper.Uint64Max(1, index) + reply.Index = helper.Max(1, index) // Set the query response p.srv.setQueryMeta(&reply.QueryMeta) diff --git a/nomad/server.go b/nomad/server.go index 9a541bb89..948f4362f 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -2011,7 +2011,7 @@ func (s *Server) setReplyQueryMeta(stateStore *state.StateStore, table string, r if err != nil { return err } - reply.Index = helper.Uint64Max(1, index) + reply.Index = helper.Max(1, index) // Set the query response. s.setQueryMeta(reply) diff --git a/nomad/state/events_test.go b/nomad/state/events_test.go index 1af2cc6cc..44a9c326e 100644 --- a/nomad/state/events_test.go +++ b/nomad/state/events_test.go @@ -6,7 +6,7 @@ import ( memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -177,7 +177,7 @@ func TestEventsFromChanges_DeploymentPromotion(t *testing.T) { c1.DeploymentID = d.ID d.TaskGroups[c1.TaskGroup].PlacedCanaries = append(d.TaskGroups[c1.TaskGroup].PlacedCanaries, c1.ID) c1.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } c2 := mock.Alloc() c2.JobID = j.ID @@ -185,7 +185,7 @@ func TestEventsFromChanges_DeploymentPromotion(t *testing.T) { d.TaskGroups[c2.TaskGroup].PlacedCanaries = append(d.TaskGroups[c2.TaskGroup].PlacedCanaries, c2.ID) c2.TaskGroup = tg2.Name c2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } require.NoError(t, s.upsertAllocsImpl(10, []*structs.Allocation{c1, c2}, setupTx)) @@ -254,7 +254,7 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { c1.DeploymentID = d.ID d.TaskGroups[c1.TaskGroup].PlacedCanaries = append(d.TaskGroups[c1.TaskGroup].PlacedCanaries, c1.ID) c1.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } c2 := mock.Alloc() c2.JobID = j.ID @@ -262,7 +262,7 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { d.TaskGroups[c2.TaskGroup].PlacedCanaries = append(d.TaskGroups[c2.TaskGroup].PlacedCanaries, c2.ID) c2.TaskGroup = tg2.Name 
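An aside on the pattern driving all of the substitutions in this diff: every typed constructor (helper.BoolToPtr, helper.TimeToPtr, helper.Uint64ToPtr, and so on) collapses into the single generic pointer.Of, and helper.Uint64Max collapses into a generic helper.Max. As a rough sketch of what these call sites now rely on — assuming Go 1.18 type parameters; the real definitions live in helper/pointer and helper and may differ in detail:

    package helper // sketch only; the real code is split across helper and helper/pointer

    import "golang.org/x/exp/constraints"

    // Of returns a pointer to a copy of its argument, for any type.
    // One generic replaces BoolToPtr, IntToPtr, Int64ToPtr, TimeToPtr, StringToPtr, ...
    func Of[A any](a A) *A {
    	return &a
    }

    // Max returns the larger of two values of any ordered type,
    // replacing the per-type Uint64Max and friends.
    func Max[T constraints.Ordered](a, b T) T {
    	if a > b {
    		return a
    	}
    	return b
    }
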
c2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } require.NoError(t, s.upsertAllocsImpl(10, []*structs.Allocation{c1, c2}, setupTx)) @@ -602,7 +602,7 @@ func TestEventsFromChanges_AllocUpdateDesiredTransitionRequestType(t *testing.T) req := &structs.AllocUpdateDesiredTransitionRequest{ Allocs: map[string]*structs.DesiredTransition{ - alloc.ID: {Migrate: helper.BoolToPtr(true)}, + alloc.ID: {Migrate: pointer.Of(true)}, }, Evals: evals, } diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 737dd6b22..0f5a136b9 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-multierror" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/stream" "github.com/hashicorp/nomad/nomad/structs" ) @@ -3488,7 +3488,7 @@ func (s *StateStore) nestedUpdateAllocFromClient(txn *txn, index uint64, alloc * // We got new health information from the client if newHasHealthy && (!oldHasHealthy || *copyAlloc.DeploymentStatus.Healthy != *alloc.DeploymentStatus.Healthy) { // Updated deployment health and timestamp - copyAlloc.DeploymentStatus.Healthy = helper.BoolToPtr(*alloc.DeploymentStatus.Healthy) + copyAlloc.DeploymentStatus.Healthy = pointer.Of(*alloc.DeploymentStatus.Healthy) copyAlloc.DeploymentStatus.Timestamp = alloc.DeploymentStatus.Timestamp copyAlloc.DeploymentStatus.ModifyIndex = index } @@ -4564,7 +4564,7 @@ func (s *StateStore) UpdateDeploymentAllocHealth(msgType structs.MessageType, in if copy.DeploymentStatus == nil { copy.DeploymentStatus = &structs.AllocDeploymentStatus{} } - copy.DeploymentStatus.Healthy = helper.BoolToPtr(healthy) + copy.DeploymentStatus.Healthy = pointer.Of(healthy) copy.DeploymentStatus.Timestamp = ts copy.DeploymentStatus.ModifyIndex = index copy.ModifyIndex = index diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 1e86bd9b7..739c7d2db 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -5168,7 +5168,7 @@ func TestStateStore_UpdateAllocsFromClient_Deployment(t *testing.T) { JobID: alloc.JobID, TaskGroup: alloc.TaskGroup, DeploymentStatus: &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: healthy, }, } @@ -5213,7 +5213,7 @@ func TestStateStore_UpdateAllocsFromClient_DeploymentStateMerges(t *testing.T) { JobID: alloc.JobID, TaskGroup: alloc.TaskGroup, DeploymentStatus: &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Canary: false, }, } @@ -5684,10 +5684,10 @@ func TestStateStore_UpdateAllocDesiredTransition(t *testing.T) { require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) t1 := &structs.DesiredTransition{ - Migrate: helper.BoolToPtr(true), + Migrate: pointer.Of(true), } t2 := &structs.DesiredTransition{ - Migrate: helper.BoolToPtr(false), + Migrate: pointer.Of(false), } eval := &structs.Evaluation{ ID: uuid.Generate(), @@ -7414,7 +7414,7 @@ func 
TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { c3.JobID = j.ID c3.DeploymentID = d.ID c3.DesiredStatus = structs.AllocDesiredStatusStop - c3.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: helper.BoolToPtr(true)} + c3.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)} d.TaskGroups[c3.TaskGroup].PlacedCanaries = append(d.TaskGroups[c3.TaskGroup].PlacedCanaries, c3.ID) require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{c1, c2, c3})) @@ -7500,7 +7500,7 @@ func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { c1.DeploymentID = d.ID d.TaskGroups[c1.TaskGroup].PlacedCanaries = append(d.TaskGroups[c1.TaskGroup].PlacedCanaries, c1.ID) c1.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } c2 := mock.Alloc() c2.JobID = j.ID @@ -7508,7 +7508,7 @@ func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { d.TaskGroups[c2.TaskGroup].PlacedCanaries = append(d.TaskGroups[c2.TaskGroup].PlacedCanaries, c2.ID) c2.TaskGroup = tg2.Name c2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{c1, c2}); err != nil { @@ -7595,7 +7595,7 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { c1.DeploymentID = d.ID d.TaskGroups[c1.TaskGroup].PlacedCanaries = append(d.TaskGroups[c1.TaskGroup].PlacedCanaries, c1.ID) c1.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Canary: true, } @@ -7606,7 +7606,7 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { d.TaskGroups[c2.TaskGroup].PlacedCanaries = append(d.TaskGroups[c2.TaskGroup].PlacedCanaries, c2.ID) c2.TaskGroup = tg2.Name c2.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Canary: true, } @@ -7615,7 +7615,7 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { c3.DeploymentID = d.ID d.TaskGroups[c3.TaskGroup].PlacedCanaries = append(d.TaskGroups[c3.TaskGroup].PlacedCanaries, c3.ID) c3.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), Canary: true, } @@ -7752,7 +7752,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { a.JobID = job.ID a.DeploymentID = d1.ID a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), Canary: true, } require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{a})) @@ -7770,7 +7770,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { b.JobID = job.ID b.DeploymentID = d1.ID b.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), Canary: false, } require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{b})) @@ -7791,7 +7791,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { c.JobID = job.ID c.DeploymentID = d2.ID c.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), Canary: true, } require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 6, []*structs.Allocation{c})) @@ -7822,7 +7822,7 @@ func TestStateStore_UpsertDeploymentAlloc_NoCanaries(t *testing.T) { a.JobID = 
job.ID a.DeploymentID = d1.ID a.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Canary: false, } require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{a})) diff --git a/nomad/structs/config/artifact.go b/nomad/structs/config/artifact.go index 732b4ce87..1b942805c 100644 --- a/nomad/structs/config/artifact.go +++ b/nomad/structs/config/artifact.go @@ -6,7 +6,7 @@ import ( "time" "github.com/dustin/go-humanize" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" ) // ArtifactConfig is the configuration specific to the Artifact stanza @@ -43,22 +43,22 @@ func (a *ArtifactConfig) Copy() *ArtifactConfig { newCopy := &ArtifactConfig{} if a.HTTPReadTimeout != nil { - newCopy.HTTPReadTimeout = helper.StringToPtr(*a.HTTPReadTimeout) + newCopy.HTTPReadTimeout = pointer.Of(*a.HTTPReadTimeout) } if a.HTTPMaxSize != nil { - newCopy.HTTPMaxSize = helper.StringToPtr(*a.HTTPMaxSize) + newCopy.HTTPMaxSize = pointer.Of(*a.HTTPMaxSize) } if a.GCSTimeout != nil { - newCopy.GCSTimeout = helper.StringToPtr(*a.GCSTimeout) + newCopy.GCSTimeout = pointer.Of(*a.GCSTimeout) } if a.GitTimeout != nil { - newCopy.GitTimeout = helper.StringToPtr(*a.GitTimeout) + newCopy.GitTimeout = pointer.Of(*a.GitTimeout) } if a.HgTimeout != nil { - newCopy.HgTimeout = helper.StringToPtr(*a.HgTimeout) + newCopy.HgTimeout = pointer.Of(*a.HgTimeout) } if a.S3Timeout != nil { - newCopy.S3Timeout = helper.StringToPtr(*a.S3Timeout) + newCopy.S3Timeout = pointer.Of(*a.S3Timeout) } return newCopy @@ -74,22 +74,22 @@ func (a *ArtifactConfig) Merge(o *ArtifactConfig) *ArtifactConfig { newCopy := a.Copy() if o.HTTPReadTimeout != nil { - newCopy.HTTPReadTimeout = helper.StringToPtr(*o.HTTPReadTimeout) + newCopy.HTTPReadTimeout = pointer.Of(*o.HTTPReadTimeout) } if o.HTTPMaxSize != nil { - newCopy.HTTPMaxSize = helper.StringToPtr(*o.HTTPMaxSize) + newCopy.HTTPMaxSize = pointer.Of(*o.HTTPMaxSize) } if o.GCSTimeout != nil { - newCopy.GCSTimeout = helper.StringToPtr(*o.GCSTimeout) + newCopy.GCSTimeout = pointer.Of(*o.GCSTimeout) } if o.GitTimeout != nil { - newCopy.GitTimeout = helper.StringToPtr(*o.GitTimeout) + newCopy.GitTimeout = pointer.Of(*o.GitTimeout) } if o.HgTimeout != nil { - newCopy.HgTimeout = helper.StringToPtr(*o.HgTimeout) + newCopy.HgTimeout = pointer.Of(*o.HgTimeout) } if o.S3Timeout != nil { - newCopy.S3Timeout = helper.StringToPtr(*o.S3Timeout) + newCopy.S3Timeout = pointer.Of(*o.S3Timeout) } return newCopy @@ -161,26 +161,26 @@ func DefaultArtifactConfig() *ArtifactConfig { return &ArtifactConfig{ // Read timeout for HTTP operations. Must be long enough to // accommodate large/slow downloads. - HTTPReadTimeout: helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("30m"), // Maximum download size. Must be large enough to accommodate // large downloads. - HTTPMaxSize: helper.StringToPtr("100GB"), + HTTPMaxSize: pointer.Of("100GB"), // Timeout for GCS operations. Must be long enough to // accommodate large/slow downloads. - GCSTimeout: helper.StringToPtr("30m"), + GCSTimeout: pointer.Of("30m"), // Timeout for Git operations. Must be long enough to // accommodate large/slow clones. - GitTimeout: helper.StringToPtr("30m"), + GitTimeout: pointer.Of("30m"), // Timeout for Hg operations. Must be long enough to // accommodate large/slow clones. - HgTimeout: helper.StringToPtr("30m"), + HgTimeout: pointer.Of("30m"), // Timeout for S3 operations. 
Must be long enough to // accommodate large/slow downloads. - S3Timeout: helper.StringToPtr("30m"), + S3Timeout: pointer.Of("30m"), } } diff --git a/nomad/structs/config/artifact_test.go b/nomad/structs/config/artifact_test.go index e8c78d1f6..f1ba29a0b 100644 --- a/nomad/structs/config/artifact_test.go +++ b/nomad/structs/config/artifact_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/stretchr/testify/require" ) @@ -15,10 +15,10 @@ func TestArtifactConfig_Copy(t *testing.T) { b := a.Copy() require.Equal(t, a, b) - b.HTTPReadTimeout = helper.StringToPtr("5m") - b.HTTPMaxSize = helper.StringToPtr("2MB") - b.GitTimeout = helper.StringToPtr("3m") - b.HgTimeout = helper.StringToPtr("2m") + b.HTTPReadTimeout = pointer.Of("5m") + b.HTTPMaxSize = pointer.Of("2MB") + b.GitTimeout = pointer.Of("3m") + b.HgTimeout = pointer.Of("2m") require.NotEqual(t, a, b) } @@ -34,68 +34,68 @@ func TestArtifactConfig_Merge(t *testing.T) { { name: "merge all fields", source: &ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("30m"), - HTTPMaxSize: helper.StringToPtr("100GB"), - GCSTimeout: helper.StringToPtr("30m"), - GitTimeout: helper.StringToPtr("30m"), - HgTimeout: helper.StringToPtr("30m"), - S3Timeout: helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("30m"), + HTTPMaxSize: pointer.Of("100GB"), + GCSTimeout: pointer.Of("30m"), + GitTimeout: pointer.Of("30m"), + HgTimeout: pointer.Of("30m"), + S3Timeout: pointer.Of("30m"), }, other: &ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("5m"), - HTTPMaxSize: helper.StringToPtr("2GB"), - GCSTimeout: helper.StringToPtr("1m"), - GitTimeout: helper.StringToPtr("2m"), - HgTimeout: helper.StringToPtr("3m"), - S3Timeout: helper.StringToPtr("4m"), + HTTPReadTimeout: pointer.Of("5m"), + HTTPMaxSize: pointer.Of("2GB"), + GCSTimeout: pointer.Of("1m"), + GitTimeout: pointer.Of("2m"), + HgTimeout: pointer.Of("3m"), + S3Timeout: pointer.Of("4m"), }, expected: &ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("5m"), - HTTPMaxSize: helper.StringToPtr("2GB"), - GCSTimeout: helper.StringToPtr("1m"), - GitTimeout: helper.StringToPtr("2m"), - HgTimeout: helper.StringToPtr("3m"), - S3Timeout: helper.StringToPtr("4m"), + HTTPReadTimeout: pointer.Of("5m"), + HTTPMaxSize: pointer.Of("2GB"), + GCSTimeout: pointer.Of("1m"), + GitTimeout: pointer.Of("2m"), + HgTimeout: pointer.Of("3m"), + S3Timeout: pointer.Of("4m"), }, }, { name: "null source", source: nil, other: &ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("5m"), - HTTPMaxSize: helper.StringToPtr("2GB"), - GCSTimeout: helper.StringToPtr("1m"), - GitTimeout: helper.StringToPtr("2m"), - HgTimeout: helper.StringToPtr("3m"), - S3Timeout: helper.StringToPtr("4m"), + HTTPReadTimeout: pointer.Of("5m"), + HTTPMaxSize: pointer.Of("2GB"), + GCSTimeout: pointer.Of("1m"), + GitTimeout: pointer.Of("2m"), + HgTimeout: pointer.Of("3m"), + S3Timeout: pointer.Of("4m"), }, expected: &ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("5m"), - HTTPMaxSize: helper.StringToPtr("2GB"), - GCSTimeout: helper.StringToPtr("1m"), - GitTimeout: helper.StringToPtr("2m"), - HgTimeout: helper.StringToPtr("3m"), - S3Timeout: helper.StringToPtr("4m"), + HTTPReadTimeout: pointer.Of("5m"), + HTTPMaxSize: pointer.Of("2GB"), + GCSTimeout: pointer.Of("1m"), + GitTimeout: pointer.Of("2m"), + HgTimeout: pointer.Of("3m"), + S3Timeout: pointer.Of("4m"), }, }, { name: "null other", source: &ArtifactConfig{ - HTTPReadTimeout: 
helper.StringToPtr("30m"), - HTTPMaxSize: helper.StringToPtr("100GB"), - GCSTimeout: helper.StringToPtr("30m"), - GitTimeout: helper.StringToPtr("30m"), - HgTimeout: helper.StringToPtr("30m"), - S3Timeout: helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("30m"), + HTTPMaxSize: pointer.Of("100GB"), + GCSTimeout: pointer.Of("30m"), + GitTimeout: pointer.Of("30m"), + HgTimeout: pointer.Of("30m"), + S3Timeout: pointer.Of("30m"), }, other: nil, expected: &ArtifactConfig{ - HTTPReadTimeout: helper.StringToPtr("30m"), - HTTPMaxSize: helper.StringToPtr("100GB"), - GCSTimeout: helper.StringToPtr("30m"), - GitTimeout: helper.StringToPtr("30m"), - HgTimeout: helper.StringToPtr("30m"), - S3Timeout: helper.StringToPtr("30m"), + HTTPReadTimeout: pointer.Of("30m"), + HTTPMaxSize: pointer.Of("100GB"), + GCSTimeout: pointer.Of("30m"), + GitTimeout: pointer.Of("30m"), + HgTimeout: pointer.Of("30m"), + S3Timeout: pointer.Of("30m"), }, }, } @@ -131,28 +131,28 @@ func TestArtifactConfig_Validate(t *testing.T) { { name: "http read timeout is invalid", config: func(a *ArtifactConfig) { - a.HTTPReadTimeout = helper.StringToPtr("invalid") + a.HTTPReadTimeout = pointer.Of("invalid") }, expectedError: "http_read_timeout not a valid duration", }, { name: "http read timeout is empty", config: func(a *ArtifactConfig) { - a.HTTPReadTimeout = helper.StringToPtr("") + a.HTTPReadTimeout = pointer.Of("") }, expectedError: "http_read_timeout not a valid duration", }, { name: "http read timeout is zero", config: func(a *ArtifactConfig) { - a.HTTPReadTimeout = helper.StringToPtr("0") + a.HTTPReadTimeout = pointer.Of("0") }, expectedError: "", }, { name: "http read timeout is negative", config: func(a *ArtifactConfig) { - a.HTTPReadTimeout = helper.StringToPtr("-10m") + a.HTTPReadTimeout = pointer.Of("-10m") }, expectedError: "http_read_timeout must be > 0", }, @@ -166,28 +166,28 @@ func TestArtifactConfig_Validate(t *testing.T) { { name: "http max size is invalid", config: func(a *ArtifactConfig) { - a.HTTPMaxSize = helper.StringToPtr("invalid") + a.HTTPMaxSize = pointer.Of("invalid") }, expectedError: "http_max_size not a valid size", }, { name: "http max size is empty", config: func(a *ArtifactConfig) { - a.HTTPMaxSize = helper.StringToPtr("") + a.HTTPMaxSize = pointer.Of("") }, expectedError: "http_max_size not a valid size", }, { name: "http max size is zero", config: func(a *ArtifactConfig) { - a.HTTPMaxSize = helper.StringToPtr("0") + a.HTTPMaxSize = pointer.Of("0") }, expectedError: "", }, { name: "http max size is negative", config: func(a *ArtifactConfig) { - a.HTTPMaxSize = helper.StringToPtr("-l0MB") + a.HTTPMaxSize = pointer.Of("-l0MB") }, expectedError: "http_max_size not a valid size", }, @@ -201,28 +201,28 @@ func TestArtifactConfig_Validate(t *testing.T) { { name: "gcs timeout is invalid", config: func(a *ArtifactConfig) { - a.GCSTimeout = helper.StringToPtr("invalid") + a.GCSTimeout = pointer.Of("invalid") }, expectedError: "gcs_timeout not a valid duration", }, { name: "gcs timeout is empty", config: func(a *ArtifactConfig) { - a.GCSTimeout = helper.StringToPtr("") + a.GCSTimeout = pointer.Of("") }, expectedError: "gcs_timeout not a valid duration", }, { name: "gcs timeout is zero", config: func(a *ArtifactConfig) { - a.GCSTimeout = helper.StringToPtr("0") + a.GCSTimeout = pointer.Of("0") }, expectedError: "", }, { name: "gcs timeout is negative", config: func(a *ArtifactConfig) { - a.GCSTimeout = helper.StringToPtr("-l0m") + a.GCSTimeout = pointer.Of("-l0m") }, expectedError: "gcs_timeout not a valid 
duration", }, @@ -236,28 +236,28 @@ func TestArtifactConfig_Validate(t *testing.T) { { name: "git timeout is invalid", config: func(a *ArtifactConfig) { - a.GitTimeout = helper.StringToPtr("invalid") + a.GitTimeout = pointer.Of("invalid") }, expectedError: "git_timeout not a valid duration", }, { name: "git timeout is empty", config: func(a *ArtifactConfig) { - a.GitTimeout = helper.StringToPtr("") + a.GitTimeout = pointer.Of("") }, expectedError: "git_timeout not a valid duration", }, { name: "git timeout is zero", config: func(a *ArtifactConfig) { - a.GitTimeout = helper.StringToPtr("0") + a.GitTimeout = pointer.Of("0") }, expectedError: "", }, { name: "git timeout is negative", config: func(a *ArtifactConfig) { - a.GitTimeout = helper.StringToPtr("-l0m") + a.GitTimeout = pointer.Of("-l0m") }, expectedError: "git_timeout not a valid duration", }, @@ -271,28 +271,28 @@ func TestArtifactConfig_Validate(t *testing.T) { { name: "hg timeout is invalid", config: func(a *ArtifactConfig) { - a.HgTimeout = helper.StringToPtr("invalid") + a.HgTimeout = pointer.Of("invalid") }, expectedError: "hg_timeout not a valid duration", }, { name: "hg timeout is empty", config: func(a *ArtifactConfig) { - a.HgTimeout = helper.StringToPtr("") + a.HgTimeout = pointer.Of("") }, expectedError: "hg_timeout not a valid duration", }, { name: "hg timeout is zero", config: func(a *ArtifactConfig) { - a.HgTimeout = helper.StringToPtr("0") + a.HgTimeout = pointer.Of("0") }, expectedError: "", }, { name: "hg timeout is negative", config: func(a *ArtifactConfig) { - a.HgTimeout = helper.StringToPtr("-l0m") + a.HgTimeout = pointer.Of("-l0m") }, expectedError: "hg_timeout not a valid duration", }, @@ -306,28 +306,28 @@ func TestArtifactConfig_Validate(t *testing.T) { { name: "s3 timeout is invalid", config: func(a *ArtifactConfig) { - a.S3Timeout = helper.StringToPtr("invalid") + a.S3Timeout = pointer.Of("invalid") }, expectedError: "s3_timeout not a valid duration", }, { name: "s3 timeout is empty", config: func(a *ArtifactConfig) { - a.S3Timeout = helper.StringToPtr("") + a.S3Timeout = pointer.Of("") }, expectedError: "s3_timeout not a valid duration", }, { name: "s3 timeout is zero", config: func(a *ArtifactConfig) { - a.S3Timeout = helper.StringToPtr("0") + a.S3Timeout = pointer.Of("0") }, expectedError: "", }, { name: "s3 timeout is negative", config: func(a *ArtifactConfig) { - a.S3Timeout = helper.StringToPtr("-l0m") + a.S3Timeout = pointer.Of("-l0m") }, expectedError: "s3_timeout not a valid duration", }, diff --git a/nomad/structs/config/audit.go b/nomad/structs/config/audit.go index cc7e7bf5d..91a430164 100644 --- a/nomad/structs/config/audit.go +++ b/nomad/structs/config/audit.go @@ -4,6 +4,7 @@ import ( "time" "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" ) // AuditConfig is the configuration specific to Audit Logging @@ -83,7 +84,7 @@ func (a *AuditConfig) Copy() *AuditConfig { // Copy bool pointers if a.Enabled != nil { - nc.Enabled = helper.BoolToPtr(*a.Enabled) + nc.Enabled = pointer.Of(*a.Enabled) } // Copy Sinks and Filters @@ -98,7 +99,7 @@ func (a *AuditConfig) Merge(b *AuditConfig) *AuditConfig { result := a.Copy() if b.Enabled != nil { - result.Enabled = helper.BoolToPtr(*b.Enabled) + result.Enabled = pointer.Of(*b.Enabled) } // Merge Sinks diff --git a/nomad/structs/config/audit_test.go b/nomad/structs/config/audit_test.go index 7cd9d930a..0388edb87 100644 --- a/nomad/structs/config/audit_test.go +++ b/nomad/structs/config/audit_test.go @@ -5,7 +5,7 @@ import ( 
"time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/stretchr/testify/require" ) @@ -13,7 +13,7 @@ func TestAuditConfig_Merge(t *testing.T) { ci.Parallel(t) c1 := &AuditConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Sinks: []*AuditSink{ { DeliveryGuarantee: "enforced", @@ -71,7 +71,7 @@ func TestAuditConfig_Merge(t *testing.T) { } e := &AuditConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Sinks: []*AuditSink{ { DeliveryGuarantee: "best-effort", diff --git a/nomad/structs/config/autopilot.go b/nomad/structs/config/autopilot.go index d71b0c398..2efd0f20a 100644 --- a/nomad/structs/config/autopilot.go +++ b/nomad/structs/config/autopilot.go @@ -3,7 +3,7 @@ package config import ( "time" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" ) type AutopilotConfig struct { @@ -60,7 +60,7 @@ func (a *AutopilotConfig) Merge(b *AutopilotConfig) *AutopilotConfig { result := a.Copy() if b.CleanupDeadServers != nil { - result.CleanupDeadServers = helper.BoolToPtr(*b.CleanupDeadServers) + result.CleanupDeadServers = pointer.Of(*b.CleanupDeadServers) } if b.ServerStabilizationTime != 0 { result.ServerStabilizationTime = b.ServerStabilizationTime @@ -84,7 +84,7 @@ func (a *AutopilotConfig) Merge(b *AutopilotConfig) *AutopilotConfig { result.EnableRedundancyZones = b.EnableRedundancyZones } if b.DisableUpgradeMigration != nil { - result.DisableUpgradeMigration = helper.BoolToPtr(*b.DisableUpgradeMigration) + result.DisableUpgradeMigration = pointer.Of(*b.DisableUpgradeMigration) } if b.EnableCustomUpgrades != nil { result.EnableCustomUpgrades = b.EnableCustomUpgrades @@ -104,16 +104,16 @@ func (a *AutopilotConfig) Copy() *AutopilotConfig { // Copy the bools if a.CleanupDeadServers != nil { - nc.CleanupDeadServers = helper.BoolToPtr(*a.CleanupDeadServers) + nc.CleanupDeadServers = pointer.Of(*a.CleanupDeadServers) } if a.EnableRedundancyZones != nil { - nc.EnableRedundancyZones = helper.BoolToPtr(*a.EnableRedundancyZones) + nc.EnableRedundancyZones = pointer.Of(*a.EnableRedundancyZones) } if a.DisableUpgradeMigration != nil { - nc.DisableUpgradeMigration = helper.BoolToPtr(*a.DisableUpgradeMigration) + nc.DisableUpgradeMigration = pointer.Of(*a.DisableUpgradeMigration) } if a.EnableCustomUpgrades != nil { - nc.EnableCustomUpgrades = helper.BoolToPtr(*a.EnableCustomUpgrades) + nc.EnableCustomUpgrades = pointer.Of(*a.EnableCustomUpgrades) } return nc diff --git a/nomad/structs/config/consul.go b/nomad/structs/config/consul.go index 44450d9a6..bddd7947f 100644 --- a/nomad/structs/config/consul.go +++ b/nomad/structs/config/consul.go @@ -8,7 +8,7 @@ import ( consul "github.com/hashicorp/consul/api" "github.com/hashicorp/go-secure-stdlib/listenerutil" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" ) // ConsulConfig contains the configuration information necessary to @@ -141,17 +141,17 @@ func DefaultConsulConfig() *ConsulConfig { ServerRPCCheckName: "Nomad Server RPC Check", ClientServiceName: "nomad-client", ClientHTTPCheckName: "Nomad Client HTTP Check", - AutoAdvertise: helper.BoolToPtr(true), - ChecksUseAdvertise: helper.BoolToPtr(false), - ServerAutoJoin: helper.BoolToPtr(true), - ClientAutoJoin: helper.BoolToPtr(true), - AllowUnauthenticated: helper.BoolToPtr(true), + AutoAdvertise: pointer.Of(true), + ChecksUseAdvertise: pointer.Of(false), + ServerAutoJoin: pointer.Of(true), + ClientAutoJoin: 
pointer.Of(true), + AllowUnauthenticated: pointer.Of(true), Timeout: 5 * time.Second, // From Consul api package defaults Addr: def.Address, - EnableSSL: helper.BoolToPtr(def.Scheme == "https"), - VerifySSL: helper.BoolToPtr(!def.TLSConfig.InsecureSkipVerify), + EnableSSL: pointer.Of(def.Scheme == "https"), + VerifySSL: pointer.Of(!def.TLSConfig.InsecureSkipVerify), CAFile: def.TLSConfig.CAFile, Namespace: def.Namespace, } @@ -190,7 +190,7 @@ func (c *ConsulConfig) Merge(b *ConsulConfig) *ConsulConfig { } result.Tags = append(result.Tags, b.Tags...) if b.AutoAdvertise != nil { - result.AutoAdvertise = helper.BoolToPtr(*b.AutoAdvertise) + result.AutoAdvertise = pointer.Of(*b.AutoAdvertise) } if b.Addr != "" { result.Addr = b.Addr @@ -211,13 +211,13 @@ func (c *ConsulConfig) Merge(b *ConsulConfig) *ConsulConfig { result.Auth = b.Auth } if b.EnableSSL != nil { - result.EnableSSL = helper.BoolToPtr(*b.EnableSSL) + result.EnableSSL = pointer.Of(*b.EnableSSL) } if b.VerifySSL != nil { - result.VerifySSL = helper.BoolToPtr(*b.VerifySSL) + result.VerifySSL = pointer.Of(*b.VerifySSL) } if b.ShareSSL != nil { - result.ShareSSL = helper.BoolToPtr(*b.ShareSSL) + result.ShareSSL = pointer.Of(*b.ShareSSL) } if b.CAFile != "" { result.CAFile = b.CAFile @@ -229,16 +229,16 @@ func (c *ConsulConfig) Merge(b *ConsulConfig) *ConsulConfig { result.KeyFile = b.KeyFile } if b.ServerAutoJoin != nil { - result.ServerAutoJoin = helper.BoolToPtr(*b.ServerAutoJoin) + result.ServerAutoJoin = pointer.Of(*b.ServerAutoJoin) } if b.ClientAutoJoin != nil { - result.ClientAutoJoin = helper.BoolToPtr(*b.ClientAutoJoin) + result.ClientAutoJoin = pointer.Of(*b.ClientAutoJoin) } if b.ChecksUseAdvertise != nil { - result.ChecksUseAdvertise = helper.BoolToPtr(*b.ChecksUseAdvertise) + result.ChecksUseAdvertise = pointer.Of(*b.ChecksUseAdvertise) } if b.AllowUnauthenticated != nil { - result.AllowUnauthenticated = helper.BoolToPtr(*b.AllowUnauthenticated) + result.AllowUnauthenticated = pointer.Of(*b.AllowUnauthenticated) } if b.Namespace != "" { result.Namespace = b.Namespace @@ -319,28 +319,28 @@ func (c *ConsulConfig) Copy() *ConsulConfig { // Copy the bools if nc.AutoAdvertise != nil { - nc.AutoAdvertise = helper.BoolToPtr(*nc.AutoAdvertise) + nc.AutoAdvertise = pointer.Of(*nc.AutoAdvertise) } if nc.ChecksUseAdvertise != nil { - nc.ChecksUseAdvertise = helper.BoolToPtr(*nc.ChecksUseAdvertise) + nc.ChecksUseAdvertise = pointer.Of(*nc.ChecksUseAdvertise) } if nc.EnableSSL != nil { - nc.EnableSSL = helper.BoolToPtr(*nc.EnableSSL) + nc.EnableSSL = pointer.Of(*nc.EnableSSL) } if nc.VerifySSL != nil { - nc.VerifySSL = helper.BoolToPtr(*nc.VerifySSL) + nc.VerifySSL = pointer.Of(*nc.VerifySSL) } if nc.ShareSSL != nil { - nc.ShareSSL = helper.BoolToPtr(*nc.ShareSSL) + nc.ShareSSL = pointer.Of(*nc.ShareSSL) } if nc.ServerAutoJoin != nil { - nc.ServerAutoJoin = helper.BoolToPtr(*nc.ServerAutoJoin) + nc.ServerAutoJoin = pointer.Of(*nc.ServerAutoJoin) } if nc.ClientAutoJoin != nil { - nc.ClientAutoJoin = helper.BoolToPtr(*nc.ClientAutoJoin) + nc.ClientAutoJoin = pointer.Of(*nc.ClientAutoJoin) } if nc.AllowUnauthenticated != nil { - nc.AllowUnauthenticated = helper.BoolToPtr(*nc.AllowUnauthenticated) + nc.AllowUnauthenticated = pointer.Of(*nc.AllowUnauthenticated) } return nc diff --git a/nomad/structs/config/limits.go b/nomad/structs/config/limits.go index 5c17bc99e..77fda5190 100644 --- a/nomad/structs/config/limits.go +++ b/nomad/structs/config/limits.go @@ -1,6 +1,6 @@ package config -import "github.com/hashicorp/nomad/helper" +import 
"github.com/hashicorp/nomad/helper/pointer" const ( // LimitsNonStreamingConnsPerClient is the number of connections per @@ -47,9 +47,9 @@ type Limits struct { func DefaultLimits() Limits { return Limits{ HTTPSHandshakeTimeout: "5s", - HTTPMaxConnsPerClient: helper.IntToPtr(100), + HTTPMaxConnsPerClient: pointer.Of(100), RPCHandshakeTimeout: "5s", - RPCMaxConnsPerClient: helper.IntToPtr(100), + RPCMaxConnsPerClient: pointer.Of(100), } } @@ -62,13 +62,13 @@ func (l *Limits) Merge(o Limits) Limits { m.HTTPSHandshakeTimeout = o.HTTPSHandshakeTimeout } if o.HTTPMaxConnsPerClient != nil { - m.HTTPMaxConnsPerClient = helper.IntToPtr(*o.HTTPMaxConnsPerClient) + m.HTTPMaxConnsPerClient = pointer.Of(*o.HTTPMaxConnsPerClient) } if o.RPCHandshakeTimeout != "" { m.RPCHandshakeTimeout = o.RPCHandshakeTimeout } if o.RPCMaxConnsPerClient != nil { - m.RPCMaxConnsPerClient = helper.IntToPtr(*o.RPCMaxConnsPerClient) + m.RPCMaxConnsPerClient = pointer.Of(*o.RPCMaxConnsPerClient) } return m @@ -78,10 +78,10 @@ func (l *Limits) Merge(o Limits) Limits { func (l *Limits) Copy() Limits { c := *l if l.HTTPMaxConnsPerClient != nil { - c.HTTPMaxConnsPerClient = helper.IntToPtr(*l.HTTPMaxConnsPerClient) + c.HTTPMaxConnsPerClient = pointer.Of(*l.HTTPMaxConnsPerClient) } if l.RPCMaxConnsPerClient != nil { - c.RPCMaxConnsPerClient = helper.IntToPtr(*l.RPCMaxConnsPerClient) + c.RPCMaxConnsPerClient = pointer.Of(*l.RPCMaxConnsPerClient) } return c } diff --git a/nomad/structs/config/limits_test.go b/nomad/structs/config/limits_test.go index 7a4082f3d..95f68037f 100644 --- a/nomad/structs/config/limits_test.go +++ b/nomad/structs/config/limits_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/stretchr/testify/require" ) @@ -32,9 +32,9 @@ func TestLimits_Copy(t *testing.T) { // Assert changes to copy are not propagated to the original c.HTTPSHandshakeTimeout = "1s" - c.HTTPMaxConnsPerClient = helper.IntToPtr(50) + c.HTTPMaxConnsPerClient = pointer.Of(50) c.RPCHandshakeTimeout = "1s" - c.RPCMaxConnsPerClient = helper.IntToPtr(50) + c.RPCMaxConnsPerClient = pointer.Of(50) require.NotEqual(t, c.HTTPSHandshakeTimeout, o.HTTPSHandshakeTimeout) @@ -74,7 +74,7 @@ func TestLimits_Merge(t *testing.T) { // Use short struct initialization style so it fails to compile if // fields are added - expected := Limits{"10s", helper.IntToPtr(100), "5s", helper.IntToPtr(100)} + expected := Limits{"10s", pointer.Of(100), "5s", pointer.Of(100)} require.Equal(t, expected, m2) // Mergin in 0 values should not change anything diff --git a/nomad/structs/config/vault.go b/nomad/structs/config/vault.go index 83a239a19..f3e9b290f 100644 --- a/nomad/structs/config/vault.go +++ b/nomad/structs/config/vault.go @@ -3,7 +3,7 @@ package config import ( "time" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" vault "github.com/hashicorp/vault/api" ) @@ -87,7 +87,7 @@ func DefaultVaultConfig() *VaultConfig { return &VaultConfig{ Addr: "https://vault.service.consul:8200", ConnectionRetryIntv: DefaultVaultConnectRetryIntv, - AllowUnauthenticated: helper.BoolToPtr(true), + AllowUnauthenticated: pointer.Of(true), } } diff --git a/nomad/structs/diff_test.go b/nomad/structs/diff_test.go index 5ac7ac14e..3e96ca8b0 100644 --- a/nomad/structs/diff_test.go +++ b/nomad/structs/diff_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + 
"github.com/hashicorp/nomad/helper/pointer" "github.com/stretchr/testify/require" ) @@ -2696,7 +2696,7 @@ func TestTaskGroupDiff(t *testing.T) { }, Gateway: &ConsulGateway{ Proxy: &ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), EnvoyGatewayBindTaggedAddresses: false, EnvoyGatewayBindAddresses: map[string]*ConsulGatewayBindAddress{ "service1": { @@ -2790,7 +2790,7 @@ func TestTaskGroupDiff(t *testing.T) { }, Gateway: &ConsulGateway{ Proxy: &ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(2 * time.Second), + ConnectTimeout: pointer.Of(2 * time.Second), EnvoyGatewayBindTaggedAddresses: true, EnvoyGatewayBindAddresses: map[string]*ConsulGatewayBindAddress{ "service1": { @@ -3736,10 +3736,10 @@ func TestTaskGroupDiff(t *testing.T) { { TestCase: "TaskGroup shutdown_delay edited", Old: &TaskGroup{ - ShutdownDelay: helper.TimeToPtr(30 * time.Second), + ShutdownDelay: pointer.Of(30 * time.Second), }, New: &TaskGroup{ - ShutdownDelay: helper.TimeToPtr(5 * time.Second), + ShutdownDelay: pointer.Of(5 * time.Second), }, Expected: &TaskGroupDiff{ Type: DiffTypeEdited, @@ -3756,7 +3756,7 @@ func TestTaskGroupDiff(t *testing.T) { { TestCase: "TaskGroup shutdown_delay removed", Old: &TaskGroup{ - ShutdownDelay: helper.TimeToPtr(30 * time.Second), + ShutdownDelay: pointer.Of(30 * time.Second), }, New: &TaskGroup{}, Expected: &TaskGroupDiff{ @@ -3775,7 +3775,7 @@ func TestTaskGroupDiff(t *testing.T) { TestCase: "TaskGroup shutdown_delay added", Old: &TaskGroup{}, New: &TaskGroup{ - ShutdownDelay: helper.TimeToPtr(30 * time.Second), + ShutdownDelay: pointer.Of(30 * time.Second), }, Expected: &TaskGroupDiff{ Type: DiffTypeEdited, @@ -3943,7 +3943,7 @@ func TestTaskGroupDiff(t *testing.T) { }, New: &TaskGroup{ Name: "foo", - MaxClientDisconnect: helper.TimeToPtr(20 * time.Second), + MaxClientDisconnect: pointer.Of(20 * time.Second), }, Expected: &TaskGroupDiff{ Type: DiffTypeEdited, @@ -3962,11 +3962,11 @@ func TestTaskGroupDiff(t *testing.T) { TestCase: "MaxClientDisconnect updated", Old: &TaskGroup{ Name: "foo", - MaxClientDisconnect: helper.TimeToPtr(10 * time.Second), + MaxClientDisconnect: pointer.Of(10 * time.Second), }, New: &TaskGroup{ Name: "foo", - MaxClientDisconnect: helper.TimeToPtr(20 * time.Second), + MaxClientDisconnect: pointer.Of(20 * time.Second), }, Expected: &TaskGroupDiff{ Type: DiffTypeEdited, @@ -3985,7 +3985,7 @@ func TestTaskGroupDiff(t *testing.T) { TestCase: "MaxClientDisconnect deleted", Old: &TaskGroup{ Name: "foo", - MaxClientDisconnect: helper.TimeToPtr(10 * time.Second), + MaxClientDisconnect: pointer.Of(10 * time.Second), }, New: &TaskGroup{ Name: "foo", @@ -7047,8 +7047,8 @@ func TestTaskDiff(t *testing.T) { Uid: 1001, Gid: 21, Wait: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(5 * time.Second), }, }, { @@ -7078,8 +7078,8 @@ func TestTaskDiff(t *testing.T) { Uid: 1001, Gid: 21, Wait: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, { @@ -7093,8 +7093,8 @@ func TestTaskDiff(t *testing.T) { Uid: 1002, Gid: 22, Wait: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, }, diff --git a/nomad/structs/services.go b/nomad/structs/services.go index 
822414cc2..38ddd5970 100644 --- a/nomad/structs/services.go +++ b/nomad/structs/services.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/args" + "github.com/hashicorp/nomad/helper/pointer" "github.com/mitchellh/copystructure" "golang.org/x/exp/slices" ) @@ -1247,11 +1248,11 @@ func (t *SidecarTask) Copy() *SidecarTask { } if t.KillTimeout != nil { - nt.KillTimeout = helper.TimeToPtr(*t.KillTimeout) + nt.KillTimeout = pointer.Of(*t.KillTimeout) } if t.ShutdownDelay != nil { - nt.ShutdownDelay = helper.TimeToPtr(*t.ShutdownDelay) + nt.ShutdownDelay = pointer.Of(*t.ShutdownDelay) } return nt @@ -1769,7 +1770,7 @@ func (p *ConsulGatewayProxy) Copy() *ConsulGatewayProxy { } return &ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(*p.ConnectTimeout), + ConnectTimeout: pointer.Of(*p.ConnectTimeout), EnvoyGatewayBindTaggedAddresses: p.EnvoyGatewayBindTaggedAddresses, EnvoyGatewayBindAddresses: p.copyBindAddresses(), EnvoyGatewayNoDefaultBind: p.EnvoyGatewayNoDefaultBind, diff --git a/nomad/structs/services_test.go b/nomad/structs/services_test.go index 496082fc3..939547694 100644 --- a/nomad/structs/services_test.go +++ b/nomad/structs/services_test.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/shoenig/test/must" "github.com/stretchr/testify/require" ) @@ -531,7 +531,7 @@ func TestConsulConnect_GatewayProxy_CopyEquals(t *testing.T) { ci.Parallel(t) c := &ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), EnvoyGatewayBindTaggedAddresses: false, EnvoyGatewayBindAddresses: make(map[string]*ConsulGatewayBindAddress), } @@ -565,11 +565,11 @@ func TestSidecarTask_MergeIntoTask(t *testing.T) { Meta: map[string]string{ "abc": "123", }, - KillTimeout: helper.TimeToPtr(15 * time.Second), + KillTimeout: pointer.Of(15 * time.Second), LogConfig: &LogConfig{ MaxFiles: 3, }, - ShutdownDelay: helper.TimeToPtr(5 * time.Second), + ShutdownDelay: pointer.Of(5 * time.Second), KillSignal: "SIGABRT", } @@ -611,12 +611,12 @@ func TestSidecarTask_Equals(t *testing.T) { Env: map[string]string{"color": "blue"}, Resources: &Resources{MemoryMB: 300}, Meta: map[string]string{"index": "1"}, - KillTimeout: helper.TimeToPtr(2 * time.Second), + KillTimeout: pointer.Of(2 * time.Second), LogConfig: &LogConfig{ MaxFiles: 2, MaxFileSizeMB: 300, }, - ShutdownDelay: helper.TimeToPtr(10 * time.Second), + ShutdownDelay: pointer.Of(10 * time.Second), KillSignal: "SIGTERM", } @@ -663,7 +663,7 @@ func TestSidecarTask_Equals(t *testing.T) { }) t.Run("mod kill timeout", func(t *testing.T) { - try(t, func(s *st) { s.KillTimeout = helper.TimeToPtr(3 * time.Second) }) + try(t, func(s *st) { s.KillTimeout = pointer.Of(3 * time.Second) }) }) t.Run("mod log config", func(t *testing.T) { @@ -671,7 +671,7 @@ func TestSidecarTask_Equals(t *testing.T) { }) t.Run("mod shutdown delay", func(t *testing.T) { - try(t, func(s *st) { s.ShutdownDelay = helper.TimeToPtr(20 * time.Second) }) + try(t, func(s *st) { s.ShutdownDelay = pointer.Of(20 * time.Second) }) }) t.Run("mod kill signal", func(t *testing.T) { @@ -824,7 +824,7 @@ func TestConsulSidecarService_Copy(t *testing.T) { var ( consulIngressGateway1 = &ConsulGateway{ Proxy: &ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), 
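In the Copy methods touched here (SidecarTask.Copy, ConsulGatewayProxy.Copy, and the config Copy/Merge functions earlier in the diff), pointer.Of(*src) does exactly what helper.TimeToPtr(*src) did before: dereference the source and wrap the value in a fresh allocation, so the copy does not alias the original. A minimal illustration of that idiom — copyDuration is a hypothetical name used only for this sketch:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/hashicorp/nomad/helper/pointer"
    )

    // copyDuration stands in for the nil-check-then-copy idiom used by the
    // Copy() methods in this diff.
    func copyDuration(src *time.Duration) *time.Duration {
    	if src == nil {
    		return nil
    	}
    	return pointer.Of(*src) // fresh pointer to a copied value
    }

    func main() {
    	orig := pointer.Of(2 * time.Second)
    	cp := copyDuration(orig)
    	*orig = 9 * time.Second
    	fmt.Println(*cp) // still 2s: the copy does not alias the original
    }
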
EnvoyGatewayBindTaggedAddresses: true, EnvoyGatewayBindAddresses: map[string]*ConsulGatewayBindAddress{ "listener1": {Address: "10.0.0.1", Port: 2001}, @@ -861,7 +861,7 @@ var ( consulTerminatingGateway1 = &ConsulGateway{ Proxy: &ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), EnvoyDNSDiscoveryType: "STRICT_DNS", EnvoyGatewayBindAddresses: nil, }, @@ -880,7 +880,7 @@ var ( consulMeshGateway1 = &ConsulGateway{ Proxy: &ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), }, Mesh: &ConsulMeshConfigEntry{ // nothing @@ -985,7 +985,7 @@ func TestConsulGateway_Equals_ingress(t *testing.T) { // proxy stanza equality checks t.Run("mod gateway timeout", func(t *testing.T) { - try(t, func(g *cg) { g.Proxy.ConnectTimeout = helper.TimeToPtr(9 * time.Second) }) + try(t, func(g *cg) { g.Proxy.ConnectTimeout = pointer.Of(9 * time.Second) }) }) t.Run("mod gateway envoy_gateway_bind_tagged_addresses", func(t *testing.T) { @@ -1267,7 +1267,7 @@ func TestConsulGatewayProxy_Validate(t *testing.T) { t.Run("invalid bind address", func(t *testing.T) { err := (&ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), EnvoyGatewayBindAddresses: map[string]*ConsulGatewayBindAddress{ "service1": { Address: "10.0.0.1", @@ -1279,7 +1279,7 @@ func TestConsulGatewayProxy_Validate(t *testing.T) { t.Run("invalid dns discovery type", func(t *testing.T) { err := (&ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), EnvoyDNSDiscoveryType: "RANDOM_DNS", }).Validate() require.EqualError(t, err, "Consul Gateway Proxy Envoy DNS Discovery type must be STRICT_DNS or LOGICAL_DNS") @@ -1287,14 +1287,14 @@ func TestConsulGatewayProxy_Validate(t *testing.T) { t.Run("ok with nothing set", func(t *testing.T) { err := (&ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), }).Validate() require.NoError(t, err) }) t.Run("ok with everything set", func(t *testing.T) { err := (&ConsulGatewayProxy{ - ConnectTimeout: helper.TimeToPtr(1 * time.Second), + ConnectTimeout: pointer.Of(1 * time.Second), EnvoyGatewayBindAddresses: map[string]*ConsulGatewayBindAddress{ "service1": { Address: "10.0.0.1", diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 824eddc09..7d2f20c53 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -37,6 +37,7 @@ import ( "github.com/hashicorp/nomad/helper/args" "github.com/hashicorp/nomad/helper/constraints/semver" "github.com/hashicorp/nomad/helper/escapingfs" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/lib/cpuset" "github.com/hashicorp/nomad/lib/kheap" @@ -10823,7 +10824,7 @@ func (a *AllocDeploymentStatus) Copy() *AllocDeploymentStatus { *c = *a if a.Healthy != nil { - c.Healthy = helper.BoolToPtr(*a.Healthy) + c.Healthy = pointer.Of(*a.Healthy) } return c diff --git a/nomad/structs/structs_test.go b/nomad/structs/structs_test.go index 06d9ce1be..1e26dd1bf 100644 --- a/nomad/structs/structs_test.go +++ b/nomad/structs/structs_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" 
"github.com/hashicorp/nomad/helper/uuid" "github.com/kr/pretty" "github.com/stretchr/testify/assert" @@ -2669,8 +2669,8 @@ func TestTemplate_Validate(t *testing.T) { DestPath: "local/foo", ChangeMode: "noop", Wait: &WaitConfig{ - Min: helper.TimeToPtr(10 * time.Second), - Max: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(10 * time.Second), + Max: pointer.Of(5 * time.Second), }, }, Fail: true, @@ -2684,8 +2684,8 @@ func TestTemplate_Validate(t *testing.T) { DestPath: "local/foo", ChangeMode: "noop", Wait: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(5 * time.Second), }, }, Fail: false, @@ -2696,8 +2696,8 @@ func TestTemplate_Validate(t *testing.T) { DestPath: "local/foo", ChangeMode: "noop", Wait: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, Fail: false, @@ -2734,12 +2734,12 @@ func TestTaskWaitConfig_Equals(t *testing.T) { { name: "all-fields", config: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, expected: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(10 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, { @@ -2750,19 +2750,19 @@ func TestTaskWaitConfig_Equals(t *testing.T) { { name: "min-only", config: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(5 * time.Second), }, expected: &WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(5 * time.Second), }, }, { name: "max-only", config: &WaitConfig{ - Max: helper.TimeToPtr(10 * time.Second), + Max: pointer.Of(10 * time.Second), }, expected: &WaitConfig{ - Max: helper.TimeToPtr(10 * time.Second), + Max: pointer.Of(10 * time.Second), }, }, } @@ -5229,11 +5229,11 @@ func TestAllocation_DisconnectTimeout(t *testing.T) { }, { desc: "has max_client_disconnect", - maxDisconnect: helper.TimeToPtr(30 * time.Second), + maxDisconnect: pointer.Of(30 * time.Second), }, { desc: "zero max_client_disconnect", - maxDisconnect: helper.TimeToPtr(0 * time.Second), + maxDisconnect: pointer.Of(0 * time.Second), }, } for _, tc := range testCases { diff --git a/nomad/vault_test.go b/nomad/vault_test.go index 7e5834b54..e7f262a2a 100644 --- a/nomad/vault_test.go +++ b/nomad/vault_test.go @@ -18,7 +18,7 @@ import ( "golang.org/x/time/rate" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -1302,7 +1302,7 @@ func TestVaultClient_CreateToken_Role_Unrecoverable(t *testing.T) { func TestVaultClient_CreateToken_Prestart(t *testing.T) { ci.Parallel(t) vconfig := &config.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Token: uuid.Generate(), Addr: "http://127.0.0.1:0", } @@ -1334,7 +1334,7 @@ func TestVaultClient_CreateToken_Prestart(t *testing.T) { func TestVaultClient_MarkForRevocation(t *testing.T) { vconfig := &config.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Token: uuid.Generate(), Addr: "http://127.0.0.1:0", } @@ -1362,7 +1362,7 @@ func TestVaultClient_MarkForRevocation(t *testing.T) { func TestVaultClient_RevokeTokens_PreEstablishs(t *testing.T) { 
ci.Parallel(t) vconfig := &config.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Token: uuid.Generate(), Addr: "http://127.0.0.1:0", } @@ -1408,7 +1408,7 @@ func TestVaultClient_RevokeTokens_PreEstablishs(t *testing.T) { func TestVaultClient_RevokeTokens_Failures_TTL(t *testing.T) { ci.Parallel(t) vconfig := &config.VaultConfig{ - Enabled: helper.BoolToPtr(true), + Enabled: pointer.Of(true), Token: uuid.Generate(), Addr: "http://127.0.0.1:0", } @@ -1673,7 +1673,7 @@ func TestVaultClient_RevokeDaemon_Bounded(t *testing.T) { // Disable client until we can change settings for testing conf := v.Config.Copy() - conf.Enabled = helper.BoolToPtr(false) + conf.Enabled = pointer.Of(false) const ( batchSize = 100 @@ -1702,7 +1702,7 @@ func TestVaultClient_RevokeDaemon_Bounded(t *testing.T) { client.maxRevokeBatchSize = batchSize client.revocationIntv = 3 * time.Millisecond conf = v.Config.Copy() - conf.Enabled = helper.BoolToPtr(true) + conf.Enabled = pointer.Of(true) require.NoError(t, client.SetConfig(conf)) client.SetActive(true) diff --git a/plugins/device/cmd/example/device.go b/plugins/device/cmd/example/device.go index 475f115b0..d6dc1fbbf 100644 --- a/plugins/device/cmd/example/device.go +++ b/plugins/device/cmd/example/device.go @@ -10,7 +10,7 @@ import ( "time" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/device" "github.com/hashicorp/nomad/plugins/shared/hclspec" @@ -343,23 +343,23 @@ func (d *FsDevice) collectStats() (*device.DeviceGroupStats, error) { s := &device.DeviceStats{ Summary: &structs.StatValue{ - IntNumeratorVal: helper.Int64ToPtr(f.Size()), + IntNumeratorVal: pointer.Of(f.Size()), Unit: "bytes", Desc: "Filesize in bytes", }, Stats: &structs.StatObject{ Attributes: map[string]*structs.StatValue{ "size": { - IntNumeratorVal: helper.Int64ToPtr(f.Size()), + IntNumeratorVal: pointer.Of(f.Size()), Unit: "bytes", Desc: "Filesize in bytes", }, "modify_time": { - StringVal: helper.StringToPtr(f.ModTime().String()), + StringVal: pointer.Of(f.ModTime().String()), Desc: "Last modified", }, "mode": { - StringVal: helper.StringToPtr(f.Mode().String()), + StringVal: pointer.Of(f.Mode().String()), Desc: "File mode", }, }, diff --git a/plugins/device/plugin_test.go b/plugins/device/plugin_test.go index 52629489f..4961faf87 100644 --- a/plugins/device/plugin_test.go +++ b/plugins/device/plugin_test.go @@ -9,7 +9,7 @@ import ( pb "github.com/golang/protobuf/proto" plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/shared/hclspec" @@ -195,7 +195,7 @@ func TestDevicePlugin_Fingerprint(t *testing.T) { Name: "foo", Attributes: map[string]*psstructs.Attribute{ "memory": { - Int: helper.Int64ToPtr(4), + Int: pointer.Of(int64(4)), Unit: "GiB", }, }, @@ -476,8 +476,8 @@ func TestDevicePlugin_Stats(t *testing.T) { InstanceStats: map[string]*DeviceStats{ "1": { Summary: &psstructs.StatValue{ - IntNumeratorVal: helper.Int64ToPtr(10), - IntDenominatorVal: helper.Int64ToPtr(20), + IntNumeratorVal: pointer.Of(int64(10)), + IntDenominatorVal: pointer.Of(int64(20)), Unit: "MB", Desc: "Unit test", }, @@ -493,8 +493,8 @@ func TestDevicePlugin_Stats(t *testing.T) { InstanceStats: map[string]*DeviceStats{ "1": { 
Summary: &psstructs.StatValue{ - FloatNumeratorVal: helper.Float64ToPtr(10.0), - FloatDenominatorVal: helper.Float64ToPtr(20.0), + FloatNumeratorVal: pointer.Of(float64(10.0)), + FloatDenominatorVal: pointer.Of(float64(20.0)), Unit: "MB", Desc: "Unit test", }, @@ -508,7 +508,7 @@ func TestDevicePlugin_Stats(t *testing.T) { InstanceStats: map[string]*DeviceStats{ "1": { Summary: &psstructs.StatValue{ - StringVal: helper.StringToPtr("foo"), + StringVal: pointer.Of("foo"), Unit: "MB", Desc: "Unit test", }, @@ -522,7 +522,7 @@ func TestDevicePlugin_Stats(t *testing.T) { InstanceStats: map[string]*DeviceStats{ "1": { Summary: &psstructs.StatValue{ - BoolVal: helper.BoolToPtr(true), + BoolVal: pointer.Of(true), Unit: "MB", Desc: "Unit test", }, diff --git a/plugins/shared/structs/attribute.go b/plugins/shared/structs/attribute.go index 9f09257ab..06fb6a5bd 100644 --- a/plugins/shared/structs/attribute.go +++ b/plugins/shared/structs/attribute.go @@ -7,7 +7,7 @@ import ( "strings" "unicode" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" ) const ( @@ -58,7 +58,7 @@ func (u *Unit) Comparable(o *Unit) bool { func ParseAttribute(input string) *Attribute { ll := len(input) if ll == 0 { - return &Attribute{String: helper.StringToPtr(input)} + return &Attribute{String: pointer.Of(input)} } // Check if the string is a number ending with potential units @@ -82,22 +82,22 @@ func ParseAttribute(input string) *Attribute { // Try to parse as an int i, err := strconv.ParseInt(numeric, 10, 64) if err == nil { - return &Attribute{Int: helper.Int64ToPtr(i), Unit: unit} + return &Attribute{Int: pointer.Of(i), Unit: unit} } // Try to parse as a float f, err := strconv.ParseFloat(numeric, 64) if err == nil { - return &Attribute{Float: helper.Float64ToPtr(f), Unit: unit} + return &Attribute{Float: pointer.Of(f), Unit: unit} } // Try to parse as a bool b, err := strconv.ParseBool(input) if err == nil { - return &Attribute{Bool: helper.BoolToPtr(b)} + return &Attribute{Bool: pointer.Of(b)} } - return &Attribute{String: helper.StringToPtr(input)} + return &Attribute{String: pointer.Of(input)} } // Attribute is used to describe the value of an attribute, optionally @@ -122,14 +122,14 @@ type Attribute struct { // NewStringAttribute returns a new string attribute. func NewStringAttribute(s string) *Attribute { return &Attribute{ - String: helper.StringToPtr(s), + String: pointer.Of(s), } } // NewBoolAttribute returns a new boolean attribute. func NewBoolAttribute(b bool) *Attribute { return &Attribute{ - Bool: helper.BoolToPtr(b), + Bool: pointer.Of(b), } } @@ -137,7 +137,7 @@ func NewBoolAttribute(b bool) *Attribute { // to be valid. func NewIntAttribute(i int64, unit string) *Attribute { return &Attribute{ - Int: helper.Int64ToPtr(i), + Int: pointer.Of(i), Unit: unit, } } @@ -146,7 +146,7 @@ func NewIntAttribute(i int64, unit string) *Attribute { // be valid. 
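One visible side effect of the generic constructor, seen in the test updates throughout this diff: pointer.Of infers its type parameter from its argument, so untyped constants that the old typed helpers coerced implicitly now need an explicit conversion (helper.Int64ToPtr(4) becomes pointer.Of(int64(4)), helper.Uint64ToPtr(10) becomes pointer.Of(uint64(10))). A small sketch of why, assuming the Of signature sketched earlier:

    package example

    import "github.com/hashicorp/nomad/helper/pointer"

    // Untyped constants take their Go default types under inference,
    // so int64/uint64 call sites must spell the type explicitly.
    var (
    	a = pointer.Of(4)        // *int     (untyped 4 defaults to int)
    	b = pointer.Of(int64(4)) // *int64   (conversion needed to get int64)
    	c = pointer.Of(10.0)     // *float64 (default type is already float64; the
    	                         // explicit float64(10.0) in the tests is for clarity)
    )
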
func NewFloatAttribute(f float64, unit string) *Attribute { return &Attribute{ - Float: helper.Float64ToPtr(f), + Float: pointer.Of(f), Unit: unit, } } @@ -202,16 +202,16 @@ func (a *Attribute) Copy() *Attribute { } if a.Float != nil { - ca.Float = helper.Float64ToPtr(*a.Float) + ca.Float = pointer.Of(*a.Float) } if a.Int != nil { - ca.Int = helper.Int64ToPtr(*a.Int) + ca.Int = pointer.Of(*a.Int) } if a.Bool != nil { - ca.Bool = helper.BoolToPtr(*a.Bool) + ca.Bool = pointer.Of(*a.Bool) } if a.String != nil { - ca.String = helper.StringToPtr(*a.String) + ca.String = pointer.Of(*a.String) } return ca diff --git a/plugins/shared/structs/attribute_test.go b/plugins/shared/structs/attribute_test.go index b30506764..70e1cf473 100644 --- a/plugins/shared/structs/attribute_test.go +++ b/plugins/shared/structs/attribute_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/stretchr/testify/require" ) @@ -15,56 +15,56 @@ func TestAttribute_Validate(t *testing.T) { }{ { Input: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, }, { Input: &Attribute{ - String: helper.StringToPtr("foo"), + String: pointer.Of("foo"), }, }, { Input: &Attribute{ - Int: helper.Int64ToPtr(123), + Int: pointer.Of(int64(123)), }, }, { Input: &Attribute{ - Float: helper.Float64ToPtr(123.2), + Float: pointer.Of(float64(123.2)), }, }, { Input: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), Unit: "MB", }, Fail: true, }, { Input: &Attribute{ - String: helper.StringToPtr("foo"), + String: pointer.Of("foo"), Unit: "MB", }, Fail: true, }, { Input: &Attribute{ - Int: helper.Int64ToPtr(123), + Int: pointer.Of(int64(123)), Unit: "lolNO", }, Fail: true, }, { Input: &Attribute{ - Float: helper.Float64ToPtr(123.2), + Float: pointer.Of(float64(123.2)), Unit: "lolNO", }, Fail: true, }, { Input: &Attribute{ - Int: helper.Int64ToPtr(123), - Float: helper.Float64ToPtr(123.2), + Int: pointer.Of(int64(123)), + Float: pointer.Of(float64(123.2)), Unit: "mW", }, Fail: true, @@ -91,46 +91,46 @@ func TestAttribute_Compare_Bool(t *testing.T) { cases := []*compareTestCase{ { A: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, B: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, Expected: 0, }, { A: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, B: &Attribute{ - Bool: helper.BoolToPtr(false), + Bool: pointer.Of(false), }, Expected: 1, }, { A: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, B: &Attribute{ - String: helper.StringToPtr("foo"), + String: pointer.Of("foo"), }, NotComparable: true, }, { A: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, B: &Attribute{ - Int: helper.Int64ToPtr(123), + Int: pointer.Of(int64(123)), }, NotComparable: true, }, { A: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, B: &Attribute{ - Float: helper.Float64ToPtr(123.2), + Float: pointer.Of(float64(123.2)), }, NotComparable: true, }, @@ -142,55 +142,55 @@ func TestAttribute_Compare_String(t *testing.T) { cases := []*compareTestCase{ { A: &Attribute{ - String: helper.StringToPtr("a"), + String: pointer.Of("a"), }, B: &Attribute{ - String: helper.StringToPtr("b"), + String: pointer.Of("b"), }, Expected: -1, }, { A: &Attribute{ - String: helper.StringToPtr("hello"), + String: pointer.Of("hello"), }, B: &Attribute{ - String: helper.StringToPtr("hello"), + String: pointer.Of("hello"), }, Expected: 
0, }, { A: &Attribute{ - String: helper.StringToPtr("b"), + String: pointer.Of("b"), }, B: &Attribute{ - String: helper.StringToPtr("a"), + String: pointer.Of("a"), }, Expected: 1, }, { A: &Attribute{ - String: helper.StringToPtr("hello"), + String: pointer.Of("hello"), }, B: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, NotComparable: true, }, { A: &Attribute{ - String: helper.StringToPtr("hello"), + String: pointer.Of("hello"), }, B: &Attribute{ - Int: helper.Int64ToPtr(123), + Int: pointer.Of(int64(123)), }, NotComparable: true, }, { A: &Attribute{ - String: helper.StringToPtr("hello"), + String: pointer.Of("hello"), }, B: &Attribute{ - Float: helper.Float64ToPtr(123.2), + Float: pointer.Of(float64(123.2)), }, NotComparable: true, }, @@ -202,46 +202,46 @@ func TestAttribute_Compare_Float(t *testing.T) { cases := []*compareTestCase{ { A: &Attribute{ - Float: helper.Float64ToPtr(101.5), + Float: pointer.Of(float64(101.5)), }, B: &Attribute{ - Float: helper.Float64ToPtr(100001.5), + Float: pointer.Of(float64(100001.5)), }, Expected: -1, }, { A: &Attribute{ - Float: helper.Float64ToPtr(100001.5), + Float: pointer.Of(float64(100001.5)), }, B: &Attribute{ - Float: helper.Float64ToPtr(100001.5), + Float: pointer.Of(float64(100001.5)), }, Expected: 0, }, { A: &Attribute{ - Float: helper.Float64ToPtr(999999999.5), + Float: pointer.Of(float64(999999999.5)), }, B: &Attribute{ - Float: helper.Float64ToPtr(101.5), + Float: pointer.Of(float64(101.5)), }, Expected: 1, }, { A: &Attribute{ - Float: helper.Float64ToPtr(101.5), + Float: pointer.Of(float64(101.5)), }, B: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, NotComparable: true, }, { A: &Attribute{ - Float: helper.Float64ToPtr(101.5), + Float: pointer.Of(float64(101.5)), }, B: &Attribute{ - String: helper.StringToPtr("hello"), + String: pointer.Of("hello"), }, NotComparable: true, }, @@ -253,46 +253,46 @@ func TestAttribute_Compare_Int(t *testing.T) { cases := []*compareTestCase{ { A: &Attribute{ - Int: helper.Int64ToPtr(3), + Int: pointer.Of(int64(3)), }, B: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), }, Expected: -1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), }, B: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), }, Expected: 0, }, { A: &Attribute{ - Int: helper.Int64ToPtr(100), + Int: pointer.Of(int64(100)), }, B: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), }, Expected: 1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), }, B: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, NotComparable: true, }, { A: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), }, B: &Attribute{ - String: helper.StringToPtr("hello"), + String: pointer.Of("hello"), }, NotComparable: true, }, @@ -304,77 +304,77 @@ func TestAttribute_Compare_Int_With_Units(t *testing.T) { cases := []*compareTestCase{ { A: &Attribute{ - Int: helper.Int64ToPtr(3), + Int: pointer.Of(int64(3)), Unit: "MB", }, B: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), Unit: "MB", }, Expected: -1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), Unit: "MB", }, B: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), Unit: "MB", }, Expected: 0, }, { A: &Attribute{ - Int: helper.Int64ToPtr(100), + Int: pointer.Of(int64(100)), Unit: "MB", }, B: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: 
pointer.Of(int64(10)), Unit: "MB", }, Expected: 1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(3), + Int: pointer.Of(int64(3)), Unit: "GB", }, B: &Attribute{ - Int: helper.Int64ToPtr(3), + Int: pointer.Of(int64(3)), Unit: "MB", }, Expected: 1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(1), + Int: pointer.Of(int64(1)), Unit: "GiB", }, B: &Attribute{ - Int: helper.Int64ToPtr(1024), + Int: pointer.Of(int64(1024)), Unit: "MiB", }, Expected: 0, }, { A: &Attribute{ - Int: helper.Int64ToPtr(1), + Int: pointer.Of(int64(1)), Unit: "GiB", }, B: &Attribute{ - Int: helper.Int64ToPtr(1025), + Int: pointer.Of(int64(1025)), Unit: "MiB", }, Expected: -1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(1000), + Int: pointer.Of(int64(1000)), Unit: "mW", }, B: &Attribute{ - Int: helper.Int64ToPtr(1), + Int: pointer.Of(int64(1)), Unit: "W", }, Expected: 0, @@ -387,88 +387,88 @@ func TestAttribute_Compare_Float_With_Units(t *testing.T) { cases := []*compareTestCase{ { A: &Attribute{ - Float: helper.Float64ToPtr(3.0), + Float: pointer.Of(float64(3.0)), Unit: "MB", }, B: &Attribute{ - Float: helper.Float64ToPtr(10.0), + Float: pointer.Of(float64(10.0)), Unit: "MB", }, Expected: -1, }, { A: &Attribute{ - Float: helper.Float64ToPtr(10.0), + Float: pointer.Of(float64(10.0)), Unit: "MB", }, B: &Attribute{ - Float: helper.Float64ToPtr(10.0), + Float: pointer.Of(float64(10.0)), Unit: "MB", }, Expected: 0, }, { A: &Attribute{ - Float: helper.Float64ToPtr(100.0), + Float: pointer.Of(float64(100.0)), Unit: "MB", }, B: &Attribute{ - Float: helper.Float64ToPtr(10.0), + Float: pointer.Of(float64(10.0)), Unit: "MB", }, Expected: 1, }, { A: &Attribute{ - Float: helper.Float64ToPtr(3.0), + Float: pointer.Of(float64(3.0)), Unit: "GB", }, B: &Attribute{ - Float: helper.Float64ToPtr(3.0), + Float: pointer.Of(float64(3.0)), Unit: "MB", }, Expected: 1, }, { A: &Attribute{ - Float: helper.Float64ToPtr(1.0), + Float: pointer.Of(float64(1.0)), Unit: "GiB", }, B: &Attribute{ - Float: helper.Float64ToPtr(1024.0), + Float: pointer.Of(float64(1024.0)), Unit: "MiB", }, Expected: 0, }, { A: &Attribute{ - Float: helper.Float64ToPtr(1.0), + Float: pointer.Of(float64(1.0)), Unit: "GiB", }, B: &Attribute{ - Float: helper.Float64ToPtr(1025.0), + Float: pointer.Of(float64(1025.0)), Unit: "MiB", }, Expected: -1, }, { A: &Attribute{ - Float: helper.Float64ToPtr(1000.0), + Float: pointer.Of(float64(1000.0)), Unit: "mW", }, B: &Attribute{ - Float: helper.Float64ToPtr(1.0), + Float: pointer.Of(float64(1.0)), Unit: "W", }, Expected: 0, }, { A: &Attribute{ - Float: helper.Float64ToPtr(1.5), + Float: pointer.Of(float64(1.5)), Unit: "GiB", }, B: &Attribute{ - Float: helper.Float64ToPtr(1400.0), + Float: pointer.Of(float64(1400.0)), Unit: "MiB", }, Expected: 1, @@ -481,46 +481,46 @@ func TestAttribute_Compare_IntToFloat(t *testing.T) { cases := []*compareTestCase{ { A: &Attribute{ - Int: helper.Int64ToPtr(3), + Int: pointer.Of(int64(3)), }, B: &Attribute{ - Float: helper.Float64ToPtr(10.0), + Float: pointer.Of(float64(10.0)), }, Expected: -1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), }, B: &Attribute{ - Float: helper.Float64ToPtr(10.0), + Float: pointer.Of(float64(10.0)), }, Expected: 0, }, { A: &Attribute{ - Int: helper.Int64ToPtr(10), + Int: pointer.Of(int64(10)), }, B: &Attribute{ - Float: helper.Float64ToPtr(10.1), + Float: pointer.Of(float64(10.1)), }, Expected: -1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(100), + Int: pointer.Of(int64(100)), }, B: &Attribute{ - Float: helper.Float64ToPtr(10.0), + Float: 
pointer.Of(float64(10.0)), }, Expected: 1, }, { A: &Attribute{ - Int: helper.Int64ToPtr(100), + Int: pointer.Of(int64(100)), }, B: &Attribute{ - Float: helper.Float64ToPtr(100.00001), + Float: pointer.Of(float64(100.00001)), }, Expected: -1, }, @@ -549,108 +549,108 @@ func TestAttribute_ParseAndValidate(t *testing.T) { { Input: "true", Expected: &Attribute{ - Bool: helper.BoolToPtr(true), + Bool: pointer.Of(true), }, }, { Input: "false", Expected: &Attribute{ - Bool: helper.BoolToPtr(false), + Bool: pointer.Of(false), }, }, { Input: "1", Expected: &Attribute{ - Int: helper.Int64ToPtr(1), + Int: pointer.Of(int64(1)), }, }, { Input: "100", Expected: &Attribute{ - Int: helper.Int64ToPtr(100), + Int: pointer.Of(int64(100)), }, }, { Input: "-100", Expected: &Attribute{ - Int: helper.Int64ToPtr(-100), + Int: pointer.Of(int64(-100)), }, }, { Input: "-1.0", Expected: &Attribute{ - Float: helper.Float64ToPtr(-1.0), + Float: pointer.Of(float64(-1.0)), }, }, { Input: "-100.25", Expected: &Attribute{ - Float: helper.Float64ToPtr(-100.25), + Float: pointer.Of(float64(-100.25)), }, }, { Input: "1.01", Expected: &Attribute{ - Float: helper.Float64ToPtr(1.01), + Float: pointer.Of(float64(1.01)), }, }, { Input: "100.25", Expected: &Attribute{ - Float: helper.Float64ToPtr(100.25), + Float: pointer.Of(float64(100.25)), }, }, { Input: "foobar", Expected: &Attribute{ - String: helper.StringToPtr("foobar"), + String: pointer.Of("foobar"), }, }, { Input: "foo123bar", Expected: &Attribute{ - String: helper.StringToPtr("foo123bar"), + String: pointer.Of("foo123bar"), }, }, { Input: "100MB", Expected: &Attribute{ - Int: helper.Int64ToPtr(100), + Int: pointer.Of(int64(100)), Unit: "MB", }, }, { Input: "-100MHz", Expected: &Attribute{ - Int: helper.Int64ToPtr(-100), + Int: pointer.Of(int64(-100)), Unit: "MHz", }, }, { Input: "-1.0MB/s", Expected: &Attribute{ - Float: helper.Float64ToPtr(-1.0), + Float: pointer.Of(float64(-1.0)), Unit: "MB/s", }, }, { Input: "-100.25GiB/s", Expected: &Attribute{ - Float: helper.Float64ToPtr(-100.25), + Float: pointer.Of(float64(-100.25)), Unit: "GiB/s", }, }, { Input: "1.01TB", Expected: &Attribute{ - Float: helper.Float64ToPtr(1.01), + Float: pointer.Of(float64(1.01)), Unit: "TB", }, }, { Input: "100.25mW", Expected: &Attribute{ - Float: helper.Float64ToPtr(100.25), + Float: pointer.Of(float64(100.25)), Unit: "mW", }, }, diff --git a/plugins/shared/structs/util.go b/plugins/shared/structs/util.go index 2a4b9b0e7..0e3500a43 100644 --- a/plugins/shared/structs/util.go +++ b/plugins/shared/structs/util.go @@ -2,7 +2,7 @@ package structs import ( "github.com/golang/protobuf/ptypes/wrappers" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/plugins/shared/structs/proto" ) @@ -13,13 +13,13 @@ func ConvertProtoAttribute(in *proto.Attribute) *Attribute { switch in.Value.(type) { case *proto.Attribute_BoolVal: - out.Bool = helper.BoolToPtr(in.GetBoolVal()) + out.Bool = pointer.Of(in.GetBoolVal()) case *proto.Attribute_FloatVal: - out.Float = helper.Float64ToPtr(in.GetFloatVal()) + out.Float = pointer.Of(in.GetFloatVal()) case *proto.Attribute_IntVal: - out.Int = helper.Int64ToPtr(in.GetIntVal()) + out.Int = pointer.Of(in.GetIntVal()) case *proto.Attribute_StringVal: - out.String = helper.StringToPtr(in.GetStringVal()) + out.String = pointer.Of(in.GetStringVal()) default: } diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go index 000e133d2..1711ca0c1 100644 --- a/scheduler/generic_sched_test.go +++ 
b/scheduler/generic_sched_test.go @@ -10,6 +10,7 @@ import ( memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -2338,7 +2339,7 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) { alloc.JobID = job.ID alloc.Name = fmt.Sprintf("my-job.web[%d]", i) alloc.DeploymentID = d.ID - alloc.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: helper.BoolToPtr(true)} + alloc.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)} alloc.AllocatedResources.Tasks[taskName].Devices = []*structs.AllocatedDeviceResource{&adr} alloc.AllocatedResources.Shared = asr allocs = append(allocs, alloc) @@ -2985,7 +2986,7 @@ func TestServiceSched_NodeDown(t *testing.T) { alloc.ClientStatus = tc.client // Mark for migration if necessary - alloc.DesiredTransition.Migrate = helper.BoolToPtr(tc.migrate) + alloc.DesiredTransition.Migrate = pointer.Of(tc.migrate) allocs := []*structs.Allocation{alloc} require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) @@ -3278,7 +3279,7 @@ func TestServiceSched_NodeDrain(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = fmt.Sprintf("my-job.web[%d]", i) - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) allocs = append(allocs, alloc) } require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) @@ -3365,7 +3366,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { for i := 0; i < 6; i++ { newAlloc := allocs[i].Copy() newAlloc.ClientStatus = structs.AllocDesiredStatusStop - newAlloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + newAlloc.DesiredTransition.Migrate = pointer.Of(true) stop = append(stop, newAlloc) } require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), stop)) @@ -3470,7 +3471,7 @@ func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = fmt.Sprintf("my-job.web[%d]", i) - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) allocs = append(allocs, alloc) } require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) @@ -3529,7 +3530,7 @@ func TestServiceSched_NodeDrain_TaskHandle(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = fmt.Sprintf("my-job.web[%d]", i) - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) alloc.TaskStates = map[string]*structs.TaskState{ "web": { TaskHandle: &structs.TaskHandle{ @@ -4178,7 +4179,7 @@ func TestDeployment_FailedAllocs_Reschedule(t *testing.T) { allocs[1].TaskStates = map[string]*structs.TaskState{"web": {State: "start", StartedAt: time.Now().Add(-12 * time.Hour), FinishedAt: time.Now().Add(-10 * time.Hour)}} - allocs[1].DesiredTransition.Reschedule = helper.BoolToPtr(true) + allocs[1].DesiredTransition.Reschedule = pointer.Of(true) require.Nil(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) @@ -5200,7 +5201,7 @@ func TestServiceSched_NodeDrain_Sticky(t *testing.T) { alloc.NodeID = node.ID alloc.Job.TaskGroups[0].Count = 1 alloc.Job.TaskGroups[0].EphemeralDisk.Sticky = true - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + 
alloc.DesiredTransition.Migrate = pointer.Of(true) require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), alloc.Job)) require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) @@ -5836,7 +5837,7 @@ func TestServiceSched_Migrate_NonCanary(t *testing.T) { alloc.Name = "my-job.web[0]" alloc.DesiredStatus = structs.AllocDesiredStatusRun alloc.ClientStatus = structs.AllocClientStatusRunning - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation @@ -6522,7 +6523,7 @@ func TestPropagateTaskState(t *testing.T) { prevAlloc: &structs.Allocation{ ClientStatus: structs.AllocClientStatusRunning, DesiredTransition: structs.DesiredTransition{ - Migrate: helper.BoolToPtr(true), + Migrate: pointer.Of(true), }, TaskStates: map[string]*structs.TaskState{ taskName: { @@ -6550,7 +6551,7 @@ func TestPropagateTaskState(t *testing.T) { prevAlloc: &structs.Allocation{ ClientStatus: structs.AllocClientStatusRunning, DesiredTransition: structs.DesiredTransition{ - Migrate: helper.BoolToPtr(true), + Migrate: pointer.Of(true), }, TaskStates: map[string]*structs.TaskState{ taskName: {}, diff --git a/scheduler/propertyset.go b/scheduler/propertyset.go index f6d48fe28..e2325744e 100644 --- a/scheduler/propertyset.go +++ b/scheduler/propertyset.go @@ -248,7 +248,7 @@ func (p *propertySet) UsedCount(option *structs.Node, tg string) (string, string // existing and proposed allocations. It also takes into account any stopped // allocations func (p *propertySet) GetCombinedUseMap() map[string]uint64 { - combinedUse := make(map[string]uint64, helper.IntMax(len(p.existingValues), len(p.proposedValues))) + combinedUse := make(map[string]uint64, helper.Max(len(p.existingValues), len(p.proposedValues))) for _, usedValues := range []map[string]uint64{p.existingValues, p.proposedValues} { for propertyValue, usedCount := range usedValues { combinedUse[propertyValue] += usedCount diff --git a/scheduler/reconcile.go b/scheduler/reconcile.go index cfdc0996d..7e645674c 100644 --- a/scheduler/reconcile.go +++ b/scheduler/reconcile.go @@ -766,7 +766,7 @@ func (a *allocReconciler) computeReplacements(deploymentPlaceReady bool, desired a.markStop(failed, "", allocRescheduled) desiredChanges.Stop += uint64(len(failed)) - min := helper.IntMin(len(place), underProvisionedBy) + min := helper.Min(len(place), underProvisionedBy) underProvisionedBy -= min return underProvisionedBy } @@ -778,7 +778,7 @@ func (a *allocReconciler) computeReplacements(deploymentPlaceReady bool, desired // If allocs have been lost, determine the number of replacements that are needed // and add placements to the result for the lost allocs. if len(lost) != 0 { - allowed := helper.IntMin(len(lost), len(place)) + allowed := helper.Min(len(lost), len(place)) desiredChanges.Place += uint64(allowed) a.result.place = append(a.result.place, place[:allowed]...) 
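// The scheduler/propertyset.go and scheduler/reconcile.go hunks in this region swap the
// integer-only helpers helper.IntMax and helper.IntMin for generic helper.Max and helper.Min.
// A minimal sketch of what such generic helpers look like, assuming Go 1.18+ and the
// golang.org/x/exp/constraints package (illustrative only; Nomad's actual helper package may
// define them differently):

package helper

import "golang.org/x/exp/constraints"

// Max returns the greater of the two values for any ordered type.
func Max[T constraints.Ordered](a, b T) T {
	if a > b {
		return a
	}
	return b
}

// Min returns the lesser of the two values for any ordered type.
func Min[T constraints.Ordered](a, b T) T {
	if a < b {
		return a
	}
	return b
}

// Because the type parameter is inferred from the arguments, the int call sites in these hunks
// (e.g. helper.Min(len(place), underProvisionedBy)) compile unchanged once the function name
// is swapped.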
} @@ -819,7 +819,7 @@ func (a *allocReconciler) computeDestructiveUpdates(destructive allocSet, underP desiredChanges *structs.DesiredUpdates, tg *structs.TaskGroup) { // Do all destructive updates - min := helper.IntMin(len(destructive), underProvisionedBy) + min := helper.Min(len(destructive), underProvisionedBy) desiredChanges.DestructiveUpdate += uint64(min) desiredChanges.Ignore += uint64(len(destructive) - min) for _, alloc := range destructive.nameOrder()[:min] { @@ -903,7 +903,7 @@ func (a *allocReconciler) isDeploymentComplete(groupName string, destructive, in // Final check to see if the deployment is complete is to ensure everything is healthy if dstate, ok := a.deployment.TaskGroups[groupName]; ok { - if dstate.HealthyAllocs < helper.IntMax(dstate.DesiredTotal, dstate.DesiredCanaries) || // Make sure we have enough healthy allocs + if dstate.HealthyAllocs < helper.Max(dstate.DesiredTotal, dstate.DesiredCanaries) || // Make sure we have enough healthy allocs (dstate.DesiredCanaries > 0 && !dstate.Promoted) { // Make sure we are promoted if we have canaries complete = false } diff --git a/scheduler/reconcile_test.go b/scheduler/reconcile_test.go index 3cb29cb90..cb986ded6 100644 --- a/scheduler/reconcile_test.go +++ b/scheduler/reconcile_test.go @@ -9,7 +9,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -334,7 +334,7 @@ func buildDisconnectedNodes(allocs []*structs.Allocation, count int) map[string] func buildResumableAllocations(count int, clientStatus, desiredStatus string, nodeScore float64) (*structs.Job, []*structs.Allocation) { job := mock.Job() - job.TaskGroups[0].MaxClientDisconnect = helper.TimeToPtr(5 * time.Minute) + job.TaskGroups[0].MaxClientDisconnect = pointer.Of(5 * time.Minute) job.TaskGroups[0].Count = count return job, buildAllocations(job, count, clientStatus, desiredStatus, nodeScore) @@ -1059,7 +1059,7 @@ func TestReconciler_DrainNode(t *testing.T) { for i := 0; i < 2; i++ { n := mock.DrainNode() n.ID = allocs[i].NodeID - allocs[i].DesiredTransition.Migrate = helper.BoolToPtr(true) + allocs[i].DesiredTransition.Migrate = pointer.Of(true) tainted[n.ID] = n } @@ -1114,7 +1114,7 @@ func TestReconciler_DrainNode_ScaleUp(t *testing.T) { for i := 0; i < 2; i++ { n := mock.DrainNode() n.ID = allocs[i].NodeID - allocs[i].DesiredTransition.Migrate = helper.BoolToPtr(true) + allocs[i].DesiredTransition.Migrate = pointer.Of(true) tainted[n.ID] = n } @@ -1170,7 +1170,7 @@ func TestReconciler_DrainNode_ScaleDown(t *testing.T) { for i := 0; i < 3; i++ { n := mock.DrainNode() n.ID = allocs[i].NodeID - allocs[i].DesiredTransition.Migrate = helper.BoolToPtr(true) + allocs[i].DesiredTransition.Migrate = pointer.Of(true) tainted[n.ID] = n } @@ -2269,7 +2269,7 @@ func TestReconciler_RescheduleNow_Service_WithCanaries(t *testing.T) { alloc.DeploymentID = d.ID alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: true, - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } s.PlacedCanaries = append(s.PlacedCanaries, alloc.ID) allocs = append(allocs, alloc) @@ -2360,7 +2360,7 @@ func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) { alloc.DeploymentID = d.ID alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: true, - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } s.PlacedCanaries = 
append(s.PlacedCanaries, alloc.ID) allocs = append(allocs, alloc) @@ -2368,7 +2368,7 @@ func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) { // Mark the canaries as failed allocs[5].ClientStatus = structs.AllocClientStatusFailed - allocs[5].DesiredTransition.Reschedule = helper.BoolToPtr(true) + allocs[5].DesiredTransition.Reschedule = pointer.Of(true) // Mark one of them as already rescheduled once allocs[5].RescheduleTracker = &structs.RescheduleTracker{Events: []*structs.RescheduleEvent{ @@ -2382,7 +2382,7 @@ func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) { StartedAt: now.Add(-1 * time.Hour), FinishedAt: now.Add(-10 * time.Second)}} allocs[6].ClientStatus = structs.AllocClientStatusFailed - allocs[6].DesiredTransition.Reschedule = helper.BoolToPtr(true) + allocs[6].DesiredTransition.Reschedule = pointer.Of(true) // Create 4 unhealthy canary allocations that have already been replaced for i := 0; i < 4; i++ { @@ -2395,7 +2395,7 @@ func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) { alloc.DeploymentID = d.ID alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: true, - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } s.PlacedCanaries = append(s.PlacedCanaries, alloc.ID) allocs = append(allocs, alloc) @@ -2490,7 +2490,7 @@ func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) { alloc.DeploymentID = d.ID alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: true, - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } s.PlacedCanaries = append(s.PlacedCanaries, alloc.ID) allocs = append(allocs, alloc) @@ -2498,7 +2498,7 @@ func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) { // Mark the canaries as failed allocs[5].ClientStatus = structs.AllocClientStatusFailed - allocs[5].DesiredTransition.Reschedule = helper.BoolToPtr(true) + allocs[5].DesiredTransition.Reschedule = pointer.Of(true) // Mark one of them as already rescheduled once allocs[5].RescheduleTracker = &structs.RescheduleTracker{Events: []*structs.RescheduleEvent{ @@ -2512,7 +2512,7 @@ func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) { StartedAt: now.Add(-1 * time.Hour), FinishedAt: now.Add(-10 * time.Second)}} allocs[6].ClientStatus = structs.AllocClientStatusFailed - allocs[6].DesiredTransition.Reschedule = helper.BoolToPtr(true) + allocs[6].DesiredTransition.Reschedule = pointer.Of(true) // Create 4 unhealthy canary allocations that have already been replaced for i := 0; i < 4; i++ { @@ -2525,7 +2525,7 @@ func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) { alloc.DeploymentID = d.ID alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ Canary: true, - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } s.PlacedCanaries = append(s.PlacedCanaries, alloc.ID) allocs = append(allocs, alloc) @@ -3255,7 +3255,7 @@ func TestReconciler_DrainNode_Canary(t *testing.T) { tainted := make(map[string]*structs.Node, 1) n := mock.DrainNode() n.ID = allocs[11].NodeID - allocs[11].DesiredTransition.Migrate = helper.BoolToPtr(true) + allocs[11].DesiredTransition.Migrate = pointer.Of(true) tainted[n.ID] = n mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive) @@ -3816,7 +3816,7 @@ func TestReconciler_PromoteCanaries_Unblock(t *testing.T) { s.PlacedCanaries = append(s.PlacedCanaries, canary.ID) canary.DeploymentID = d.ID canary.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } 
allocs = append(allocs, canary) handled[canary.ID] = allocUpdateFnIgnore @@ -3893,7 +3893,7 @@ func TestReconciler_PromoteCanaries_CanariesEqualCount(t *testing.T) { s.PlacedCanaries = append(s.PlacedCanaries, canary.ID) canary.DeploymentID = d.ID canary.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } allocs = append(allocs, canary) handled[canary.ID] = allocUpdateFnIgnore @@ -3994,7 +3994,7 @@ func TestReconciler_DeploymentLimit_HealthAccounting(t *testing.T) { new.DeploymentID = d.ID if i < c.healthy { new.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } } allocs = append(allocs, new) @@ -4065,7 +4065,7 @@ func TestReconciler_TaintedNode_RollingUpgrade(t *testing.T) { new.TaskGroup = job.TaskGroups[0].Name new.DeploymentID = d.ID new.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } allocs = append(allocs, new) handled[new.ID] = allocUpdateFnIgnore @@ -4080,7 +4080,7 @@ func TestReconciler_TaintedNode_RollingUpgrade(t *testing.T) { n.Status = structs.NodeStatusDown } else { n.DrainStrategy = mock.DrainNode().DrainStrategy - allocs[2+i].DesiredTransition.Migrate = helper.BoolToPtr(true) + allocs[2+i].DesiredTransition.Migrate = pointer.Of(true) } tainted[n.ID] = n } @@ -4153,7 +4153,7 @@ func TestReconciler_FailedDeployment_TaintedNodes(t *testing.T) { new.TaskGroup = job.TaskGroups[0].Name new.DeploymentID = d.ID new.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } allocs = append(allocs, new) handled[new.ID] = allocUpdateFnIgnore @@ -4168,7 +4168,7 @@ func TestReconciler_FailedDeployment_TaintedNodes(t *testing.T) { n.Status = structs.NodeStatusDown } else { n.DrainStrategy = mock.DrainNode().DrainStrategy - allocs[6+i].DesiredTransition.Migrate = helper.BoolToPtr(true) + allocs[6+i].DesiredTransition.Migrate = pointer.Of(true) } tainted[n.ID] = n } @@ -4228,7 +4228,7 @@ func TestReconciler_CompleteDeployment(t *testing.T) { alloc.TaskGroup = job.TaskGroups[0].Name alloc.DeploymentID = d.ID alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } allocs = append(allocs, alloc) } @@ -4281,11 +4281,11 @@ func TestReconciler_MarkDeploymentComplete_FailedAllocations(t *testing.T) { alloc.DeploymentStatus = &structs.AllocDeploymentStatus{} if i < 10 { alloc.ClientStatus = structs.AllocClientStatusRunning - alloc.DeploymentStatus.Healthy = helper.BoolToPtr(true) + alloc.DeploymentStatus.Healthy = pointer.Of(true) } else { alloc.DesiredStatus = structs.AllocDesiredStatusStop alloc.ClientStatus = structs.AllocClientStatusFailed - alloc.DeploymentStatus.Healthy = helper.BoolToPtr(false) + alloc.DeploymentStatus.Healthy = pointer.Of(false) } allocs = append(allocs, alloc) @@ -4367,7 +4367,7 @@ func TestReconciler_FailedDeployment_CancelCanaries(t *testing.T) { new.TaskGroup = job.TaskGroups[group].Name new.DeploymentID = d.ID new.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } allocs = append(allocs, new) handled[new.ID] = allocUpdateFnIgnore @@ -4452,7 +4452,7 @@ func TestReconciler_FailedDeployment_NewJob(t *testing.T) { new.TaskGroup = job.TaskGroups[0].Name new.DeploymentID = d.ID new.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: 
pointer.Of(true), } allocs = append(allocs, new) } @@ -4512,7 +4512,7 @@ func TestReconciler_MarkDeploymentComplete(t *testing.T) { alloc.TaskGroup = job.TaskGroups[0].Name alloc.DeploymentID = d.ID alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } allocs = append(allocs, alloc) } @@ -4801,7 +4801,7 @@ func TestReconciler_DeploymentWithFailedAllocs_DontReschedule(t *testing.T) { // Mark half of them as reschedulable for i := 0; i < 5; i++ { - allocs[i].DesiredTransition.Reschedule = helper.BoolToPtr(true) + allocs[i].DesiredTransition.Reschedule = pointer.Of(true) } reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, @@ -4870,7 +4870,7 @@ func TestReconciler_FailedDeployment_AutoRevert_CancelCanaries(t *testing.T) { new.TaskGroup = job.TaskGroups[0].Name new.DeploymentID = d.ID new.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), } new.ClientStatus = structs.AllocClientStatusRunning allocs = append(allocs, new) @@ -4885,7 +4885,7 @@ func TestReconciler_FailedDeployment_AutoRevert_CancelCanaries(t *testing.T) { new.TaskGroup = job.TaskGroups[0].Name new.DeploymentID = uuid.Generate() new.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(false), + Healthy: pointer.Of(false), } new.DesiredStatus = structs.AllocDesiredStatusStop new.ClientStatus = structs.AllocClientStatusFailed @@ -5022,7 +5022,7 @@ func TestReconciler_ForceReschedule_Service(t *testing.T) { }} // Mark DesiredTransition ForceReschedule - allocs[0].DesiredTransition = structs.DesiredTransition{ForceReschedule: helper.BoolToPtr(true)} + allocs[0].DesiredTransition = structs.DesiredTransition{ForceReschedule: pointer.Of(true)} reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, "", 50, true) @@ -5488,7 +5488,7 @@ func TestReconciler_Disconnected_Client(t *testing.T) { serverDesiredStatus: structs.AllocDesiredStatusRun, shouldStopOnDisconnectedNode: true, nodeStatusDisconnected: true, - maxDisconnect: helper.TimeToPtr(2 * time.Second), + maxDisconnect: pointer.Of(2 * time.Second), expected: &resultExpectation{ stop: 2, desiredTGUpdates: map[string]*structs.DesiredUpdates{ @@ -5926,7 +5926,7 @@ func TestReconciler_Client_Disconnect_Canaries(t *testing.T) { Canary: true, } if alloc.ClientStatus == structs.AllocClientStatusRunning { - alloc.DeploymentStatus.Healthy = helper.BoolToPtr(true) + alloc.DeploymentStatus.Healthy = pointer.Of(true) } tc.deploymentState.PlacedCanaries = append(tc.deploymentState.PlacedCanaries, alloc.ID) handled[alloc.ID] = allocUpdateFnIgnore diff --git a/scheduler/reconcile_util_test.go b/scheduler/reconcile_util_test.go index ab3867393..21f19814e 100644 --- a/scheduler/reconcile_util_test.go +++ b/scheduler/reconcile_util_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" @@ -61,7 +61,7 @@ func TestAllocSet_filterByTainted(t *testing.T) { } testJob := mock.Job() - testJob.TaskGroups[0].MaxClientDisconnect = helper.TimeToPtr(5 * time.Second) + testJob.TaskGroups[0].MaxClientDisconnect = pointer.Of(5 * time.Second) now := time.Now() testJobNoMaxDisconnect := mock.Job() @@ -143,7 +143,7 @@ func 
TestAllocSet_filterByTainted(t *testing.T) { "migrating1": { ID: "migrating1", ClientStatus: structs.AllocClientStatusRunning, - DesiredTransition: structs.DesiredTransition{Migrate: helper.BoolToPtr(true)}, + DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, Job: testJob, NodeID: "draining", }, @@ -151,7 +151,7 @@ func TestAllocSet_filterByTainted(t *testing.T) { "migrating2": { ID: "migrating2", ClientStatus: structs.AllocClientStatusRunning, - DesiredTransition: structs.DesiredTransition{Migrate: helper.BoolToPtr(true)}, + DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, Job: testJob, NodeID: "nil", }, @@ -190,7 +190,7 @@ func TestAllocSet_filterByTainted(t *testing.T) { "migrating1": { ID: "migrating1", ClientStatus: structs.AllocClientStatusRunning, - DesiredTransition: structs.DesiredTransition{Migrate: helper.BoolToPtr(true)}, + DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, Job: testJob, NodeID: "draining", }, @@ -198,7 +198,7 @@ func TestAllocSet_filterByTainted(t *testing.T) { "migrating2": { ID: "migrating2", ClientStatus: structs.AllocClientStatusRunning, - DesiredTransition: structs.DesiredTransition{Migrate: helper.BoolToPtr(true)}, + DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, Job: testJob, NodeID: "nil", }, diff --git a/scheduler/scheduler_sysbatch_test.go b/scheduler/scheduler_sysbatch_test.go index dcc3d6ea3..fac543699 100644 --- a/scheduler/scheduler_sysbatch_test.go +++ b/scheduler/scheduler_sysbatch_test.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -538,7 +538,7 @@ func TestSysBatch_NodeDown(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-sysbatch.pinger[0]" - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain @@ -652,7 +652,7 @@ func TestSysBatch_NodeDrain(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-sysbatch.pinger[0]" - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain @@ -1308,7 +1308,7 @@ func TestSysBatch_PlanWithDrainedNode(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-sysbatch.pinger[0]" - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) alloc.TaskGroup = "pinger" alloc2 := mock.SysBatchAlloc() diff --git a/scheduler/scheduler_system_test.go b/scheduler/scheduler_system_test.go index ce3b5cc51..bb26d7d69 100644 --- a/scheduler/scheduler_system_test.go +++ b/scheduler/scheduler_system_test.go @@ -9,7 +9,7 @@ import ( memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -1016,7 +1016,7 @@ func 
TestSystemSched_NodeDown(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain @@ -1130,7 +1130,7 @@ func TestSystemSched_NodeDrain(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc})) // Create a mock evaluation to deal with drain @@ -1731,7 +1731,7 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) { alloc.JobID = job.ID alloc.NodeID = node.ID alloc.Name = "my-job.web[0]" - alloc.DesiredTransition.Migrate = helper.BoolToPtr(true) + alloc.DesiredTransition.Migrate = pointer.Of(true) alloc.TaskGroup = "web" alloc2 := mock.Alloc() @@ -2895,7 +2895,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { require.FailNow(t, "invalid jobType") } - job.TaskGroups[0].MaxClientDisconnect = helper.TimeToPtr(5 * time.Second) + job.TaskGroups[0].MaxClientDisconnect = pointer.Of(5 * time.Second) if !tc.required { job.Stop = true @@ -2914,7 +2914,7 @@ func TestSystemSched_NodeDisconnected(t *testing.T) { alloc.TaskGroup = job.TaskGroups[0].Name alloc.ClientStatus = tc.clientStatus alloc.DesiredStatus = tc.desiredStatus - alloc.DesiredTransition.Migrate = helper.BoolToPtr(tc.migrate) + alloc.DesiredTransition.Migrate = pointer.Of(tc.migrate) alloc.AllocStates = tc.allocState alloc.TaskStates = tc.taskState diff --git a/scheduler/spread_test.go b/scheduler/spread_test.go index adba6ffa5..ea581b9a1 100644 --- a/scheduler/spread_test.go +++ b/scheduler/spread_test.go @@ -9,7 +9,7 @@ import ( "time" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -877,7 +877,7 @@ func TestSpreadPanicDowngrade(t *testing.T) { alloc.JobID = job1.ID alloc.NodeID = nodes[i].ID alloc.DeploymentStatus = &structs.AllocDeploymentStatus{ - Healthy: helper.BoolToPtr(true), + Healthy: pointer.Of(true), Timestamp: time.Now(), Canary: false, ModifyIndex: h.NextIndex(), diff --git a/scheduler/util_test.go b/scheduler/util_test.go index ff7984fb5..4133e9edc 100644 --- a/scheduler/util_test.go +++ b/scheduler/util_test.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" - "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -163,7 +163,7 @@ func TestDiffSystemAllocsForNode(t *testing.T) { Name: "my-job.web[2]", Job: oldJob, DesiredTransition: structs.DesiredTransition{ - Migrate: helper.BoolToPtr(true), + Migrate: pointer.Of(true), }, }, // Mark the 4th lost @@ -340,7 +340,7 @@ func TestDiffSystemAllocs(t *testing.T) { Name: "my-job.web[0]", Job: oldJob, DesiredTransition: structs.DesiredTransition{ - Migrate: helper.BoolToPtr(true), + Migrate: pointer.Of(true), }, }, // Mark as lost on a dead node @@ -793,8 +793,8 @@ func TestTasksUpdated(t *testing.T) { 
j22.TaskGroups[0].Tasks[0].Templates = []*structs.Template{ { Wait: &structs.WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(5 * time.Second), }, }, } @@ -802,14 +802,14 @@ func TestTasksUpdated(t *testing.T) { j23.TaskGroups[0].Tasks[0].Templates = []*structs.Template{ { Wait: &structs.WaitConfig{ - Min: helper.TimeToPtr(5 * time.Second), - Max: helper.TimeToPtr(5 * time.Second), + Min: pointer.Of(5 * time.Second), + Max: pointer.Of(5 * time.Second), }, }, } require.False(t, tasksUpdated(j22, j23, name)) // Compare changed Template wait configs - j23.TaskGroups[0].Tasks[0].Templates[0].Wait.Max = helper.TimeToPtr(10 * time.Second) + j23.TaskGroups[0].Tasks[0].Templates[0].Wait.Max = pointer.Of(10 * time.Second) require.True(t, tasksUpdated(j22, j23, name)) // Add a volume
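// Throughout these hunks the per-type pointer constructors (helper.BoolToPtr,
// helper.Int64ToPtr, helper.Float64ToPtr, helper.StringToPtr, helper.TimeToPtr) are replaced
// by a single generic constructor from the new helper/pointer package. A minimal sketch of
// such a helper, assuming Go 1.18 generics (illustrative; the real package may carry
// additional helpers):

package pointer

// Of returns a pointer to a copy of the value it is passed, for any type.
func Of[A any](a A) *A {
	return &a
}

// Because the type parameter is inferred from the argument, untyped numeric literals would
// default to int or float64; that is why call sites that previously used Int64ToPtr or
// Float64ToPtr now spell the conversion explicitly, e.g. pointer.Of(int64(4)) and
// pointer.Of(float64(10.0)), while already-typed values such as 5*time.Minute or f.Size()
// need no wrapping.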