Merge pull request #3398 from hashicorp/f-consul-1.0.0

Bump Consul to 1.0.0 in vagrant/travis
Michael Schurter
2017-10-17 14:03:08 -07:00
committed by GitHub
32 changed files with 609 additions and 2845 deletions

View File

@@ -1064,6 +1064,8 @@ func createCheckReg(serviceID, checkID string, check *structs.ServiceCheck, host
chkReg.TCP = net.JoinHostPort(host, strconv.Itoa(port))
case structs.ServiceCheckScript:
chkReg.TTL = (check.Interval + ttlCheckBuffer).String()
// As of Consul 1.0.0 setting TTL and Interval is a 400
chkReg.Interval = ""
default:
return nil, fmt.Errorf("check type %+q not valid", check.Type)
}
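
For context, a rough sketch (not part of this PR) of what such a registration looks like through the Consul API client: the script check is registered as a TTL check that the agent heartbeats itself, and Interval is left empty because Consul 1.0.0 rejects a check that sets both with a 400. The IDs and durations below are invented for illustration.

package main

import (
	"log"
	"time"

	consulapi "github.com/hashicorp/consul/api"
)

func main() {
	client, err := consulapi.NewClient(consulapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	interval := 10 * time.Second
	ttlCheckBuffer := 31 * time.Second // hypothetical buffer value
	reg := &consulapi.AgentCheckRegistration{
		ID:        "example-check-id",     // hypothetical
		Name:      "example-script-check", // hypothetical
		ServiceID: "example-service-id",   // hypothetical
		AgentServiceCheck: consulapi.AgentServiceCheck{
			// TTL only; Interval stays empty so Consul 1.0.0 accepts it.
			TTL: (interval + ttlCheckBuffer).String(),
		},
	}
	if err := client.Agent().CheckRegister(reg); err != nil {
		log.Fatal(err)
	}
}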

View File

@@ -19,6 +19,7 @@ import (
"github.com/hashicorp/nomad/command/agent/consul"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/stretchr/testify/assert"
)
func testLogger() *log.Logger {
@@ -37,6 +38,8 @@ func TestConsul_Integration(t *testing.T) {
if testing.Short() {
t.Skip("-short set; skipping")
}
assert := assert.New(t)
// Create an embedded Consul server
testconsul, err := testutil.NewTestServerConfig(func(c *testutil.TestServerConfig) {
// If -v wasn't specified squelch consul logging
@@ -128,9 +131,8 @@ func TestConsul_Integration(t *testing.T) {
taskDir := allocDir.NewTaskDir(task.Name)
vclient := vaultclient.NewMockVaultClient()
consulClient, err := consulapi.NewClient(consulConfig)
if err != nil {
t.Fatalf("error creating consul client: %v", err)
}
assert.Nil(err)
serviceClient := consul.NewServiceClient(consulClient.Agent(), true, logger)
defer serviceClient.Shutdown() // just-in-case cleanup
consulRan := make(chan struct{})
@@ -154,9 +156,7 @@ func TestConsul_Integration(t *testing.T) {
// Block waiting for the service to appear
catalog := consulClient.Catalog()
res, meta, err := catalog.Service("httpd2", "test", nil)
if err != nil {
t.Fatalf("bad: %v", err)
}
assert.Nil(err)
for i := 0; len(res) == 0 && i < 10; i++ {
//Expected initial request to fail, do a blocking query
@@ -165,32 +165,25 @@ func TestConsul_Integration(t *testing.T) {
t.Fatalf("error querying for service: %v", err)
}
}
if len(res) != 1 {
t.Fatalf("expected 1 service but found %d:\n%#v", len(res), res)
}
assert.Len(res, 1)
// Truncate results
res = res[:]
// Assert the service with the checks exists
for i := 0; len(res) == 0 && i < 10; i++ {
res, meta, err = catalog.Service("httpd", "http", &consulapi.QueryOptions{WaitIndex: meta.LastIndex + 1, WaitTime: 3 * time.Second})
if err != nil {
t.Fatalf("error querying for service: %v", err)
}
}
if len(res) != 1 {
t.Fatalf("exepcted 1 service but found %d:\n%#v", len(res), res)
assert.Nil(err)
}
assert.Len(res, 1)
// Assert the script check passes (mock_driver script checks always
// pass) after having time to run once
time.Sleep(2 * time.Second)
checks, _, err := consulClient.Health().Checks("httpd", nil)
if err != nil {
t.Fatalf("error querying checks: %v", err)
}
if expected := 2; len(checks) != expected {
t.Fatalf("expected %d checks but found %d:\n%#v", expected, len(checks), checks)
}
assert.Nil(err)
assert.Len(checks, 2)
for _, check := range checks {
if expected := "httpd"; check.ServiceName != expected {
t.Fatalf("expected checks to be for %q but found service name = %q", expected, check.ServiceName)
@@ -244,13 +237,7 @@ func TestConsul_Integration(t *testing.T) {
// Ensure Consul is clean
services, _, err := catalog.Services(nil)
if err != nil {
t.Fatalf("error query services: %v", err)
}
if len(services) != 1 {
t.Fatalf("expected only 1 service in Consul but found %d:\n%#v", len(services), services)
}
if _, ok := services["consul"]; !ok {
t.Fatalf(`expected only the "consul" key in Consul but found: %#v`, services)
}
assert.Nil(err)
assert.Len(services, 1)
assert.Contains(services, "consul")
}
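
The test conversion above replaces hand-rolled t.Fatalf error handling with testify's assert helpers. A minimal self-contained sketch of that pattern (not from the PR); note that, unlike t.Fatalf, these assertions record a failure and let the test continue.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestAssertPattern(t *testing.T) {
	assert := assert.New(t)

	var err error
	services := map[string][]string{"consul": nil}

	assert.Nil(err)                     // passes: err is nil
	assert.Len(services, 1)             // passes: exactly one entry
	assert.Contains(services, "consul") // passes: key is present
}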

View File

@@ -2,7 +2,7 @@
set -o errexit
VERSION=0.9.2
VERSION=1.0.0
DOWNLOAD=https://releases.hashicorp.com/consul/${VERSION}/consul_${VERSION}_linux_amd64.zip
function install_consul() {

View File

@@ -2,7 +2,7 @@
set -o errexit
VERSION=0.9.2
VERSION=1.0.0
DOWNLOAD=https://releases.hashicorp.com/consul/${VERSION}/consul_${VERSION}_linux_amd64.zip
function install_consul() {

354
vendor/github.com/hashicorp/consul/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,354 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients' rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients'
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party's negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party's ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.

View File

@@ -1,672 +0,0 @@
package acl
import (
"github.com/armon/go-radix"
)
var (
// allowAll is a singleton policy which allows all
// non-management actions
allowAll ACL
// denyAll is a singleton policy which denies all actions
denyAll ACL
// manageAll is a singleton policy which allows all
// actions, including management
manageAll ACL
)
func init() {
// Setup the singletons
allowAll = &StaticACL{
allowManage: false,
defaultAllow: true,
}
denyAll = &StaticACL{
allowManage: false,
defaultAllow: false,
}
manageAll = &StaticACL{
allowManage: true,
defaultAllow: true,
}
}
// ACL is the interface for policy enforcement.
type ACL interface {
// ACLList checks for permission to list all the ACLs
ACLList() bool
// ACLModify checks for permission to manipulate ACLs
ACLModify() bool
// AgentRead checks for permission to read from agent endpoints for a
// given node.
AgentRead(string) bool
// AgentWrite checks for permission to make changes via agent endpoints
// for a given node.
AgentWrite(string) bool
// EventRead determines if a specific event can be queried.
EventRead(string) bool
// EventWrite determines if a specific event may be fired.
EventWrite(string) bool
// KeyRead checks for permission to read a given key
KeyRead(string) bool
// KeyWrite checks for permission to write a given key
KeyWrite(string) bool
// KeyWritePrefix checks for permission to write to an
// entire key prefix. This means there must be no sub-policies
// that deny a write.
KeyWritePrefix(string) bool
// KeyringRead determines if the encryption keyring used in
// the gossip layer can be read.
KeyringRead() bool
// KeyringWrite determines if the keyring can be manipulated
KeyringWrite() bool
// NodeRead checks for permission to read (discover) a given node.
NodeRead(string) bool
// NodeWrite checks for permission to create or update (register) a
// given node.
NodeWrite(string) bool
// OperatorRead determines if the read-only Consul operator functions
// can be used.
OperatorRead() bool
// OperatorWrite determines if the state-changing Consul operator
// functions can be used.
OperatorWrite() bool
// PreparedQueryRead determines if a specific prepared query can be read
// to show its contents (this is not used for execution).
PreparedQueryRead(string) bool
// PreparedQueryWrite determines if a specific prepared query can be
// created, modified, or deleted.
PreparedQueryWrite(string) bool
// ServiceRead checks for permission to read a given service
ServiceRead(string) bool
// ServiceWrite checks for permission to create or update a given
// service
ServiceWrite(string) bool
// SessionRead checks for permission to read sessions for a given node.
SessionRead(string) bool
// SessionWrite checks for permission to create sessions for a given
// node.
SessionWrite(string) bool
// Snapshot checks for permission to take and restore snapshots.
Snapshot() bool
}
// StaticACL is used to implement a base ACL policy. It either
// allows or denies all requests. This can be used as a parent
// ACL to act in a blacklist or whitelist mode.
type StaticACL struct {
allowManage bool
defaultAllow bool
}
func (s *StaticACL) ACLList() bool {
return s.allowManage
}
func (s *StaticACL) ACLModify() bool {
return s.allowManage
}
func (s *StaticACL) AgentRead(string) bool {
return s.defaultAllow
}
func (s *StaticACL) AgentWrite(string) bool {
return s.defaultAllow
}
func (s *StaticACL) EventRead(string) bool {
return s.defaultAllow
}
func (s *StaticACL) EventWrite(string) bool {
return s.defaultAllow
}
func (s *StaticACL) KeyRead(string) bool {
return s.defaultAllow
}
func (s *StaticACL) KeyWrite(string) bool {
return s.defaultAllow
}
func (s *StaticACL) KeyWritePrefix(string) bool {
return s.defaultAllow
}
func (s *StaticACL) KeyringRead() bool {
return s.defaultAllow
}
func (s *StaticACL) KeyringWrite() bool {
return s.defaultAllow
}
func (s *StaticACL) NodeRead(string) bool {
return s.defaultAllow
}
func (s *StaticACL) NodeWrite(string) bool {
return s.defaultAllow
}
func (s *StaticACL) OperatorRead() bool {
return s.defaultAllow
}
func (s *StaticACL) OperatorWrite() bool {
return s.defaultAllow
}
func (s *StaticACL) PreparedQueryRead(string) bool {
return s.defaultAllow
}
func (s *StaticACL) PreparedQueryWrite(string) bool {
return s.defaultAllow
}
func (s *StaticACL) ServiceRead(string) bool {
return s.defaultAllow
}
func (s *StaticACL) ServiceWrite(string) bool {
return s.defaultAllow
}
func (s *StaticACL) SessionRead(string) bool {
return s.defaultAllow
}
func (s *StaticACL) SessionWrite(string) bool {
return s.defaultAllow
}
func (s *StaticACL) Snapshot() bool {
return s.allowManage
}
// AllowAll returns an ACL rule that allows all operations
func AllowAll() ACL {
return allowAll
}
// DenyAll returns an ACL rule that denies all operations
func DenyAll() ACL {
return denyAll
}
// ManageAll returns an ACL rule that can manage all resources
func ManageAll() ACL {
return manageAll
}
// RootACL returns a possible ACL if the ID matches a root policy
func RootACL(id string) ACL {
switch id {
case "allow":
return allowAll
case "deny":
return denyAll
case "manage":
return manageAll
default:
return nil
}
}
// PolicyACL is used to wrap a set of ACL policies to provide
// the ACL interface.
type PolicyACL struct {
// parent is used to resolve policy if we have
// no matching rule.
parent ACL
// agentRules contains the agent policies
agentRules *radix.Tree
// keyRules contains the key policies
keyRules *radix.Tree
// nodeRules contains the node policies
nodeRules *radix.Tree
// serviceRules contains the service policies
serviceRules *radix.Tree
// sessionRules contains the session policies
sessionRules *radix.Tree
// eventRules contains the user event policies
eventRules *radix.Tree
// preparedQueryRules contains the prepared query policies
preparedQueryRules *radix.Tree
// keyringRule contains the keyring policies. The keyring has
// a very simple yes/no without prefix matching, so here we
// don't need to use a radix tree.
keyringRule string
// operatorRule contains the operator policies.
operatorRule string
}
// New is used to construct a policy based ACL from a set of policies
// and a parent policy to resolve missing cases.
func New(parent ACL, policy *Policy) (*PolicyACL, error) {
p := &PolicyACL{
parent: parent,
agentRules: radix.New(),
keyRules: radix.New(),
nodeRules: radix.New(),
serviceRules: radix.New(),
sessionRules: radix.New(),
eventRules: radix.New(),
preparedQueryRules: radix.New(),
}
// Load the agent policy
for _, ap := range policy.Agents {
p.agentRules.Insert(ap.Node, ap.Policy)
}
// Load the key policy
for _, kp := range policy.Keys {
p.keyRules.Insert(kp.Prefix, kp.Policy)
}
// Load the node policy
for _, np := range policy.Nodes {
p.nodeRules.Insert(np.Name, np.Policy)
}
// Load the service policy
for _, sp := range policy.Services {
p.serviceRules.Insert(sp.Name, sp.Policy)
}
// Load the session policy
for _, sp := range policy.Sessions {
p.sessionRules.Insert(sp.Node, sp.Policy)
}
// Load the event policy
for _, ep := range policy.Events {
p.eventRules.Insert(ep.Event, ep.Policy)
}
// Load the prepared query policy
for _, pq := range policy.PreparedQueries {
p.preparedQueryRules.Insert(pq.Prefix, pq.Policy)
}
// Load the keyring policy
p.keyringRule = policy.Keyring
// Load the operator policy
p.operatorRule = policy.Operator
return p, nil
}
// ACLList checks if listing of ACLs is allowed
func (p *PolicyACL) ACLList() bool {
return p.parent.ACLList()
}
// ACLModify checks if modification of ACLs is allowed
func (p *PolicyACL) ACLModify() bool {
return p.parent.ACLModify()
}
// AgentRead checks for permission to read from agent endpoints for a given
// node.
func (p *PolicyACL) AgentRead(node string) bool {
// Check for an exact rule or catch-all
_, rule, ok := p.agentRules.LongestPrefix(node)
if ok {
switch rule {
case PolicyRead, PolicyWrite:
return true
default:
return false
}
}
// No matching rule, use the parent.
return p.parent.AgentRead(node)
}
// AgentWrite checks for permission to make changes via agent endpoints for a
// given node.
func (p *PolicyACL) AgentWrite(node string) bool {
// Check for an exact rule or catch-all
_, rule, ok := p.agentRules.LongestPrefix(node)
if ok {
switch rule {
case PolicyWrite:
return true
default:
return false
}
}
// No matching rule, use the parent.
return p.parent.AgentWrite(node)
}
// Snapshot checks if taking and restoring snapshots is allowed.
func (p *PolicyACL) Snapshot() bool {
return p.parent.Snapshot()
}
// EventRead is used to determine if the policy allows for a
// specific user event to be read.
func (p *PolicyACL) EventRead(name string) bool {
// Longest-prefix match on event names
if _, rule, ok := p.eventRules.LongestPrefix(name); ok {
switch rule {
case PolicyRead, PolicyWrite:
return true
default:
return false
}
}
// Nothing matched, use parent
return p.parent.EventRead(name)
}
// EventWrite is used to determine if new events can be created
// (fired) by the policy.
func (p *PolicyACL) EventWrite(name string) bool {
// Longest-prefix match event names
if _, rule, ok := p.eventRules.LongestPrefix(name); ok {
return rule == PolicyWrite
}
// No match, use parent
return p.parent.EventWrite(name)
}
// KeyRead returns if a key is allowed to be read
func (p *PolicyACL) KeyRead(key string) bool {
// Look for a matching rule
_, rule, ok := p.keyRules.LongestPrefix(key)
if ok {
switch rule.(string) {
case PolicyRead, PolicyWrite:
return true
default:
return false
}
}
// No matching rule, use the parent.
return p.parent.KeyRead(key)
}
// KeyWrite returns if a key is allowed to be written
func (p *PolicyACL) KeyWrite(key string) bool {
// Look for a matching rule
_, rule, ok := p.keyRules.LongestPrefix(key)
if ok {
switch rule.(string) {
case PolicyWrite:
return true
default:
return false
}
}
// No matching rule, use the parent.
return p.parent.KeyWrite(key)
}
// KeyWritePrefix returns if a prefix is allowed to be written
func (p *PolicyACL) KeyWritePrefix(prefix string) bool {
// Look for a matching rule that denies
_, rule, ok := p.keyRules.LongestPrefix(prefix)
if ok && rule.(string) != PolicyWrite {
return false
}
// Look if any of our children have a deny policy
deny := false
p.keyRules.WalkPrefix(prefix, func(path string, rule interface{}) bool {
// We have a rule to prevent a write in a sub-directory!
if rule.(string) != PolicyWrite {
deny = true
return true
}
return false
})
// Deny the write if any sub-rules may be violated
if deny {
return false
}
// If we had a matching rule, done
if ok {
return true
}
// No matching rule, use the parent.
return p.parent.KeyWritePrefix(prefix)
}
// KeyringRead is used to determine if the keyring can be
// read by the current ACL token.
func (p *PolicyACL) KeyringRead() bool {
switch p.keyringRule {
case PolicyRead, PolicyWrite:
return true
case PolicyDeny:
return false
default:
return p.parent.KeyringRead()
}
}
// KeyringWrite determines if the keyring can be manipulated.
func (p *PolicyACL) KeyringWrite() bool {
if p.keyringRule == PolicyWrite {
return true
}
return p.parent.KeyringWrite()
}
// OperatorRead determines if the read-only operator functions are allowed.
func (p *PolicyACL) OperatorRead() bool {
switch p.operatorRule {
case PolicyRead, PolicyWrite:
return true
case PolicyDeny:
return false
default:
return p.parent.OperatorRead()
}
}
// NodeRead checks if reading (discovery) of a node is allowed
func (p *PolicyACL) NodeRead(name string) bool {
// Check for an exact rule or catch-all
_, rule, ok := p.nodeRules.LongestPrefix(name)
if ok {
switch rule {
case PolicyRead, PolicyWrite:
return true
default:
return false
}
}
// No matching rule, use the parent.
return p.parent.NodeRead(name)
}
// NodeWrite checks if writing (registering) a node is allowed
func (p *PolicyACL) NodeWrite(name string) bool {
// Check for an exact rule or catch-all
_, rule, ok := p.nodeRules.LongestPrefix(name)
if ok {
switch rule {
case PolicyWrite:
return true
default:
return false
}
}
// No matching rule, use the parent.
return p.parent.NodeWrite(name)
}
// OperatorWrite determines if the state-changing operator functions are
// allowed.
func (p *PolicyACL) OperatorWrite() bool {
if p.operatorRule == PolicyWrite {
return true
}
return p.parent.OperatorWrite()
}
// PreparedQueryRead checks if reading (listing) of a prepared query is
// allowed - this isn't execution, just listing its contents.
func (p *PolicyACL) PreparedQueryRead(prefix string) bool {
// Check for an exact rule or catch-all
_, rule, ok := p.preparedQueryRules.LongestPrefix(prefix)
if ok {
switch rule {
case PolicyRead, PolicyWrite:
return true
default:
return false
}
}
// No matching rule, use the parent.
return p.parent.PreparedQueryRead(prefix)
}
// PreparedQueryWrite checks if writing (creating, updating, or deleting) of a
// prepared query is allowed.
func (p *PolicyACL) PreparedQueryWrite(prefix string) bool {
// Check for an exact rule or catch-all
_, rule, ok := p.preparedQueryRules.LongestPrefix(prefix)
if ok {
switch rule {
case PolicyWrite:
return true
default:
return false
}
}
// No matching rule, use the parent.
return p.parent.PreparedQueryWrite(prefix)
}
// ServiceRead checks if reading (discovery) of a service is allowed
func (p *PolicyACL) ServiceRead(name string) bool {
// Check for an exact rule or catch-all
_, rule, ok := p.serviceRules.LongestPrefix(name)
if ok {
switch rule {
case PolicyRead, PolicyWrite:
return true
default:
return false
}
}
// No matching rule, use the parent.
return p.parent.ServiceRead(name)
}
// ServiceWrite checks if writing (registering) a service is allowed
func (p *PolicyACL) ServiceWrite(name string) bool {
// Check for an exact rule or catch-all
_, rule, ok := p.serviceRules.LongestPrefix(name)
if ok {
switch rule {
case PolicyWrite:
return true
default:
return false
}
}
// No matching rule, use the parent.
return p.parent.ServiceWrite(name)
}
// SessionRead checks for permission to read sessions for a given node.
func (p *PolicyACL) SessionRead(node string) bool {
// Check for an exact rule or catch-all
_, rule, ok := p.sessionRules.LongestPrefix(node)
if ok {
switch rule {
case PolicyRead, PolicyWrite:
return true
default:
return false
}
}
// No matching rule, use the parent.
return p.parent.SessionRead(node)
}
// SessionWrite checks for permission to create sessions for a given node.
func (p *PolicyACL) SessionWrite(node string) bool {
// Check for an exact rule or catch-all
_, rule, ok := p.sessionRules.LongestPrefix(node)
if ok {
switch rule {
case PolicyWrite:
return true
default:
return false
}
}
// No matching rule, use the parent.
return p.parent.SessionWrite(node)
}
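
The file above is the legacy Consul ACL package being dropped from Nomad's vendor tree. For reference, a minimal sketch of how it was used, based only on the code shown here (the rule contents are invented):

package main

import (
	"fmt"

	"github.com/hashicorp/consul/acl"
)

func main() {
	// Grant read on keys under "foo/"; everything else falls through to the
	// deny-all parent.
	policy := &acl.Policy{
		Keys: []*acl.KeyPolicy{
			{Prefix: "foo/", Policy: acl.PolicyRead},
		},
	}

	a, err := acl.New(acl.DenyAll(), policy)
	if err != nil {
		panic(err)
	}

	fmt.Println(a.KeyRead("foo/bar"))  // true: longest prefix "foo/" allows read
	fmt.Println(a.KeyWrite("foo/bar")) // false: read does not imply write
	fmt.Println(a.KeyRead("other"))    // false: no match, parent denies
}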

View File

@@ -1,177 +0,0 @@
package acl
import (
"crypto/md5"
"fmt"
"github.com/hashicorp/golang-lru"
)
// FaultFunc is a function used to fault in the parent and
// rules for an ACL given its ID
type FaultFunc func(id string) (string, string, error)
// aclEntry allows us to store the ACL with its policy ID
type aclEntry struct {
ACL ACL
Parent string
RuleID string
}
// Cache is used to implement policy and ACL caching
type Cache struct {
faultfn FaultFunc
aclCache *lru.TwoQueueCache // Cache id -> acl
policyCache *lru.TwoQueueCache // Cache policy -> acl
ruleCache *lru.TwoQueueCache // Cache rules -> policy
}
// NewCache constructs a new policy and ACL cache of a given size
func NewCache(size int, faultfn FaultFunc) (*Cache, error) {
if size <= 0 {
return nil, fmt.Errorf("Must provide positive cache size")
}
rc, err := lru.New2Q(size)
if err != nil {
return nil, err
}
pc, err := lru.New2Q(size)
if err != nil {
return nil, err
}
ac, err := lru.New2Q(size)
if err != nil {
return nil, err
}
c := &Cache{
faultfn: faultfn,
aclCache: ac,
policyCache: pc,
ruleCache: rc,
}
return c, nil
}
// GetPolicy is used to get a potentially cached policy set.
// If not cached, it will be parsed, and then cached.
func (c *Cache) GetPolicy(rules string) (*Policy, error) {
return c.getPolicy(RuleID(rules), rules)
}
// getPolicy is an internal method to get a cached policy,
// but it assumes a pre-computed ID
func (c *Cache) getPolicy(id, rules string) (*Policy, error) {
raw, ok := c.ruleCache.Get(id)
if ok {
return raw.(*Policy), nil
}
policy, err := Parse(rules)
if err != nil {
return nil, err
}
policy.ID = id
c.ruleCache.Add(id, policy)
return policy, nil
}
// RuleID is used to generate an ID for a rule
func RuleID(rules string) string {
return fmt.Sprintf("%x", md5.Sum([]byte(rules)))
}
// policyID returns the cache ID for a policy
func (c *Cache) policyID(parent, ruleID string) string {
return parent + ":" + ruleID
}
// GetACLPolicy is used to get the potentially cached ACL
// policy. If not cached, it will be generated and then cached.
func (c *Cache) GetACLPolicy(id string) (string, *Policy, error) {
// Check for a cached acl
if raw, ok := c.aclCache.Get(id); ok {
cached := raw.(aclEntry)
if raw, ok := c.ruleCache.Get(cached.RuleID); ok {
return cached.Parent, raw.(*Policy), nil
}
}
// Fault in the rules
parent, rules, err := c.faultfn(id)
if err != nil {
return "", nil, err
}
// Get cached
policy, err := c.GetPolicy(rules)
return parent, policy, err
}
// GetACL is used to get a potentially cached ACL policy.
// If not cached, it will be generated and then cached.
func (c *Cache) GetACL(id string) (ACL, error) {
// Look for the ACL directly
raw, ok := c.aclCache.Get(id)
if ok {
return raw.(aclEntry).ACL, nil
}
// Get the rules
parentID, rules, err := c.faultfn(id)
if err != nil {
return nil, err
}
ruleID := RuleID(rules)
// Check for a compiled ACL
policyID := c.policyID(parentID, ruleID)
var compiled ACL
if raw, ok := c.policyCache.Get(policyID); ok {
compiled = raw.(ACL)
} else {
// Get the policy
policy, err := c.getPolicy(ruleID, rules)
if err != nil {
return nil, err
}
// Get the parent ACL
parent := RootACL(parentID)
if parent == nil {
parent, err = c.GetACL(parentID)
if err != nil {
return nil, err
}
}
// Compile the ACL
acl, err := New(parent, policy)
if err != nil {
return nil, err
}
// Cache the compiled ACL
c.policyCache.Add(policyID, acl)
compiled = acl
}
// Cache and return the ACL
c.aclCache.Add(id, aclEntry{compiled, parentID, ruleID})
return compiled, nil
}
// ClearACL is used to clear the ACL cache if any
func (c *Cache) ClearACL(id string) {
c.aclCache.Remove(id)
}
// Purge is used to clear all the ACL caches. The
// rule and policy caches are not purged, since they
// are content-hashed anyways.
func (c *Cache) Purge() {
c.aclCache.Purge()
}
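
Similarly, a rough sketch of how this (now removed) Cache was wired up; the FaultFunc below is a stub that returns a hard-coded parent and rule set purely for illustration:

package main

import (
	"fmt"

	"github.com/hashicorp/consul/acl"
)

func main() {
	// Resolve a token ID to its parent policy name and raw rules.
	faultfn := func(id string) (string, string, error) {
		return "deny", `key "foo/" { policy = "read" }`, nil
	}

	cache, err := acl.NewCache(128, faultfn)
	if err != nil {
		panic(err)
	}

	// The first call faults in and compiles the ACL; later calls for the same
	// ID are served from the 2Q LRU caches.
	a, err := cache.GetACL("some-token-id")
	if err != nil {
		panic(err)
	}
	fmt.Println(a.KeyRead("foo/bar")) // true
}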

View File

@@ -1,191 +0,0 @@
package acl
import (
"fmt"
"github.com/hashicorp/hcl"
)
const (
PolicyDeny = "deny"
PolicyRead = "read"
PolicyWrite = "write"
)
// Policy is used to represent the policy specified by
// an ACL configuration.
type Policy struct {
ID string `hcl:"-"`
Agents []*AgentPolicy `hcl:"agent,expand"`
Keys []*KeyPolicy `hcl:"key,expand"`
Nodes []*NodePolicy `hcl:"node,expand"`
Services []*ServicePolicy `hcl:"service,expand"`
Sessions []*SessionPolicy `hcl:"session,expand"`
Events []*EventPolicy `hcl:"event,expand"`
PreparedQueries []*PreparedQueryPolicy `hcl:"query,expand"`
Keyring string `hcl:"keyring"`
Operator string `hcl:"operator"`
}
// AgentPolicy represents a policy for working with agent endpoints on nodes
// with specific name prefixes.
type AgentPolicy struct {
Node string `hcl:",key"`
Policy string
}
func (a *AgentPolicy) GoString() string {
return fmt.Sprintf("%#v", *a)
}
// KeyPolicy represents a policy for a key
type KeyPolicy struct {
Prefix string `hcl:",key"`
Policy string
}
func (k *KeyPolicy) GoString() string {
return fmt.Sprintf("%#v", *k)
}
// NodePolicy represents a policy for a node
type NodePolicy struct {
Name string `hcl:",key"`
Policy string
}
func (n *NodePolicy) GoString() string {
return fmt.Sprintf("%#v", *n)
}
// ServicePolicy represents a policy for a service
type ServicePolicy struct {
Name string `hcl:",key"`
Policy string
}
func (s *ServicePolicy) GoString() string {
return fmt.Sprintf("%#v", *s)
}
// SessionPolicy represents a policy for making sessions tied to specific node
// name prefixes.
type SessionPolicy struct {
Node string `hcl:",key"`
Policy string
}
func (s *SessionPolicy) GoString() string {
return fmt.Sprintf("%#v", *s)
}
// EventPolicy represents a user event policy.
type EventPolicy struct {
Event string `hcl:",key"`
Policy string
}
func (e *EventPolicy) GoString() string {
return fmt.Sprintf("%#v", *e)
}
// PreparedQueryPolicy represents a prepared query policy.
type PreparedQueryPolicy struct {
Prefix string `hcl:",key"`
Policy string
}
func (p *PreparedQueryPolicy) GoString() string {
return fmt.Sprintf("%#v", *p)
}
// isPolicyValid makes sure the given string matches one of the valid policies.
func isPolicyValid(policy string) bool {
switch policy {
case PolicyDeny:
return true
case PolicyRead:
return true
case PolicyWrite:
return true
default:
return false
}
}
// Parse is used to parse the specified ACL rules into an
// intermediary set of policies, before being compiled into
// the ACL
func Parse(rules string) (*Policy, error) {
// Decode the rules
p := &Policy{}
if rules == "" {
// Hot path for empty rules
return p, nil
}
if err := hcl.Decode(p, rules); err != nil {
return nil, fmt.Errorf("Failed to parse ACL rules: %v", err)
}
// Validate the agent policy
for _, ap := range p.Agents {
if !isPolicyValid(ap.Policy) {
return nil, fmt.Errorf("Invalid agent policy: %#v", ap)
}
}
// Validate the key policy
for _, kp := range p.Keys {
if !isPolicyValid(kp.Policy) {
return nil, fmt.Errorf("Invalid key policy: %#v", kp)
}
}
// Validate the node policies
for _, np := range p.Nodes {
if !isPolicyValid(np.Policy) {
return nil, fmt.Errorf("Invalid node policy: %#v", np)
}
}
// Validate the service policies
for _, sp := range p.Services {
if !isPolicyValid(sp.Policy) {
return nil, fmt.Errorf("Invalid service policy: %#v", sp)
}
}
// Validate the session policies
for _, sp := range p.Sessions {
if !isPolicyValid(sp.Policy) {
return nil, fmt.Errorf("Invalid session policy: %#v", sp)
}
}
// Validate the user event policies
for _, ep := range p.Events {
if !isPolicyValid(ep.Policy) {
return nil, fmt.Errorf("Invalid event policy: %#v", ep)
}
}
// Validate the prepared query policies
for _, pq := range p.PreparedQueries {
if !isPolicyValid(pq.Policy) {
return nil, fmt.Errorf("Invalid query policy: %#v", pq)
}
}
// Validate the keyring policy - this one is allowed to be empty
if p.Keyring != "" && !isPolicyValid(p.Keyring) {
return nil, fmt.Errorf("Invalid keyring policy: %#v", p.Keyring)
}
// Validate the operator policy - this one is allowed to be empty
if p.Operator != "" && !isPolicyValid(p.Operator) {
return nil, fmt.Errorf("Invalid operator policy: %#v", p.Operator)
}
return p, nil
}
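
For reference, the legacy HCL rule syntax this parser accepted, exercised in a short sketch (the rules themselves are invented):

package main

import (
	"fmt"

	"github.com/hashicorp/consul/acl"
)

func main() {
	rules := `
key "foo/" {
  policy = "write"
}
service "web" {
  policy = "read"
}
operator = "read"
`
	policy, err := acl.Parse(rules)
	if err != nil {
		panic(err)
	}
	fmt.Println(policy.Keys[0].Prefix, policy.Keys[0].Policy)       // foo/ write
	fmt.Println(policy.Services[0].Name, policy.Services[0].Policy) // web read
	fmt.Println(policy.Operator)                                    // read
}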

View File

@@ -44,6 +44,19 @@ type AgentMember struct {
DelegateCur uint8
}
// AllSegments is used to select for all segments in MembersOpts.
const AllSegments = "_all"
// MembersOpts is used for querying member information.
type MembersOpts struct {
// WAN is whether to show members from the WAN.
WAN bool
// Segment is the LAN segment to show members for. Setting this to the
// AllSegments value above will show members in all segments.
Segment string
}
// AgentServiceRegistration is used to register a new service
type AgentServiceRegistration struct {
ID string `json:",omitempty"`
@@ -256,6 +269,28 @@ func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
return out, nil
}
// MembersOpts returns the known gossip members and can be passed
// additional options for WAN/segment filtering.
func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) {
r := a.c.newRequest("GET", "/v1/agent/members")
r.params.Set("segment", opts.Segment)
if opts.WAN {
r.params.Set("wan", "1")
}
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []*AgentMember
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// ServiceRegister is used to register a new service with
// the local agent
func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error {
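
The new MembersOpts call can be exercised roughly as follows (a sketch against the updated API; AllSegments comes from the hunk above):

package main

import (
	"fmt"
	"log"

	consulapi "github.com/hashicorp/consul/api"
)

func main() {
	client, err := consulapi.NewClient(consulapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Ask the local agent for LAN members across every network segment.
	members, err := client.Agent().MembersOpts(consulapi.MembersOpts{
		Segment: consulapi.AllSegments,
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range members {
		fmt.Println(m.Name, m.Addr)
	}
}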

View File

@@ -446,6 +446,7 @@ func NewClient(config *Config) (*Client, error) {
if len(parts) == 2 {
switch parts[0] {
case "http":
config.Scheme = "http"
case "https":
config.Scheme = "https"
case "unix":
@@ -462,10 +463,11 @@ func NewClient(config *Config) (*Client, error) {
config.Address = parts[1]
}
client := &Client{
config: *config,
if config.Token == "" {
config.Token = defConfig.Token
}
return client, nil
return &Client{config: *config}, nil
}
// NewHttpClient returns an http client configured with the given Transport and TLS
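
A sketch of what the token fallback above means for callers: a Config built by hand with an empty Token now picks up the default token, which DefaultConfig sources from the CONSUL_HTTP_TOKEN environment variable (the address is illustrative):

package main

import (
	"fmt"
	"log"

	consulapi "github.com/hashicorp/consul/api"
)

func main() {
	// Token left empty on purpose; NewClient now falls back to the default
	// config's token (CONSUL_HTTP_TOKEN) instead of leaving it blank.
	cfg := &consulapi.Config{Address: "127.0.0.1:8500"}

	client, err := consulapi.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(client != nil)
}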
@@ -553,13 +555,20 @@ func durToMsec(dur time.Duration) string {
// serverError is a string we look for to detect 500 errors.
const serverError = "Unexpected response code: 500"
// IsServerError returns true for 500 errors from the Consul servers, these are
// usually retryable at a later time.
func IsServerError(err error) bool {
// IsRetryableError returns true for 500 errors from the Consul servers, and
// network connection errors. These are usually retryable at a later time.
// This applies to reads but NOT to writes. This may return true for errors
// on writes that may have still gone through, so do not use this to retry
// any write operations.
func IsRetryableError(err error) bool {
if err == nil {
return false
}
if _, ok := err.(net.Error); ok {
return true
}
// TODO (slackpad) - Make a real error type here instead of using
// a string check.
return strings.Contains(err.Error(), serverError)
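
IsServerError is renamed and broadened to IsRetryableError, which also treats net.Error values as retryable. A sketch of the intended read-only retry pattern (the key name and retry count are invented; never use this for writes):

package main

import (
	"fmt"
	"log"
	"time"

	consulapi "github.com/hashicorp/consul/api"
)

func main() {
	client, err := consulapi.NewClient(consulapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	kv := client.KV()
	var pair *consulapi.KVPair
	// Retry reads on 500s and network errors; writes must not be retried this way.
	for retries := 3; ; retries-- {
		pair, _, err = kv.Get("some/key", nil)
		if err == nil || retries <= 0 || !consulapi.IsRetryableError(err) {
			break
		}
		time.Sleep(time.Second)
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pair)
}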

View File

@@ -6,8 +6,9 @@ import (
// CoordinateEntry represents a node and its associated network coordinate.
type CoordinateEntry struct {
Node string
Coord *coordinate.Coordinate
Node string
Segment string
Coord *coordinate.Coordinate
}
// CoordinateDatacenterMap has the coordinates for servers in a given datacenter

View File

@@ -353,19 +353,19 @@ type TxnResponse struct {
//
// Here's an example:
//
// ops := KVTxnOps{
// &KVTxnOp{
// Verb: KVLock,
// Key: "test/lock",
// Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
// Value: []byte("hello"),
// },
// &KVTxnOp{
// Verb: KVGet,
// Key: "another/key",
// },
// }
// ok, response, _, err := kv.Txn(&ops, nil)
// ops := KVTxnOps{
// &KVTxnOp{
// Verb: KVLock,
// Key: "test/lock",
// Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
// Value: []byte("hello"),
// },
// &KVTxnOp{
// Verb: KVGet,
// Key: "another/key",
// },
// }
// ok, response, _, err := kv.Txn(&ops, nil)
//
// If there is a problem making the transaction request then an error will be
// returned. Otherwise, the ok value will be true if the transaction succeeded

View File

@@ -370,7 +370,7 @@ RETRY:
// by doing retries. Note that we have to attempt the retry in a non-
// blocking fashion so that we have a clean place to reset the retry
// counter if service is restored.
if retries > 0 && IsServerError(err) {
if retries > 0 && IsRetryableError(err) {
time.Sleep(l.opts.MonitorRetryTime)
retries--
opts.WaitIndex = 0

View File

@@ -25,6 +25,10 @@ type Area struct {
// RetryJoin specifies the address of Consul servers to join to, such as
// an IPs or hostnames with an optional port number. This is optional.
RetryJoin []string
// UseTLS specifies whether gossip over this area should be encrypted with TLS
// if possible.
UseTLS bool
}
// AreaJoinResponse is returned when a join occurs and gives the result for each
@@ -100,6 +104,27 @@ func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta,
return out.ID, wm, nil
}
// AreaUpdate will update the configuration of the network area with the given ID.
func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (string, *WriteMeta, error) {
r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID)
r.setWriteOptions(q)
r.obj = area
rtt, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// AreaGet returns a single network area.
func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) {
var out []*Area

View File

@@ -13,6 +13,9 @@ type KeyringResponse struct {
// The datacenter name this request corresponds to
Datacenter string
// Segment has the network segment this request corresponds to.
Segment string
// A map of the encryption keys to the number of nodes they're installed on
Keys map[string]int

View File

@@ -17,6 +17,9 @@ type RaftServer struct {
// Leader is true if this server is the current cluster leader.
Leader bool
// Protocol version is the raft protocol version used by the server
ProtocolVersion string
// Voter is true if this server has a vote in the cluster. This might
// be false if the server is staging and still coming online, or if
// it's a non-voting server, which will be added in a future release of
@@ -24,7 +27,7 @@ type RaftServer struct {
Voter bool
}
// RaftConfigration is returned when querying for the current Raft configuration.
// RaftConfiguration is returned when querying for the current Raft configuration.
type RaftConfiguration struct {
// Servers has the list of servers in the Raft configuration.
Servers []*RaftServer

View File

@@ -0,0 +1,11 @@
package api
// SegmentList returns all the available LAN segments.
func (op *Operator) SegmentList(q *QueryOptions) ([]string, *QueryMeta, error) {
var out []string
qm, err := op.c.query("/v1/operator/segment", &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}
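
A sketch of calling the new endpoint (assumption: on OSS Consul the list contains only the default empty segment name):

package main

import (
	"fmt"
	"log"

	consulapi "github.com/hashicorp/consul/api"
)

func main() {
	client, err := consulapi.NewClient(consulapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// LAN segments are a Consul Enterprise feature; OSS is expected to return
	// only the default "" segment.
	segments, _, err := client.Operator().SegmentList(nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(segments)
}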

View File

@@ -492,7 +492,7 @@ RETRY:
// by doing retries. Note that we have to attempt the retry in a non-
// blocking fashion so that we have a clean place to reset the retry
// counter if service is restored.
if retries > 0 && IsServerError(err) {
if retries > 0 && IsRetryableError(err) {
time.Sleep(s.opts.MonitorRetryTime)
retries--
opts.WaitIndex = 0

View File

@@ -1,221 +0,0 @@
package structs
import (
"time"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/serf"
)
// AutopilotConfig holds the Autopilot configuration for a cluster.
type AutopilotConfig struct {
// CleanupDeadServers controls whether to remove dead servers when a new
// server is added to the Raft peers.
CleanupDeadServers bool
// LastContactThreshold is the limit on the amount of time a server can go
// without leader contact before being considered unhealthy.
LastContactThreshold time.Duration
// MaxTrailingLogs is the amount of entries in the Raft Log that a server can
// be behind before being considered unhealthy.
MaxTrailingLogs uint64
// ServerStabilizationTime is the minimum amount of time a server must be
// in a stable, healthy state before it can be added to the cluster. Only
// applicable with Raft protocol version 3 or higher.
ServerStabilizationTime time.Duration
// (Enterprise-only) RedundancyZoneTag is the node tag to use for separating
// servers into zones for redundancy. If left blank, this feature will be disabled.
RedundancyZoneTag string
// (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration
// strategy of waiting until enough newer-versioned servers have been added to the
// cluster before promoting them to voters.
DisableUpgradeMigration bool
// RaftIndex stores the create/modify indexes of this configuration.
RaftIndex
}
// RaftServer has information about a server in the Raft configuration.
type RaftServer struct {
// ID is the unique ID for the server. These are currently the same
// as the address, but they will be changed to a real GUID in a future
// release of Consul.
ID raft.ServerID
// Node is the node name of the server, as known by Consul, or this
// will be set to "(unknown)" otherwise.
Node string
// Address is the IP:port of the server, used for Raft communications.
Address raft.ServerAddress
// Leader is true if this server is the current cluster leader.
Leader bool
// Voter is true if this server has a vote in the cluster. This might
// be false if the server is staging and still coming online, or if
// it's a non-voting server, which will be added in a future release of
// Consul.
Voter bool
}
// RaftConfigurationResponse is returned when querying for the current Raft
// configuration.
type RaftConfigurationResponse struct {
// Servers has the list of servers in the Raft configuration.
Servers []*RaftServer
// Index has the Raft index of this configuration.
Index uint64
}
// RaftRemovePeerRequest is used by the Operator endpoint to apply a Raft
// operation on a specific Raft peer by address in the form of "IP:port".
type RaftRemovePeerRequest struct {
// Datacenter is the target this request is intended for.
Datacenter string
// Address is the peer to remove, in the form "IP:port".
Address raft.ServerAddress
// ID is the peer ID to remove.
ID raft.ServerID
// WriteRequest holds the ACL token to go along with this request.
WriteRequest
}
// RequestDatacenter returns the datacenter for a given request.
func (op *RaftRemovePeerRequest) RequestDatacenter() string {
return op.Datacenter
}
// AutopilotSetConfigRequest is used by the Operator endpoint to update the
// current Autopilot configuration of the cluster.
type AutopilotSetConfigRequest struct {
// Datacenter is the target this request is intended for.
Datacenter string
// Config is the new Autopilot configuration to use.
Config AutopilotConfig
// CAS controls whether to use check-and-set semantics for this request.
CAS bool
// WriteRequest holds the ACL token to go along with this request.
WriteRequest
}
// RequestDatacenter returns the datacenter for a given request.
func (op *AutopilotSetConfigRequest) RequestDatacenter() string {
return op.Datacenter
}
// ServerHealth is the health (from the leader's point of view) of a server.
type ServerHealth struct {
// ID is the raft ID of the server.
ID string
// Name is the node name of the server.
Name string
// Address is the address of the server.
Address string
// The status of the SerfHealth check for the server.
SerfStatus serf.MemberStatus
// Version is the Consul version of the server.
Version string
// Leader is whether this server is currently the leader.
Leader bool
// LastContact is the time since this node's last contact with the leader.
LastContact time.Duration
// LastTerm is the highest leader term this server has a record of in its Raft log.
LastTerm uint64
// LastIndex is the last log index this server has a record of in its Raft log.
LastIndex uint64
// Healthy is whether or not the server is healthy according to the current
// Autopilot config.
Healthy bool
// Voter is whether this is a voting server.
Voter bool
// StableSince is the last time this server's Healthy value changed.
StableSince time.Time
}
// IsHealthy determines whether this ServerHealth is considered healthy
// based on the given Autopilot config
func (h *ServerHealth) IsHealthy(lastTerm uint64, leaderLastIndex uint64, autopilotConf *AutopilotConfig) bool {
if h.SerfStatus != serf.StatusAlive {
return false
}
if h.LastContact > autopilotConf.LastContactThreshold || h.LastContact < 0 {
return false
}
if h.LastTerm != lastTerm {
return false
}
if leaderLastIndex > autopilotConf.MaxTrailingLogs && h.LastIndex < leaderLastIndex-autopilotConf.MaxTrailingLogs {
return false
}
return true
}
// IsStable returns true if the ServerHealth is in a stable, passing state
// according to the given AutopilotConfig
func (h *ServerHealth) IsStable(now time.Time, conf *AutopilotConfig) bool {
if h == nil {
return false
}
if !h.Healthy {
return false
}
if now.Sub(h.StableSince) < conf.ServerStabilizationTime {
return false
}
return true
}
// ServerStats holds miscellaneous Raft metrics for a server
type ServerStats struct {
// LastContact is the time since this node's last contact with the leader.
LastContact string
// LastTerm is the highest leader term this server has a record of in its Raft log.
LastTerm uint64
// LastIndex is the last log index this server has a record of in its Raft log.
LastIndex uint64
}
// OperatorHealthReply is a representation of the overall health of the cluster
type OperatorHealthReply struct {
// Healthy is true if all the servers in the cluster are healthy.
Healthy bool
// FailureTolerance is the number of healthy servers that could be lost without
// an outage occurring.
FailureTolerance int
// Servers holds the health of each server.
Servers []ServerHealth
}

View File

@@ -1,257 +0,0 @@
package structs
// QueryDatacenterOptions sets options about how we fail over if there are no
// healthy nodes in the local datacenter.
type QueryDatacenterOptions struct {
// NearestN is set to the number of remote datacenters to try, based on
// network coordinates.
NearestN int
// Datacenters is a fixed list of datacenters to try after NearestN. We
// never try a datacenter multiple times, so those are subtracted from
// this list before proceeding.
Datacenters []string
}
// QueryDNSOptions controls settings when query results are served over DNS.
type QueryDNSOptions struct {
// TTL is the time to live for the served DNS results.
TTL string
}
// ServiceQuery is used to query for a set of healthy nodes offering a specific
// service.
type ServiceQuery struct {
// Service is the service to query.
Service string
// Failover controls what we do if there are no healthy nodes in the
// local datacenter.
Failover QueryDatacenterOptions
// If OnlyPassing is true then we will only include nodes with passing
// health checks (critical AND warning checks will cause a node to be
// discarded)
OnlyPassing bool
// Near allows the query to always prefer the node nearest the given
// node. If the node does not exist, results are returned in their
// normal randomly-shuffled order. Supplying the magic "_agent" value
// is supported to sort near the agent which initiated the request.
Near string
// Tags are a set of required and/or disallowed tags. If a tag is in
// this list it must be present. If the tag is preceded with "!" then
// it is disallowed.
Tags []string
// NodeMeta is a map of required node metadata fields. If a key/value
// pair is in this map it must be present on the node in order for the
// service entry to be returned.
NodeMeta map[string]string
}
const (
// QueryTemplateTypeNamePrefixMatch uses the Name field of the query as
// a prefix to select the template.
QueryTemplateTypeNamePrefixMatch = "name_prefix_match"
)
// QueryTemplateOptions controls settings if this query is a template.
type QueryTemplateOptions struct {
// Type, if non-empty, means that this query is a template. This is
// set to one of the QueryTemplateType* constants above.
Type string
// Regexp is an optional regular expression to use to parse the full
// name, once the prefix match has selected a template. This can be
// used to extract parts of the name and choose a service name, set
// tags, etc.
Regexp string
}
// PreparedQuery defines a complete prepared query, and is the structure we
// maintain in the state store.
type PreparedQuery struct {
// ID is the UUID-based ID for the query, always generated by Consul.
ID string
// Name is an optional friendly name for the query supplied by the
// user. NOTE - if this feature is used then it will reduce the security
// of any read ACL associated with this query/service since this name
// can be used to locate nodes without supplying any ACL.
Name string
// Session is an optional session to tie this query's lifetime to. If
// this is omitted then the query will not expire.
Session string
// Token is the ACL token used when the query was created, and it is
// used when a query is subsequently executed. This token, or a token
// with management privileges, must be used to change the query later.
Token string
// Template is used to configure this query as a template, which will
// respond to queries based on the Name, and then will be rendered
// before it is executed.
Template QueryTemplateOptions
// Service defines a service query (leaving things open for other types
// later).
Service ServiceQuery
// DNS has options that control how the results of this query are
// served over DNS.
DNS QueryDNSOptions
RaftIndex
}
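A hedged sketch of how these fields fit together when defining a templated query. The names, regexp, and tags are made up, the `${match(1)}` interpolation follows Consul's documented prepared-query template syntax, and the import path is the vendored one this PR removes:

```go
package main

import "github.com/hashicorp/consul/consul/structs" // vendored path removed by this PR

func main() {
	// A template matching any query whose name starts with "geo-db"; the first
	// capture group from Regexp is spliced into the service name at execution time.
	query := &structs.PreparedQuery{
		Name: "geo-db",
		Template: structs.QueryTemplateOptions{
			Type:   structs.QueryTemplateTypeNamePrefixMatch,
			Regexp: `^geo-db-(.+)$`,
		},
		Service: structs.ServiceQuery{
			Service:     "geo-db-${match(1)}",
			Tags:        []string{"primary", "!experimental"}, // require "primary", exclude "experimental"
			OnlyPassing: true,
			Failover:    structs.QueryDatacenterOptions{NearestN: 3},
		},
	}
	_ = query
}
```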
// GetACLPrefix returns the prefix to look up the prepared_query ACL policy for
// this query, and whether the prefix applies to this query. You always need to
// check the ok value before using the prefix.
func (pq *PreparedQuery) GetACLPrefix() (string, bool) {
if pq.Name != "" || pq.Template.Type != "" {
return pq.Name, true
}
return "", false
}
type PreparedQueries []*PreparedQuery
type IndexedPreparedQueries struct {
Queries PreparedQueries
QueryMeta
}
type PreparedQueryOp string
const (
PreparedQueryCreate PreparedQueryOp = "create"
PreparedQueryUpdate PreparedQueryOp = "update"
PreparedQueryDelete PreparedQueryOp = "delete"
)
// PreparedQueryRequest is used to create or change prepared queries.
type PreparedQueryRequest struct {
// Datacenter is the target this request is intended for.
Datacenter string
// Op is the operation to apply.
Op PreparedQueryOp
// Query is the query itself.
Query *PreparedQuery
// WriteRequest holds the ACL token to go along with this request.
WriteRequest
}
// RequestDatacenter returns the datacenter for a given request.
func (q *PreparedQueryRequest) RequestDatacenter() string {
return q.Datacenter
}
// PreparedQuerySpecificRequest is used to get information about a prepared
// query.
type PreparedQuerySpecificRequest struct {
// Datacenter is the target this request is intended for.
Datacenter string
// QueryID is the ID of a query.
QueryID string
// QueryOptions (unfortunately named here) controls the consistency
// settings for the query lookup itself, as well as the service lookups.
QueryOptions
}
// RequestDatacenter returns the datacenter for a given request.
func (q *PreparedQuerySpecificRequest) RequestDatacenter() string {
return q.Datacenter
}
// PreparedQueryExecuteRequest is used to execute a prepared query.
type PreparedQueryExecuteRequest struct {
// Datacenter is the target this request is intended for.
Datacenter string
// QueryIDOrName is the ID of a query _or_ the name of one, either can
// be provided.
QueryIDOrName string
// Limit will trim the resulting list down to the given limit.
Limit int
// Source is used to sort the results relative to a given node using
// network coordinates.
Source QuerySource
// Agent is used to carry around a reference to the agent which initiated
// the execute request. Used to distance-sort relative to the local node.
Agent QuerySource
// QueryOptions (unfortunately named here) controls the consistency
// settings for the query lookup itself, as well as the service lookups.
QueryOptions
}
// RequestDatacenter returns the datacenter for a given request.
func (q *PreparedQueryExecuteRequest) RequestDatacenter() string {
return q.Datacenter
}
// PreparedQueryExecuteRemoteRequest is used when running a local query in a
// remote datacenter.
type PreparedQueryExecuteRemoteRequest struct {
// Datacenter is the target this request is intended for.
Datacenter string
// Query is a copy of the query to execute. We have to ship the entire
// query over since it won't be present in the remote state store.
Query PreparedQuery
// Limit will trim the resulting list down to the given limit.
Limit int
// QueryOptions (unfortunately named here) controls the consistency
// settings for the service lookups.
QueryOptions
}
// RequestDatacenter returns the datacenter for a given request.
func (q *PreparedQueryExecuteRemoteRequest) RequestDatacenter() string {
return q.Datacenter
}
// PreparedQueryExecuteResponse has the results of executing a query.
type PreparedQueryExecuteResponse struct {
// Service is the service that was queried.
Service string
// Nodes has the nodes that were output by the query.
Nodes CheckServiceNodes
// DNS has the options for serving these results over DNS.
DNS QueryDNSOptions
// Datacenter is the datacenter that these results came from.
Datacenter string
// Failovers is a count of how many times we had to query a remote
// datacenter.
Failovers int
// QueryMeta has freshness information about the query.
QueryMeta
}
// PreparedQueryExplainResponse has the results when explaining a query.
type PreparedQueryExplainResponse struct {
// Query has the fully-rendered query.
Query PreparedQuery
// QueryMeta has freshness information about the query.
QueryMeta
}


@@ -1,40 +0,0 @@
package structs
type SnapshotOp int
const (
SnapshotSave SnapshotOp = iota
SnapshotRestore
)
// SnapshotRequest is used as a header for a snapshot RPC request. This will
// precede any streaming data that's part of the request and is JSON-encoded on
// the wire.
type SnapshotRequest struct {
// Datacenter is the target datacenter for this request. The request
// will be forwarded if necessary.
Datacenter string
// Token is the ACL token to use for the operation. If ACLs are enabled
// then all operations require a management token.
Token string
// If set, any follower can service the request. Results may be
// arbitrarily stale. Only applies to SnapshotSave.
AllowStale bool
// Op is the operation code for the RPC.
Op SnapshotOp
}
// SnapshotResponse is used as a header for a snapshot RPC response. This will
// precede any streaming data that's part of the response and is JSON-encoded
// on the wire.
type SnapshotResponse struct {
// Error is the overall error status of the RPC request.
Error string
// QueryMeta has freshness information about the server that handled the
// request. It is only filled in for a SnapshotSave.
QueryMeta
}

File diff suppressed because it is too large.

@@ -1,85 +0,0 @@
package structs
import (
"fmt"
)
// TxnKVOp is used to define a single operation on the KVS inside a
// transaction
type TxnKVOp struct {
Verb KVSOp
DirEnt DirEntry
}
// TxnKVResult is used to define the result of a single operation on the KVS
// inside a transaction.
type TxnKVResult *DirEntry
// TxnOp is used to define a single operation inside a transaction. Only one
// of the types should be filled out per entry.
type TxnOp struct {
KV *TxnKVOp
}
// TxnOps is a list of operations within a transaction.
type TxnOps []*TxnOp
// TxnRequest is used to apply multiple operations to the state store in a
// single transaction
type TxnRequest struct {
Datacenter string
Ops TxnOps
WriteRequest
}
func (r *TxnRequest) RequestDatacenter() string {
return r.Datacenter
}
// TxnReadRequest is used as a fast path for read-only transactions that don't
// modify the state store.
type TxnReadRequest struct {
Datacenter string
Ops TxnOps
QueryOptions
}
func (r *TxnReadRequest) RequestDatacenter() string {
return r.Datacenter
}
// TxnError is used to return information about an error for a specific
// operation.
type TxnError struct {
OpIndex int
What string
}
// Error returns the string representation of an atomic error.
func (e TxnError) Error() string {
return fmt.Sprintf("op %d: %s", e.OpIndex, e.What)
}
// TxnErrors is a list of TxnError entries.
type TxnErrors []*TxnError
// TxnResult is used to define the result of a given operation inside a
// transaction. Only one of the types should be filled out per entry.
type TxnResult struct {
KV TxnKVResult
}
// TxnResults is a list of TxnResult entries.
type TxnResults []*TxnResult
// TxnResponse is the structure returned by a TxnRequest.
type TxnResponse struct {
Results TxnResults
Errors TxnErrors
}
// TxnReadResponse is the structure returned by a TxnReadRequest.
type TxnReadResponse struct {
TxnResponse
QueryMeta
}

27 vendor/github.com/hashicorp/consul/lib/eof.go generated vendored Normal file

@@ -0,0 +1,27 @@
package lib
import (
"io"
"strings"
"github.com/hashicorp/yamux"
)
var yamuxStreamClosed = yamux.ErrStreamClosed.Error()
var yamuxSessionShutdown = yamux.ErrSessionShutdown.Error()
// IsErrEOF returns true if we get an EOF error from the socket itself, or
// an EOF equivalent error from yamux.
func IsErrEOF(err error) bool {
if err == io.EOF {
return true
}
errStr := err.Error()
if strings.Contains(errStr, yamuxStreamClosed) ||
strings.Contains(errStr, yamuxSessionShutdown) {
return true
}
return false
}
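A small usage sketch, assuming a caller draining a yamux-backed stream; the `drain` helper and buffer size are illustrative:

```go
package main

import (
	"fmt"
	"io"

	"github.com/hashicorp/consul/lib"
)

// drain reads src until it ends, treating a plain io.EOF and yamux's
// "stream closed"/"session shutdown" errors as a clean end of stream.
func drain(src io.Reader) error {
	buf := make([]byte, 4096)
	for {
		if _, err := src.Read(buf); err != nil {
			if lib.IsErrEOF(err) {
				return nil
			}
			return fmt.Errorf("read failed: %v", err)
		}
	}
}

func main() {}
```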


@@ -18,6 +18,39 @@ func ComputeDistance(a *coordinate.Coordinate, b *coordinate.Coordinate) float64
return a.DistanceTo(b).Seconds()
}
// CoordinateSet holds all the coordinates for a given node, indexed by network
// segment name.
type CoordinateSet map[string]*coordinate.Coordinate
// Intersect tries to return a pair of coordinates which are compatible with the
// current set and a given set. We employ some special knowledge about network
// segments to avoid doing a full intersection, since this is in several hot
// paths. This might return nil for either coordinate in the output pair if an
// intersection cannot be found. The ComputeDistance function above is designed
// to deal with that.
func (cs CoordinateSet) Intersect(other CoordinateSet) (*coordinate.Coordinate, *coordinate.Coordinate) {
// Use the empty segment by default.
segment := ""
// If we have a single segment, then let our segment take priority since
// we are possibly a client. Any node with more than one segment can only
// be a server, which means it should be in all segments.
if len(cs) == 1 {
for s := range cs {
segment = s
}
}
// Likewise for the other set.
if len(other) == 1 {
for s := range other {
segment = s
}
}
return cs[segment], other[segment]
}
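A hedged example pairing Intersect with the ComputeDistance helper from this file, assuming it lives in Consul's `lib` package; the segment names are placeholders and the fresh coordinates make the distance estimate meaningless beyond illustration:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/lib"
	"github.com/hashicorp/serf/coordinate"
)

func main() {
	// A client pinned to a single segment and a server present in every segment.
	client := lib.CoordinateSet{
		"alpha": coordinate.NewCoordinate(coordinate.DefaultConfig()),
	}
	server := lib.CoordinateSet{
		"":      coordinate.NewCoordinate(coordinate.DefaultConfig()),
		"alpha": coordinate.NewCoordinate(coordinate.DefaultConfig()),
	}

	// Intersect picks the client's segment; ComputeDistance tolerates nil inputs.
	a, b := client.Intersect(server)
	fmt.Printf("estimated rtt: %.3fs\n", lib.ComputeDistance(a, b))
}
```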
// GenerateCoordinate creates a new coordinate with the given distance from the
// origin. This should only be used for tests.
func GenerateCoordinate(rtt time.Duration) *coordinate.Coordinate {


@@ -0,0 +1,36 @@
package porter
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
)
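// DefaultAddr is the porter server address used when neither the PORTER_ADDR
// environment variable nor /tmp/porter.addr supplies one.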
var DefaultAddr = "127.0.0.1:7965"
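// RandomPorts asks a running porter server for n unused ports and returns them
// as a slice.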
func RandomPorts(n int) ([]int, error) {
addr := os.Getenv("PORTER_ADDR")
if addr == "" {
b, err := ioutil.ReadFile("/tmp/porter.addr")
if err == nil {
addr = string(b)
}
}
if addr == "" {
addr = DefaultAddr
}
resp, err := http.Get(fmt.Sprintf("http://%s/%d", addr, n))
if err != nil {
return nil, err
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
var p []int
err = json.Unmarshal(data, &p)
return p, err
}
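A usage sketch for tests, assuming a porter server is already running and reachable; the port count is arbitrary:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/test/porter"
)

func main() {
	// Ask porter for three unused ports to hand to an embedded test server.
	ports, err := porter.RandomPorts(3)
	if err != nil {
		log.Fatalf("porter unavailable: %v", err)
	}
	fmt.Println("allocated ports:", ports)
}
```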


@@ -58,6 +58,14 @@ type TestAddressConfig struct {
HTTP string `json:"http,omitempty"`
}
// TestNetworkSegment contains the configuration for a network segment.
type TestNetworkSegment struct {
Name string `json:"name"`
Bind string `json:"bind"`
Port int `json:"port"`
Advertise string `json:"advertise"`
}
// TestServerConfig is the main server configuration struct.
type TestServerConfig struct {
NodeName string `json:"node_name"`
@@ -68,6 +76,7 @@ type TestServerConfig struct {
Server bool `json:"server,omitempty"`
DataDir string `json:"data_dir,omitempty"`
Datacenter string `json:"datacenter,omitempty"`
Segments []TestNetworkSegment `json:"segments"`
DisableCheckpoint bool `json:"disable_update_check"`
LogLevel string `json:"log_level,omitempty"`
Bind string `json:"bind_addr,omitempty"`
@@ -123,7 +132,7 @@ func defaultServerConfig() *TestServerConfig {
SerfLan: randomPort(),
SerfWan: randomPort(),
Server: randomPort(),
RPC: randomPort(),
//RPC: randomPort(),
},
ReadyTimeout: 10 * time.Second,
}
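A hedged sketch of populating the new Segments field via testutil's config callback; the segment name, addresses, and port are placeholders, and running a non-default segment requires a Consul Enterprise binary:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/testutil"
)

func main() {
	// Start an embedded test Consul agent that declares one extra network segment.
	srv, err := testutil.NewTestServerConfig(func(c *testutil.TestServerConfig) {
		c.Segments = []testutil.TestNetworkSegment{{
			Name:      "alpha",
			Bind:      "127.0.0.1",
			Port:      8303,
			Advertise: "127.0.0.1",
		}}
	})
	if err != nil {
		log.Fatalf("failed to start test consul: %v", err)
	}
	defer srv.Stop()
}
```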


@@ -1,39 +0,0 @@
# Consul `types` Package
The Go language has a strong type system built into the language. The
`types` package corrals named types into a single package that is terminal in
`go`'s import graph. The `types` package should not have any downstream
dependencies. Each subsystem that defines its own set of types exists in its
own file, but all types are defined in the same package.
# Why
> Everything should be made as simple as possible, but not simpler.
`string` is a useful container and underlying type for identifiers, however
the `string` type is effectively opaque to the compiler in terms of how a
given string is intended to be used. For instance, there is nothing
preventing the following from happening:
```go
// `map` of Widgets, looked up by ID
var widgetLookup map[string]*Widget
// ...
var widgetID string = "widgetID"
w, found := widgetLookup[widgetID]
// Bad!
var widgetName string = "name of widget"
w, found = widgetLookup[widgetName]
```
but this class of problem is entirely preventable:
```go
type WidgetID string

var widgetLookup map[WidgetID]*Widget

// Bad! This no longer compiles:
// cannot use widgetName (type string) as type WidgetID in map index
var widgetName string = "name of widget"
w, found := widgetLookup[widgetName]
```
TL;DR: intentions and idioms aren't statically checked by compilers. The
`types` package uses Go's strong type system to prevent this class of bug.


@@ -1,9 +0,0 @@
package types
// AreaID is a strongly-typed string used to uniquely represent a network area,
// which is a relationship between Consul servers.
type AreaID string
// This represents the existing WAN area that's built in to Consul. Consul
// Enterprise generalizes areas, which are represented with UUIDs.
const AreaWAN AreaID = "wan"


@@ -1,5 +0,0 @@
package types
// CheckID is a strongly typed string used to uniquely represent a Consul
// Check on an Agent (a CheckID is not globally unique).
type CheckID string


@@ -1,4 +0,0 @@
package types
// NodeID is a unique identifier for a node across space and time.
type NodeID string

46 vendor/vendor.json vendored

@@ -749,46 +749,34 @@
"revisionTime": "2017-10-03T21:31:50Z"
},
{
"checksumSHA1": "jfELEMRhiTcppZmRH+ZwtkVS5Uw=",
"path": "github.com/hashicorp/consul/acl",
"revision": "75ca2cace08e38de8af1731ee8614d0533d5a4d4",
"revisionTime": "2017-08-10T00:46:41Z"
},
{
"checksumSHA1": "AgdPCR9/+M0kkVpaeZ0PnATxfSc=",
"checksumSHA1": "XLfcIX2qpRr0o26aFMjCOzvw6jo=",
"path": "github.com/hashicorp/consul/api",
"revision": "75ca2cace08e38de8af1731ee8614d0533d5a4d4",
"revisionTime": "2017-08-10T00:46:41Z"
"revision": "51ea240df8476e02215d53fbfad5838bf0d44d21",
"revisionTime": "2017-10-16T16:22:40Z"
},
{
"checksumSHA1": "Z1N3jX/5B7GbLNfNp5GTxrsJItc=",
"path": "github.com/hashicorp/consul/consul/structs",
"revision": "e9ca44d0a1757ac9aecc6785904a701936c10e4a",
"revisionTime": "2017-04-17T18:01:43Z"
},
{
"checksumSHA1": "X3kV+a3rz+kw6SJupDVjHmUEUCQ=",
"checksumSHA1": "tU6mtqrRxFn0gqqRC35JhD0y2rE=",
"path": "github.com/hashicorp/consul/lib",
"revision": "75ca2cace08e38de8af1731ee8614d0533d5a4d4",
"revisionTime": "2017-08-10T00:46:41Z"
"revision": "51ea240df8476e02215d53fbfad5838bf0d44d21",
"revisionTime": "2017-10-16T16:22:40Z"
},
{
"checksumSHA1": "++0PVBxbpylmllyCxSa7cdc6dDc=",
"checksumSHA1": "lLvSnIuLwqn3ZbX1H4dVtzKuRdU=",
"path": "github.com/hashicorp/consul/test/porter",
"revision": "51ea240df8476e02215d53fbfad5838bf0d44d21",
"revisionTime": "2017-10-16T16:22:40Z"
},
{
"checksumSHA1": "AZh3opzONxPrOi+4Io2+OomdKgM=",
"path": "github.com/hashicorp/consul/testutil",
"revision": "75ca2cace08e38de8af1731ee8614d0533d5a4d4",
"revisionTime": "2017-08-10T00:46:41Z"
"revision": "745fd38728f28061c9f5bfe323d68ee25f2cd07b",
"revisionTime": "2017-10-16T23:19:35Z"
},
{
"checksumSHA1": "J8TTDc84MvAyXE/FrfgS+xc/b6s=",
"path": "github.com/hashicorp/consul/testutil/retry",
"revision": "75ca2cace08e38de8af1731ee8614d0533d5a4d4",
"revisionTime": "2017-08-10T00:46:41Z"
},
{
"checksumSHA1": "bYK/7DsyTM3YDjvc0RRUH4I+jic=",
"path": "github.com/hashicorp/consul/types",
"revision": "75ca2cace08e38de8af1731ee8614d0533d5a4d4",
"revisionTime": "2017-08-10T00:46:41Z"
"revision": "745fd38728f28061c9f5bfe323d68ee25f2cd07b",
"revisionTime": "2017-10-16T23:19:35Z"
},
{
"path": "github.com/hashicorp/errwrap",