diff --git a/go.mod b/go.mod
index db277759e..ca0ebf5dd 100644
--- a/go.mod
+++ b/go.mod
@@ -7,17 +7,19 @@ replace (
github.com/NYTimes/gziphandler => github.com/NYTimes/gziphandler v1.0.0
github.com/apparentlymart/go-textseg/v12 => github.com/apparentlymart/go-textseg/v12 v12.0.0
github.com/godbus/dbus => github.com/godbus/dbus v5.0.1+incompatible
+ github.com/golang/protobuf => github.com/golang/protobuf v1.3.4
github.com/hashicorp/nomad/api => ./api
github.com/kr/pty => github.com/kr/pty v1.1.5
github.com/shirou/gopsutil => github.com/hashicorp/gopsutil v2.18.13-0.20200531184148-5aca383d4f9d+incompatible
- github.com/golang/protobuf => github.com/golang/protobuf v1.3.4
)
require (
cloud.google.com/go/storage v1.0.0 // indirect
- contrib.go.opencensus.io/exporter/ocagent v0.4.12 // indirect
github.com/Azure/azure-sdk-for-go v29.0.0+incompatible // indirect
- github.com/Azure/go-autorest v11.7.1+incompatible // indirect
+ github.com/Azure/go-autorest/autorest v0.11.4 // indirect
+ github.com/Azure/go-autorest/autorest/azure/auth v0.5.1 // indirect
+ github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
+ github.com/Azure/go-autorest/autorest/validation v0.3.0 // indirect
github.com/LK4D4/joincontext v0.0.0-20171026170139-1724345da6d5
github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873
github.com/Microsoft/hcsshim v0.8.8-0.20200312192636-fd0797d766b1 // indirect
@@ -50,8 +52,7 @@ require (
github.com/google/go-cmp v0.4.0
github.com/gorilla/websocket v1.4.2
github.com/grpc-ecosystem/go-grpc-middleware v1.2.1-0.20200228141219-3ce3d519df39
- github.com/grpc-ecosystem/grpc-gateway v1.9.0 // indirect
- github.com/hashicorp/consul v1.7.1-0.20200213195527-b137060630b4
+ github.com/hashicorp/consul v1.7.7
github.com/hashicorp/consul-template v0.24.1
github.com/hashicorp/consul/api v1.4.1-0.20200730220852-12f574c9de39
github.com/hashicorp/consul/sdk v0.5.0
@@ -96,7 +97,7 @@ require (
github.com/mitchellh/colorstring v0.0.0-20150917214807-8631ce90f286
github.com/mitchellh/copystructure v1.0.0
github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b
- github.com/mitchellh/go-testing-interface v1.0.0
+ github.com/mitchellh/go-testing-interface v1.0.3
github.com/mitchellh/hashstructure v1.0.0
github.com/mitchellh/mapstructure v1.3.1
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
@@ -120,7 +121,7 @@ require (
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
github.com/zclconf/go-cty v1.4.1
go.opencensus.io v0.22.1-0.20190713072201-b4a14686f0a9 // indirect
- golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37
+ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 // indirect
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect
golang.org/x/mod v0.3.0 // indirect
diff --git a/go.sum b/go.sum
index c0330ad6a..df11d7326 100644
--- a/go.sum
+++ b/go.sum
@@ -17,8 +17,6 @@ cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc=
-contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA=
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
@@ -33,9 +31,33 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7O
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v10.7.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v10.15.3+incompatible h1:nhKI/bvazIs3C3TFGoSqKY6hZ8f5od5mb5/UcS6HVIY=
github.com/Azure/go-autorest v10.15.3+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest v11.7.1+incompatible h1:M2YZIajBBVekV86x0rr1443Lc1F/Ylxb9w+5EtSyX3Q=
-github.com/Azure/go-autorest v11.7.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest v0.11.4 h1:iWJqGEvip7mjibEqC/srXNdo+4wLEPiwlP/7dZLtoPc=
+github.com/Azure/go-autorest/autorest v0.11.4/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.2 h1:Aze/GQeAN1RRbGmnUJvUj+tFGBzFdIg3293/A9rbxC4=
+github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.1 h1:bvUhZciHydpBxBmCheUgxxbSwJy7xcfjkUsjUcqSojc=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.1/go.mod h1:ea90/jvmnAwDrSooLH4sRIehEPtG/EPUXavDh31MnA4=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.0 h1:Ml+UCrnlKD+cJmSzrZ/RDcDw86NjkRUpnFh7V5JUhzU=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
+github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
+github.com/Azure/go-autorest/autorest/validation v0.3.0 h1:3I9AAI63HfcLtphd9g39ruUwRI+Ca+z/f36KHPFRUss=
+github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
+github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -52,8 +74,6 @@ github.com/NVIDIA/gpu-monitoring-tools v0.0.0-20180829222009-86f2a9fac6c5/go.mod
github.com/NYTimes/gziphandler v1.0.0 h1:OswZCvpiFsNRCbeapdJxDuikAqVXTgV7XAht8S9olZo=
github.com/NYTimes/gziphandler v1.0.0/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
-github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
-github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14=
@@ -65,7 +85,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
-github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3 h1:ZSTrOEhiM5J5RFxEaFvMZVEAM1KvT1YzbEOwB2EAGjA=
github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0=
@@ -104,7 +123,6 @@ github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
-github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
@@ -119,6 +137,8 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D
github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533 h1:8wZizuKuZVu5COB7EsBYxBQz8nRcXXn5d4Gt91eJLvU=
+github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/container-storage-interface/spec v1.2.0-rc1.0.20191021210849-a33ece0a8a9f h1:m2LYF3fo9IPapVt5FGRVw5bJPmlWqWIezB0jkQh03Zo=
github.com/container-storage-interface/spec v1.2.0-rc1.0.20191021210849-a33ece0a8a9f/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
@@ -143,6 +163,7 @@ github.com/containernetworking/cni v0.7.2-0.20190612152420-dc953e2fd91f h1:zLuuw
github.com/containernetworking/cni v0.7.2-0.20190612152420-dc953e2fd91f/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/plugins v0.7.3-0.20190501191748-2d6d46d308b2 h1:WT1OeJOkmYxVP5/e3P+/MVJ1ftii14o80u0N9aK2tCg=
github.com/containernetworking/plugins v0.7.3-0.20190501191748-2d6d46d308b2/go.mod h1:dagHaAhNjXjT9QYOklkKJDGaQPTg4pf//FrUcJeb7FU=
+github.com/coredns/coredns v1.1.2 h1:bAFHrSsBeTeRG5W3Nf2su3lUGw7Npw2UKeCJm/3A638=
github.com/coredns/coredns v1.1.2/go.mod h1:zASH/MVDgR6XZTbxvOnsZfffS+31vg6Ackf/wo1+AM0=
github.com/coreos/go-iptables v0.4.3-0.20190724151750-969b135e941d h1:9n0vuQxZw5yuleXDNqGLSw3o5s6vZJJH36ZSU7626zw=
github.com/coreos/go-iptables v0.4.3-0.20190724151750-969b135e941d/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@@ -203,17 +224,16 @@ github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNE
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
-github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
-github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/elazarl/go-bindata-assetfs v0.0.0-20160803192304-e1a2a7ec64b0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/elazarl/go-bindata-assetfs v1.0.1-0.20200509193318-234c15e7648f h1:AwZUiMWfYSmIiHdFJIubTSs8BFIFoMmUFbeuwBzHIPs=
github.com/elazarl/go-bindata-assetfs v1.0.1-0.20200509193318-234c15e7648f/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/endocrimes/go-winio v0.4.13-0.20190628114223-fb47a8b41948 h1:PgcXIRC45Fcvl4hQeHRzyGsDebslp0j+CXYtMgr3COM=
github.com/endocrimes/go-winio v0.4.13-0.20190628114223-fb47a8b41948/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/envoyproxy/go-control-plane v0.8.0/go.mod h1:GSSbY9P1neVhdY7G4wu+IK1rk/dqhiCC/4ExuWJZVuk=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/go-control-plane v0.9.5 h1:lRJIqDD8yjV1YyPRqecMdytjDLs2fTXq363aCib5xPU=
+github.com/envoyproxy/go-control-plane v0.9.5/go.mod h1:OXl5to++W0ctG+EHWTFUjiypVxC/Y4VLc/KFU+al13s=
+github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
@@ -228,6 +248,7 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
github.com/fsouza/go-dockerclient v1.6.5 h1:vuFDnPcds3LvTWGYb9h0Rty14FLgkjHZdwLDROCdgsw=
github.com/fsouza/go-dockerclient v1.6.5/go.mod h1:GOdftxWLWIbIWKbIMDroKFJzPdg6Iw7r+jX1DDZdVsA=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@@ -248,10 +269,9 @@ github.com/godbus/dbus v5.0.1+incompatible h1:fsDsnr/6MFSIm3kl6JJpq5pH+vO/rA5jUu
github.com/godbus/dbus v5.0.1+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
+github.com/gogo/googleapis v1.2.0 h1:Z0v3OJDotX9ZBpdz2V+AI7F4fITSZhVE5mg6GQppwMM=
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
@@ -262,22 +282,8 @@ github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200j
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -293,6 +299,7 @@ github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:od
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
@@ -305,29 +312,26 @@ github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE0
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gophercloud/gophercloud v0.0.0-20180828235145-f29afc2cceca h1:wobTb8SE189AuxzEKClyYxiI4nUGWlpVtl13eLiFlOE=
github.com/gophercloud/gophercloud v0.0.0-20180828235145-f29afc2cceca/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.2.1-0.20200228141219-3ce3d519df39 h1:MqvH60+R2JhSdvVgGxmExOndrkRQtGW7w4+gcrymN64=
github.com/grpc-ecosystem/go-grpc-middleware v1.2.1-0.20200228141219-3ce3d519df39/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
-github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/hashicorp/consul v1.7.1-0.20200213195527-b137060630b4 h1:KO2Xiprv+MgQ75yyakCf3m62u4UMS3C+C68Oa+f47EA=
-github.com/hashicorp/consul v1.7.1-0.20200213195527-b137060630b4/go.mod h1:vKfXmSQNl6HwO/JqQ2DDLzisBDV49y+JVTkrdW1cnSU=
+github.com/hashicorp/consul v1.7.7 h1:jQsGzHkPYslqgDVSsJCdPOJSsdzaZl2upfVPxhn3lmg=
+github.com/hashicorp/consul v1.7.7/go.mod h1:urbfGaVZDmnXC6geg0LYPh/SRUk1E8nfmDHpz+Q0nLw=
github.com/hashicorp/consul-template v0.24.1 h1:96zTJ5YOq4HMTgtehXRvzGoQNEG2Z4jBYY5ofhq8/Cc=
github.com/hashicorp/consul-template v0.24.1/go.mod h1:KcTEopo2kCp7kww0d4oG7d3oX2Uou4hzb1Rs/wY9TVI=
github.com/hashicorp/consul/api v1.2.0/go.mod h1:1SIkFYi2ZTXUE5Kgt179+4hH33djo11+0Eo2XgTAtkw=
@@ -345,6 +349,7 @@ github.com/hashicorp/cronexpr v1.1.0/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-bexpr v0.1.2 h1:ijMXI4qERbzxbCnkxmfUtwMyjrrk3y+Vt0MxojNCbBs=
github.com/hashicorp/go-bexpr v0.1.2/go.mod h1:ANbpTX1oAql27TZkKVeW8p1w8NTdnyzPe/0qqPCKohU=
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de h1:XDCSythtg8aWSRSO29uwhgh7b127fWr+m5SemqjSUL8=
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de/go.mod h1:xIwEieBHERyEvaeKF/TcHh1Hu+lxPM+n2vT1+g9I4m4=
@@ -385,6 +390,7 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+
github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
github.com/hashicorp/go-plugin v1.0.2-0.20191004171845-809113480b55 h1:XzRWU4VSJBqGVxl6tWD+6ITHftMhvRDKKyp1WVSXAhY=
github.com/hashicorp/go-plugin v1.0.2-0.20191004171845-809113480b55/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
+github.com/hashicorp/go-raftchunking v0.6.1 h1:moEnaG3gcwsWNyIBJoD5PCByE+Ewkqxh6N05CT+MbwA=
github.com/hashicorp/go-raftchunking v0.6.1/go.mod h1:cGlg3JtDy7qy6c/3Bu660Mic1JF+7lWqIwCFSb08fX0=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.5.4 h1:1BZvpawXoJCWX6pNtow9+rpEj+3itIlutiqnntI6jOE=
@@ -419,6 +425,7 @@ github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c h1:PdZEHcpa3117kJ1
github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/hcl/v2 v2.5.1 h1:5ytFZykUu2/4U59ogd2f+XZdi9+6oC/Tv5WzsH6fIDA=
github.com/hashicorp/hcl/v2 v2.5.1/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY=
+github.com/hashicorp/hil v0.0.0-20160711231837-1e86c6b523c5 h1:uk280DXEbQiCOZgCOI3elFSeNxf8YIZiNsbr2pQLYD0=
github.com/hashicorp/hil v0.0.0-20160711231837-1e86c6b523c5/go.mod h1:KHvg/R2/dPtaePb16oW4qIyzkMxXOL38xjRN64adsts=
github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
@@ -427,8 +434,6 @@ github.com/hashicorp/mdns v1.0.1 h1:XFSOubp8KWB+Jd2PDyaX5xUd5bhSP/+pTDZVDMzZJM8=
github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/memberlist v0.1.6 h1:ouPxvwKYaNZe+eTcHxYP0EblPduVLvIPycul+vv8his=
-github.com/hashicorp/memberlist v0.1.6/go.mod h1:5VDNHjqFMgEcclnwmkCnC99IPwxBmIsxwY8qn+Nl0H4=
github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g=
github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 h1:lc3c72qGlIMDqQpQH82Y4vaglRMMFdJbziYWriR4UcE=
@@ -441,8 +446,6 @@ github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBA
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k=
-github.com/hashicorp/serf v0.8.5 h1:ZynDUIQiA8usmRgPdGPHFdPnb1wgGI9tK3mO9hcAJjc=
-github.com/hashicorp/serf v0.8.5/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k=
github.com/hashicorp/serf v0.9.3 h1:AVF6JDQQens6nMHT9OGERBvK0f8rPrAGILnsKLr6lzM=
github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q=
@@ -460,6 +463,7 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
github.com/hpcloud/tail v1.0.1-0.20170814160653-37f427138745 h1:8as8OQ+RF1QrsHvWWsKBtBKINhD9QaD1iozA1wrO4aA=
github.com/hpcloud/tail v1.0.1-0.20170814160653-37f427138745/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 h1:rw3IAne6CDuVFlZbPOkA7bhxlqawFh7RJJ+CejfMaxE=
@@ -547,6 +551,8 @@ github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzO
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-testing-interface v1.0.3 h1:gqwbsGvc0jbhAPW/26WfEoSiPANAVlR49AAVdvaTjI4=
+github.com/mitchellh/go-testing-interface v1.0.3/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
@@ -557,6 +563,7 @@ github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1D
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.2.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.3.1 h1:cCBH2gTD2K0OtLlv/Y5H01VQCqmlDxz30kS5Y5bqfLA=
github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
@@ -587,13 +594,10 @@ github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQ
github.com/oklog/run v1.0.1-0.20180308005104-6934b124db28 h1:R9vmquWCeGmxTHUVnTQJrU4oPlgEn9+x48nwXSqkIKg=
github.com/oklog/run v1.0.1-0.20180308005104-6934b124db28/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg=
@@ -617,12 +621,12 @@ github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
-github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE=
github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4 v2.2.5+incompatible h1:xOYu2+sKj87pJz7V+I7260354UlcRyAZUGhMCToTzVw=
@@ -641,13 +645,11 @@ github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
-github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0 h1:YVIb/fVcOTMSqtqZWSKnHpSLBxu8DKgxq8z6RuBZwqI=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
@@ -655,7 +657,6 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
@@ -663,15 +664,14 @@ github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rboyer/safeio v0.2.1 h1:05xhhdRNAdS3apYm7JRjOqngf4xruaW959jmRxGDuSU=
+github.com/rboyer/safeio v0.2.1/go.mod h1:Cq/cEPK+YXFn622lsQ0K4KsPZSPtaptHHEldsy7Fmig=
github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 h1:Wdi9nwnhFNAlseAOekn6B5G/+GMtks9UKbvRU/CMM/o=
github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/cors v0.0.0-20170801073201-eabcc6af4bbe h1:fPLXIFSIvTs99QtPSCsRITbv01v7MsPN7wpuYtD8ebI=
github.com/rs/cors v0.0.0-20170801073201-eabcc6af4bbe/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
@@ -739,6 +739,7 @@ github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKv
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -786,8 +787,6 @@ github.com/zclconf/go-cty v1.4.1 h1:Xzr4m4utRDhHDifag1onwwUSq32HLoLBsp+w6tD0880=
github.com/zclconf/go-cty v1.4.1/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
-go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
-go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.1-0.20190713072201-b4a14686f0a9 h1:7LiVwYOeGhrZmChB6cSFzXlk3v0aRNA28kOEygIK9mw=
@@ -810,8 +809,8 @@ golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191106202628-ed6320f186d4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw=
-golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -847,7 +846,6 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -886,9 +884,7 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -900,7 +896,6 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190508220229-2d0786266e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -972,7 +967,6 @@ google.golang.org/api v0.0.0-20180829000535-087779f1d2c9/go.mod h1:4mhQ8q/RsB7i+
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
-google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -999,6 +993,7 @@ google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51 h1:Ex1mq5jaJof+kRnYi3SlYJ8KKa9Ao3NHyIT5XJ1gF6U=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20200302123026-7795fca6ccb1 h1:RYJIKMPLUCjLP+fEg9ygjxF3KjfSHN4BSZw91aecq6U=
google.golang.org/genproto v0.0.0-20200302123026-7795fca6ccb1/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
@@ -1007,22 +1002,16 @@ google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmE
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
@@ -1037,6 +1026,7 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
@@ -1047,7 +1037,6 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/tomb.v2 v2.0.0-20140626144623-14b3d72120e8 h1:EQ3aCG3c3nkUNxx6quE0Ux47RYExj7cJyRMxUXqPf6I=
gopkg.in/tomb.v2 v2.0.0-20140626144623-14b3d72120e8/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -1068,11 +1057,13 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-istio.io/gogo-genproto v0.0.0-20190124151557-6d926a6e6feb/go.mod h1:eIDJ6jNk/IeJz6ODSksHl5Aiczy5JUq6vFhJWI5OtiI=
k8s.io/api v0.0.0-20180806132203-61b11ee65332/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
+k8s.io/api v0.0.0-20190325185214-7544f9db76f6 h1:9MWtbqhwTyDvF4cS1qAhxDb9Mi8taXiAu+5nEacl7gY=
k8s.io/api v0.0.0-20190325185214-7544f9db76f6/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
k8s.io/apimachinery v0.0.0-20180821005732-488889b0007f/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
+k8s.io/apimachinery v0.0.0-20190223001710-c182ff3b9841 h1:Q4RZrHNtlC/mSdC1sTrcZ5RchC/9vxLVj57pWiCBKv4=
k8s.io/apimachinery v0.0.0-20190223001710-c182ff3b9841/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
+k8s.io/client-go v8.0.0+incompatible h1:tTI4hRmb1DRMl4fG6Vclfdi6nTM82oIrTT7HfitmxC4=
k8s.io/client-go v8.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml b/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml
deleted file mode 100644
index ee417bbe6..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-language: go
-
-go:
- - 1.11.x
-
-go_import_path: contrib.go.opencensus.io/exporter/ocagent
-
-before_script:
- - GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any
- - PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any
-
-script:
- - go build ./... # Ensure dependency updates don't break build
- - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi
- - go vet ./...
- - GO111MODULE=on go test -v -race $PKGS # Run all the tests with the race detector enabled
- - GO111MODULE=off go test -v -race $PKGS # Make sure tests still pass when not using Go modules.
- - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi'
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md
deleted file mode 100644
index 0786fdf43..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# How to contribute
-
-We'd love to accept your patches and contributions to this project. There are
-just a few small guidelines you need to follow.
-
-## Contributor License Agreement
-
-Contributions to this project must be accompanied by a Contributor License
-Agreement. You (or your employer) retain the copyright to your contribution,
-this simply gives us permission to use and redistribute your contributions as
-part of the project. Head over to <https://cla.developers.google.com/> to see
-your current agreements on file or to sign a new one.
-
-You generally only need to submit a CLA once, so if you've already submitted one
-(even if it was for a different project), you probably don't need to do it
-again.
-
-## Code reviews
-
-All submissions, including submissions by project members, require review. We
-use GitHub pull requests for this purpose. Consult [GitHub Help] for more
-information on using pull requests.
-
-[GitHub Help]: https://help.github.com/articles/about-pull-requests/
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md
deleted file mode 100644
index 3b9e908f5..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md
+++ /dev/null
@@ -1,61 +0,0 @@
-# OpenCensus Agent Go Exporter
-
-[![Build Status][travis-image]][travis-url] [![GoDoc][godoc-image]][godoc-url]
-
-
-This repository contains the Go implementation of the OpenCensus Agent (OC-Agent) Exporter.
-OC-Agent is a deamon process running in a VM that can retrieve spans/stats/metrics from
-OpenCensus Library, export them to other backends and possibly push configurations back to
-Library. See more details on [OC-Agent Readme][OCAgentReadme].
-
-Note: This is an experimental repository and is likely to get backwards-incompatible changes.
-Ultimately we may want to move the OC-Agent Go Exporter to [OpenCensus Go core library][OpenCensusGo].
-
-## Installation
-
-```bash
-$ go get -u contrib.go.opencensus.io/exporter/ocagent
-```
-
-## Usage
-
-```go
-import (
- "context"
- "fmt"
- "log"
- "time"
-
- "contrib.go.opencensus.io/exporter/ocagent"
- "go.opencensus.io/trace"
-)
-
-func Example() {
- exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("your-service-name"))
- if err != nil {
- log.Fatalf("Failed to create the agent exporter: %v", err)
- }
- defer exp.Stop()
-
- // Now register it as a trace exporter.
- trace.RegisterExporter(exp)
-
- // Then use the OpenCensus tracing library, like we normally would.
- ctx, span := trace.StartSpan(context.Background(), "AgentExporter-Example")
- defer span.End()
-
- for i := 0; i < 10; i++ {
- _, iSpan := trace.StartSpan(ctx, fmt.Sprintf("Sample-%d", i))
- <-time.After(6 * time.Millisecond)
- iSpan.End()
- }
-}
-```
-
-[OCAgentReadme]: https://github.com/census-instrumentation/opencensus-proto/tree/master/opencensus/proto/agent#opencensus-agent-proto
-[OpenCensusGo]: https://github.com/census-instrumentation/opencensus-go
-[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent?status.svg
-[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent
-[travis-image]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent.svg?branch=master
-[travis-url]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent
-
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go
deleted file mode 100644
index 297e44b6e..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocagent
-
-import (
- "math/rand"
- "time"
-)
-
-var randSrc = rand.New(rand.NewSource(time.Now().UnixNano()))
-
-// retries function fn upto n times, if fn returns an error lest it returns nil early.
-// It applies exponential backoff in units of (1<<n)*timeBaseUnit.
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/ocagent.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/ocagent.go
deleted file mode 100644
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/ocagent.go
+++ /dev/null
- if len(ae.headers) > 0 {
- ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
- }
- traceExporter, err := traceSvcClient.Export(ctx)
- if err != nil {
- return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err)
- }
-
- firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{
- Node: node,
- Resource: ae.resource,
- }
- if err := traceExporter.Send(firstTraceMessage); err != nil {
- return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
- }
-
- ae.mu.Lock()
- ae.traceExporter = traceExporter
- ae.mu.Unlock()
-
- // Initiate the config service by sending over node identifier info.
- configStream, err := traceSvcClient.Config(context.Background())
- if err != nil {
- return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err)
- }
- firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: node}
- if err := configStream.Send(firstCfgMessage); err != nil {
- return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
- }
-
- // In the background, handle trace configurations that are beamed down
- // by the agent, but also reply to it with the applied configuration.
- go ae.handleConfigStreaming(configStream)
-
- return nil
-}
-
-func (ae *Exporter) createMetricsServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error {
- metricsSvcClient := agentmetricspb.NewMetricsServiceClient(cc)
- metricsExporter, err := metricsSvcClient.Export(context.Background())
- if err != nil {
- return fmt.Errorf("MetricsExporter: failed to start the service client: %v", err)
- }
- // Initiate the metrics service by sending over the first message just containing the Node and Resource.
- firstMetricsMessage := &agentmetricspb.ExportMetricsServiceRequest{
- Node: node,
- Resource: ae.resource,
- }
- if err := metricsExporter.Send(firstMetricsMessage); err != nil {
- return fmt.Errorf("MetricsExporter:: failed to send the first message: %v", err)
- }
-
- ae.mu.Lock()
- ae.metricsExporter = metricsExporter
- ae.mu.Unlock()
-
- // With that we are good to go and can start sending metrics
- return nil
-}
-
-func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) {
- addr := ae.prepareAgentAddress()
- var dialOpts []grpc.DialOption
- if ae.clientTransportCredentials != nil {
- dialOpts = append(dialOpts, grpc.WithTransportCredentials(ae.clientTransportCredentials))
- } else if ae.canDialInsecure {
- dialOpts = append(dialOpts, grpc.WithInsecure())
- }
- if ae.compressor != "" {
- dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(ae.compressor)))
- }
- dialOpts = append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
-
- ctx := context.Background()
- if len(ae.headers) > 0 {
- ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
- }
- return grpc.DialContext(ctx, addr, dialOpts...)
-}
-
-func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error {
- // Note: We haven't yet implemented configuration sending so we
- // should NOT be changing connection states within this function for now.
- for {
- recv, err := configStream.Recv()
- if err != nil {
- // TODO: Check if this is a transient error or exponential backoff-able.
- return err
- }
- cfg := recv.Config
- if cfg == nil {
- continue
- }
-
- // Otherwise now apply the trace configuration sent down from the agent
- if psamp := cfg.GetProbabilitySampler(); psamp != nil {
- trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)})
- } else if csamp := cfg.GetConstantSampler(); csamp != nil {
- alwaysSample := csamp.Decision == tracepb.ConstantSampler_ALWAYS_ON
- if alwaysSample {
- trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
- } else {
- trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()})
- }
- } else { // TODO: Add the rate limiting sampler here
- }
-
- // Then finally send back to upstream the newly applied configuration
- err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}})
- if err != nil {
- return err
- }
- }
-}
-
-// Stop shuts down all the connections and resources
-// related to the exporter.
-func (ae *Exporter) Stop() error {
- ae.mu.RLock()
- cc := ae.grpcClientConn
- started := ae.started
- stopped := ae.stopped
- ae.mu.RUnlock()
-
- if !started {
- return errNotStarted
- }
- if stopped {
- // TODO: tell the user that we've already stopped, so perhaps a sentinel error?
- return nil
- }
-
- ae.Flush()
-
- // Now close the underlying gRPC connection.
- var err error
- if cc != nil {
- err = cc.Close()
- }
-
- // At this point we can change the state variables: started and stopped
- ae.mu.Lock()
- ae.started = false
- ae.stopped = true
- ae.mu.Unlock()
- close(ae.stopCh)
-
- // Ensure that the backgroundConnector returns
- <-ae.backgroundConnectionDoneCh
-
- return err
-}
-
-func (ae *Exporter) ExportSpan(sd *trace.SpanData) {
- if sd == nil {
- return
- }
- _ = ae.traceBundler.Add(sd, 1)
-}
-
-func (ae *Exporter) ExportTraceServiceRequest(batch *agenttracepb.ExportTraceServiceRequest) error {
- if batch == nil || len(batch.Spans) == 0 {
- return nil
- }
-
- select {
- case <-ae.stopCh:
- return errStopped
-
- default:
- if !ae.connected() {
- return errNoConnection
- }
-
- ae.senderMu.Lock()
- err := ae.traceExporter.Send(batch)
- ae.senderMu.Unlock()
- if err != nil {
- ae.setStateDisconnected()
- return err
- }
- return nil
- }
-}
-
-func (ae *Exporter) ExportView(vd *view.Data) {
- if vd == nil {
- return
- }
- _ = ae.viewDataBundler.Add(vd, 1)
-}
-
-func ocSpanDataToPbSpans(sdl []*trace.SpanData) []*tracepb.Span {
- if len(sdl) == 0 {
- return nil
- }
- protoSpans := make([]*tracepb.Span, 0, len(sdl))
- for _, sd := range sdl {
- if sd != nil {
- protoSpans = append(protoSpans, ocSpanToProtoSpan(sd))
- }
- }
- return protoSpans
-}
-
-func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) {
- select {
- case <-ae.stopCh:
- return
-
- default:
- if !ae.connected() {
- return
- }
-
- protoSpans := ocSpanDataToPbSpans(sdl)
- if len(protoSpans) == 0 {
- return
- }
- ae.senderMu.Lock()
- err := ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{
- Spans: protoSpans,
- })
- ae.senderMu.Unlock()
- if err != nil {
- ae.setStateDisconnected()
- }
- }
-}
-
-func ocViewDataToPbMetrics(vdl []*view.Data) []*metricspb.Metric {
- if len(vdl) == 0 {
- return nil
- }
- metrics := make([]*metricspb.Metric, 0, len(vdl))
- for _, vd := range vdl {
- if vd != nil {
- vmetric, err := viewDataToMetric(vd)
- // TODO: (@odeke-em) somehow report this error, if it is non-nil.
- if err == nil && vmetric != nil {
- metrics = append(metrics, vmetric)
- }
- }
- }
- return metrics
-}
-
-func (ae *Exporter) uploadViewData(vdl []*view.Data) {
- select {
- case <-ae.stopCh:
- return
-
- default:
- if !ae.connected() {
- return
- }
-
- protoMetrics := ocViewDataToPbMetrics(vdl)
- if len(protoMetrics) == 0 {
- return
- }
- err := ae.metricsExporter.Send(&agentmetricspb.ExportMetricsServiceRequest{
- Metrics: protoMetrics,
- // TODO:(@odeke-em)
- // a) Figure out how to derive a Node from the environment
- // b) Figure out how to derive a Resource from the environment
- // or better letting users of the exporter configure it.
- })
- if err != nil {
- ae.setStateDisconnected()
- }
- }
-}
-
-func (ae *Exporter) Flush() {
- ae.traceBundler.Flush()
- ae.viewDataBundler.Flush()
-}
-
-func resourceProtoFromEnv() *resourcepb.Resource {
- rs, _ := resource.FromEnv(context.Background())
- if rs == nil {
- return nil
- }
-
- rprs := &resourcepb.Resource{
- Type: rs.Type,
- }
- if rs.Labels != nil {
- rprs.Labels = make(map[string]string)
- for k, v := range rs.Labels {
- rprs.Labels[k] = v
- }
- }
- return rprs
-}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go
deleted file mode 100644
index 3e05ae8b3..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocagent
-
-import (
- "time"
-
- "google.golang.org/grpc/credentials"
-)
-
-const (
- DefaultAgentPort uint16 = 55678
- DefaultAgentHost string = "localhost"
-)
-
-type ExporterOption interface {
- withExporter(e *Exporter)
-}
-
-type insecureGrpcConnection int
-
-var _ ExporterOption = (*insecureGrpcConnection)(nil)
-
-func (igc *insecureGrpcConnection) withExporter(e *Exporter) {
- e.canDialInsecure = true
-}
-
-// WithInsecure disables client transport security for the exporter's gRPC connection
-// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure
-// does. Note, by default, client security is required unless WithInsecure is used.
-func WithInsecure() ExporterOption { return new(insecureGrpcConnection) }
-
-type addressSetter string
-
-func (as addressSetter) withExporter(e *Exporter) {
- e.agentAddress = string(as)
-}
-
-var _ ExporterOption = (*addressSetter)(nil)
-
-// WithAddress allows one to set the address that the exporter will
-// connect to the agent on. If unset, it will instead try to use
-// connect to DefaultAgentHost:DefaultAgentPort
-func WithAddress(addr string) ExporterOption {
- return addressSetter(addr)
-}
-
-type serviceNameSetter string
-
-func (sns serviceNameSetter) withExporter(e *Exporter) {
- e.serviceName = string(sns)
-}
-
-var _ ExporterOption = (*serviceNameSetter)(nil)
-
-// WithServiceName allows one to set/override the service name
-// that the exporter will report to the agent.
-func WithServiceName(serviceName string) ExporterOption {
- return serviceNameSetter(serviceName)
-}
-
-type reconnectionPeriod time.Duration
-
-func (rp reconnectionPeriod) withExporter(e *Exporter) {
- e.reconnectionPeriod = time.Duration(rp)
-}
-
-func WithReconnectionPeriod(rp time.Duration) ExporterOption {
- return reconnectionPeriod(rp)
-}
-
-type compressorSetter string
-
-func (c compressorSetter) withExporter(e *Exporter) {
- e.compressor = string(c)
-}
-
-// UseCompressor will set the compressor for the gRPC client to use when sending requests.
-// It is the responsibility of the caller to ensure that the compressor set has been registered
-// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some
-// compressors auto-register on import, such as gzip, which can be registered by calling
-// `import _ "google.golang.org/grpc/encoding/gzip"`
-func UseCompressor(compressorName string) ExporterOption {
- return compressorSetter(compressorName)
-}
-
-type headerSetter map[string]string
-
-func (h headerSetter) withExporter(e *Exporter) {
- e.headers = map[string]string(h)
-}
-
-// WithHeaders will send the provided headers when the gRPC stream connection
-// is instantiated
-func WithHeaders(headers map[string]string) ExporterOption {
- return headerSetter(headers)
-}
-
-type clientCredentials struct {
- credentials.TransportCredentials
-}
-
-var _ ExporterOption = (*clientCredentials)(nil)
-
-// WithTLSCredentials allows the connection to use TLS credentials
-// when talking to the server. It takes in grpc.TransportCredentials instead
-// of say a Certificate file or a tls.Certificate, because the retrieving
-// these credentials can be done in many ways e.g. plain file, in code tls.Config
-// or by certificate rotation, so it is up to the caller to decide what to use.
-func WithTLSCredentials(creds credentials.TransportCredentials) ExporterOption {
- return &clientCredentials{TransportCredentials: creds}
-}
-
-func (cc *clientCredentials) withExporter(e *Exporter) {
- e.clientTransportCredentials = cc.TransportCredentials
-}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go
deleted file mode 100644
index 983ebe7b7..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocagent
-
-import (
- "math"
- "time"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/tracestate"
-
- tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
- "github.com/golang/protobuf/ptypes/timestamp"
-)
-
-const (
- maxAnnotationEventsPerSpan = 32
- maxMessageEventsPerSpan = 128
-)
-
-func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span {
- if sd == nil {
- return nil
- }
- var namePtr *tracepb.TruncatableString
- if sd.Name != "" {
- namePtr = &tracepb.TruncatableString{Value: sd.Name}
- }
- return &tracepb.Span{
- TraceId: sd.TraceID[:],
- SpanId: sd.SpanID[:],
- ParentSpanId: sd.ParentSpanID[:],
- Status: ocStatusToProtoStatus(sd.Status),
- StartTime: timeToTimestamp(sd.StartTime),
- EndTime: timeToTimestamp(sd.EndTime),
- Links: ocLinksToProtoLinks(sd.Links),
- Kind: ocSpanKindToProtoSpanKind(sd.SpanKind),
- Name: namePtr,
- Attributes: ocAttributesToProtoAttributes(sd.Attributes),
- TimeEvents: ocTimeEventsToProtoTimeEvents(sd.Annotations, sd.MessageEvents),
- Tracestate: ocTracestateToProtoTracestate(sd.Tracestate),
- }
-}
-
-var blankStatus trace.Status
-
-func ocStatusToProtoStatus(status trace.Status) *tracepb.Status {
- if status == blankStatus {
- return nil
- }
- return &tracepb.Status{
- Code: status.Code,
- Message: status.Message,
- }
-}
-
-func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links {
- if len(links) == 0 {
- return nil
- }
-
- sl := make([]*tracepb.Span_Link, 0, len(links))
- for _, ocLink := range links {
- // This redefinition is necessary to prevent ocLink.*ID[:] copies
- // being reused -- in short we need a new ocLink per iteration.
- ocLink := ocLink
-
- sl = append(sl, &tracepb.Span_Link{
- TraceId: ocLink.TraceID[:],
- SpanId: ocLink.SpanID[:],
- Type: ocLinkTypeToProtoLinkType(ocLink.Type),
- })
- }
-
- return &tracepb.Span_Links{
- Link: sl,
- }
-}
-
-func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type {
- switch oct {
- case trace.LinkTypeChild:
- return tracepb.Span_Link_CHILD_LINKED_SPAN
- case trace.LinkTypeParent:
- return tracepb.Span_Link_PARENT_LINKED_SPAN
- default:
- return tracepb.Span_Link_TYPE_UNSPECIFIED
- }
-}
-
-func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes {
- if len(attrs) == 0 {
- return nil
- }
- outMap := make(map[string]*tracepb.AttributeValue)
- for k, v := range attrs {
- switch v := v.(type) {
- case bool:
- outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}}
-
- case int:
- outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}}
-
- case int64:
- outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}}
-
- case string:
- outMap[k] = &tracepb.AttributeValue{
- Value: &tracepb.AttributeValue_StringValue{
- StringValue: &tracepb.TruncatableString{Value: v},
- },
- }
- }
- }
- return &tracepb.Span_Attributes{
- AttributeMap: outMap,
- }
-}
-
-// This code is mostly copied from
-// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/trace_proto.go#L46
-func ocTimeEventsToProtoTimeEvents(as []trace.Annotation, es []trace.MessageEvent) *tracepb.Span_TimeEvents {
- if len(as) == 0 && len(es) == 0 {
- return nil
- }
-
- timeEvents := &tracepb.Span_TimeEvents{}
- var annotations, droppedAnnotationsCount int
- var messageEvents, droppedMessageEventsCount int
-
- // Transform annotations
- for i, a := range as {
- if annotations >= maxAnnotationEventsPerSpan {
- droppedAnnotationsCount = len(as) - i
- break
- }
- annotations++
- timeEvents.TimeEvent = append(timeEvents.TimeEvent,
- &tracepb.Span_TimeEvent{
- Time: timeToTimestamp(a.Time),
- Value: transformAnnotationToTimeEvent(&a),
- },
- )
- }
-
- // Transform message events
- for i, e := range es {
- if messageEvents >= maxMessageEventsPerSpan {
- droppedMessageEventsCount = len(es) - i
- break
- }
- messageEvents++
- timeEvents.TimeEvent = append(timeEvents.TimeEvent,
- &tracepb.Span_TimeEvent{
- Time: timeToTimestamp(e.Time),
- Value: transformMessageEventToTimeEvent(&e),
- },
- )
- }
-
- // Process dropped counter
- timeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount)
- timeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount)
-
- return timeEvents
-}
-
-func transformAnnotationToTimeEvent(a *trace.Annotation) *tracepb.Span_TimeEvent_Annotation_ {
- return &tracepb.Span_TimeEvent_Annotation_{
- Annotation: &tracepb.Span_TimeEvent_Annotation{
- Description: &tracepb.TruncatableString{Value: a.Message},
- Attributes: ocAttributesToProtoAttributes(a.Attributes),
- },
- }
-}
-
-func transformMessageEventToTimeEvent(e *trace.MessageEvent) *tracepb.Span_TimeEvent_MessageEvent_ {
- return &tracepb.Span_TimeEvent_MessageEvent_{
- MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{
- Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType),
- Id: uint64(e.MessageID),
- UncompressedSize: uint64(e.UncompressedByteSize),
- CompressedSize: uint64(e.CompressedByteSize),
- },
- }
-}
-
-// clip32 clips an int to the range of an int32.
-func clip32(x int) int32 {
- if x < math.MinInt32 {
- return math.MinInt32
- }
- if x > math.MaxInt32 {
- return math.MaxInt32
- }
- return int32(x)
-}
-
-func timeToTimestamp(t time.Time) *timestamp.Timestamp {
- nanoTime := t.UnixNano()
- return &timestamp.Timestamp{
- Seconds: nanoTime / 1e9,
- Nanos: int32(nanoTime % 1e9),
- }
-}
-
-func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind {
- switch kind {
- case trace.SpanKindClient:
- return tracepb.Span_CLIENT
- case trace.SpanKindServer:
- return tracepb.Span_SERVER
- default:
- return tracepb.Span_SPAN_KIND_UNSPECIFIED
- }
-}
-
-func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate {
- if ts == nil {
- return nil
- }
- return &tracepb.Span_Tracestate{
- Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()),
- }
-}
-
-func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry {
- protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries))
- for _, entry := range entries {
- protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{
- Key: entry.Key,
- Value: entry.Value,
- })
- }
- return protoEntries
-}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go
deleted file mode 100644
index 43f18dec1..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go
+++ /dev/null
@@ -1,274 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocagent
-
-import (
- "errors"
- "time"
-
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
-
- "github.com/golang/protobuf/ptypes/timestamp"
-
- metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
-)
-
-var (
- errNilMeasure = errors.New("expecting a non-nil stats.Measure")
- errNilView = errors.New("expecting a non-nil view.View")
- errNilViewData = errors.New("expecting a non-nil view.Data")
-)
-
-func viewDataToMetric(vd *view.Data) (*metricspb.Metric, error) {
- if vd == nil {
- return nil, errNilViewData
- }
-
- descriptor, err := viewToMetricDescriptor(vd.View)
- if err != nil {
- return nil, err
- }
-
- timeseries, err := viewDataToTimeseries(vd)
- if err != nil {
- return nil, err
- }
-
- metric := &metricspb.Metric{
- MetricDescriptor: descriptor,
- Timeseries: timeseries,
- }
- return metric, nil
-}
-
-func viewToMetricDescriptor(v *view.View) (*metricspb.MetricDescriptor, error) {
- if v == nil {
- return nil, errNilView
- }
- if v.Measure == nil {
- return nil, errNilMeasure
- }
-
- desc := &metricspb.MetricDescriptor{
- Name: stringOrCall(v.Name, v.Measure.Name),
- Description: stringOrCall(v.Description, v.Measure.Description),
- Unit: v.Measure.Unit(),
- Type: aggregationToMetricDescriptorType(v),
- LabelKeys: tagKeysToLabelKeys(v.TagKeys),
- }
- return desc, nil
-}
-
-func stringOrCall(first string, call func() string) string {
- if first != "" {
- return first
- }
- return call()
-}
-
-type measureType uint
-
-const (
- measureUnknown measureType = iota
- measureInt64
- measureFloat64
-)
-
-func measureTypeFromMeasure(m stats.Measure) measureType {
- switch m.(type) {
- default:
- return measureUnknown
- case *stats.Float64Measure:
- return measureFloat64
- case *stats.Int64Measure:
- return measureInt64
- }
-}
-
-func aggregationToMetricDescriptorType(v *view.View) metricspb.MetricDescriptor_Type {
- if v == nil || v.Aggregation == nil {
- return metricspb.MetricDescriptor_UNSPECIFIED
- }
- if v.Measure == nil {
- return metricspb.MetricDescriptor_UNSPECIFIED
- }
-
- switch v.Aggregation.Type {
- case view.AggTypeCount:
- // Cumulative on int64
- return metricspb.MetricDescriptor_CUMULATIVE_INT64
-
- case view.AggTypeDistribution:
- // Cumulative types
- return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION
-
- case view.AggTypeLastValue:
- // Gauge types
- switch measureTypeFromMeasure(v.Measure) {
- case measureFloat64:
- return metricspb.MetricDescriptor_GAUGE_DOUBLE
- case measureInt64:
- return metricspb.MetricDescriptor_GAUGE_INT64
- }
-
- case view.AggTypeSum:
- // Cumulative types
- switch measureTypeFromMeasure(v.Measure) {
- case measureFloat64:
- return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE
- case measureInt64:
- return metricspb.MetricDescriptor_CUMULATIVE_INT64
- }
- }
-
- // For all other cases, return unspecified.
- return metricspb.MetricDescriptor_UNSPECIFIED
-}
-
-func tagKeysToLabelKeys(tagKeys []tag.Key) []*metricspb.LabelKey {
- labelKeys := make([]*metricspb.LabelKey, 0, len(tagKeys))
- for _, tagKey := range tagKeys {
- labelKeys = append(labelKeys, &metricspb.LabelKey{
- Key: tagKey.Name(),
- })
- }
- return labelKeys
-}
-
-func viewDataToTimeseries(vd *view.Data) ([]*metricspb.TimeSeries, error) {
- if vd == nil || len(vd.Rows) == 0 {
- return nil, nil
- }
-
- // Given that view.Data only contains Start, End
- // the timestamps for all the row data will be the exact same
- // per aggregation. However, the values will differ.
- // Each row has its own tags.
- startTimestamp := timeToProtoTimestamp(vd.Start)
- endTimestamp := timeToProtoTimestamp(vd.End)
-
- mType := measureTypeFromMeasure(vd.View.Measure)
- timeseries := make([]*metricspb.TimeSeries, 0, len(vd.Rows))
- // It is imperative that the ordering of "LabelValues" matches those
- // of the Label keys in the metric descriptor.
- for _, row := range vd.Rows {
- labelValues := labelValuesFromTags(row.Tags)
- point := rowToPoint(vd.View, row, endTimestamp, mType)
- timeseries = append(timeseries, &metricspb.TimeSeries{
- StartTimestamp: startTimestamp,
- LabelValues: labelValues,
- Points: []*metricspb.Point{point},
- })
- }
-
- if len(timeseries) == 0 {
- return nil, nil
- }
-
- return timeseries, nil
-}
-
-func timeToProtoTimestamp(t time.Time) *timestamp.Timestamp {
- unixNano := t.UnixNano()
- return &timestamp.Timestamp{
- Seconds: int64(unixNano / 1e9),
- Nanos: int32(unixNano % 1e9),
- }
-}
-
-func rowToPoint(v *view.View, row *view.Row, endTimestamp *timestamp.Timestamp, mType measureType) *metricspb.Point {
- pt := &metricspb.Point{
- Timestamp: endTimestamp,
- }
-
- switch data := row.Data.(type) {
- case *view.CountData:
- pt.Value = &metricspb.Point_Int64Value{Int64Value: data.Value}
-
- case *view.DistributionData:
- pt.Value = &metricspb.Point_DistributionValue{
- DistributionValue: &metricspb.DistributionValue{
- Count: data.Count,
- Sum: float64(data.Count) * data.Mean, // because Mean := Sum/Count
- // TODO: Add Exemplar
- Buckets: bucketsToProtoBuckets(data.CountPerBucket),
- BucketOptions: &metricspb.DistributionValue_BucketOptions{
- Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
- Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
- Bounds: v.Aggregation.Buckets,
- },
- },
- },
- SumOfSquaredDeviation: data.SumOfSquaredDev,
- }}
-
- case *view.LastValueData:
- setPointValue(pt, data.Value, mType)
-
- case *view.SumData:
- setPointValue(pt, data.Value, mType)
- }
-
- return pt
-}
-
-// Not returning anything from this function because metricspb.Point.is_Value is an unexported
-// interface hence we just have to set its value by pointer.
-func setPointValue(pt *metricspb.Point, value float64, mType measureType) {
- if mType == measureInt64 {
- pt.Value = &metricspb.Point_Int64Value{Int64Value: int64(value)}
- } else {
- pt.Value = &metricspb.Point_DoubleValue{DoubleValue: value}
- }
-}
-
-func bucketsToProtoBuckets(countPerBucket []int64) []*metricspb.DistributionValue_Bucket {
- distBuckets := make([]*metricspb.DistributionValue_Bucket, len(countPerBucket))
- for i := 0; i < len(countPerBucket); i++ {
- count := countPerBucket[i]
-
- distBuckets[i] = &metricspb.DistributionValue_Bucket{
- Count: count,
- }
- }
-
- return distBuckets
-}
-
-func labelValuesFromTags(tags []tag.Tag) []*metricspb.LabelValue {
- if len(tags) == 0 {
- return nil
- }
-
- labelValues := make([]*metricspb.LabelValue, 0, len(tags))
- for _, tag_ := range tags {
- labelValues = append(labelValues, &metricspb.LabelValue{
- Value: tag_.Value,
-
- // It is imperative that we set the "HasValue" attribute,
- // in order to distinguish missing a label from the empty string.
- // https://godoc.org/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1#LabelValue.HasValue
- //
- // OpenCensus-Go uses non-pointers for tags as seen by this function's arguments,
- // so the best case that we can use to distinguish missing labels/tags from the
- // empty string is by checking if the Tag.Key.Name() != "" to indicate that we have
- // a value.
- HasValue: tag_.Key.Name() != "",
- })
- }
- return labelValues
-}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go
deleted file mode 100644
index 68be4c75b..000000000
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocagent
-
-const Version = "0.0.1"
diff --git a/vendor/github.com/Azure/go-autorest/.gitignore b/vendor/github.com/Azure/go-autorest/.gitignore
new file mode 100644
index 000000000..3350aaf70
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/.gitignore
@@ -0,0 +1,32 @@
+# The standard Go .gitignore file follows. (Sourced from: github.com/github/gitignore/master/Go.gitignore)
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+.DS_Store
+.idea/
+.vscode/
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+# go-autorest specific
+vendor/
+autorest/azure/example/example
diff --git a/vendor/github.com/Azure/go-autorest/CHANGELOG.md b/vendor/github.com/Azure/go-autorest/CHANGELOG.md
new file mode 100644
index 000000000..d1f596bfc
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/CHANGELOG.md
@@ -0,0 +1,1004 @@
+# CHANGELOG
+
+## v14.2.0
+
+- Added package comment to make `github.com/Azure/go-autorest` importable.
+
+## v14.1.1
+
+### Bug Fixes
+
+- Change `x-ms-authorization-auxiliary` header value separator to comma.
+
+## v14.1.0
+
+### New Features
+
+- Added `azure.SetEnvironment()` that will update the global environments map with the specified values.
+
+## v14.0.1
+
+### Bug Fixes
+
+- Fix race condition when refreshing token.
+- Fixed some tests to work with Go 1.14.
+
+## v14.0.0
+
+### Breaking Changes
+
+- By default, the `DoRetryForStatusCodes` functions will no longer infinitely retry a request when the response returns an HTTP status code of 429 (StatusTooManyRequests). To opt in to the old behavior set `autorest.Count429AsRetry` to `false`.
+
+### New Features
+
+- Variable `autorest.Max429Delay` can be used to control the maximum delay between retries when a 429 is received with no `Retry-After` header. The default is zero which means there is no cap.
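+
+For reference, a minimal sketch of how these two package-level knobs might be set (the names come from the notes above; the types are assumed to be `bool` and `time.Duration`):
+
+```go
+package main
+
+import (
+	"time"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+func main() {
+	// Opt back in to the pre-v14 behavior: 429s are not counted against the retry cap.
+	autorest.Count429AsRetry = false
+	// Cap the delay between retries on 429 responses that carry no Retry-After header.
+	autorest.Max429Delay = 30 * time.Second
+	// ... construct and use clients as usual ...
+}
+```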
+
+## v13.4.0
+
+### New Features
+
+- Added field `SendDecorators` to the `Client` type. This can be used to specify a custom chain of SendDecorators per client.
+- Added method `Client.Send()` which includes logic for selecting the preferred chain of SendDecorators.
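+
+A hedged sketch of wiring a per-client chain; it assumes `SendDecorators` is a plain slice field on `Client`, as described above, and reuses the existing `DoRetryForStatusCodes` decorator:
+
+```go
+package example
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+func newClient() autorest.Client {
+	c := autorest.NewClientWithUserAgent("example-service/1.0")
+	// Per-client chain: retry 429/503 responses up to three times with a fixed backoff.
+	c.SendDecorators = []autorest.SendDecorator{
+		autorest.DoRetryForStatusCodes(3, 2*time.Second,
+			http.StatusTooManyRequests, http.StatusServiceUnavailable),
+	}
+	return c
+}
+```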
+
+## v13.3.3
+
+### Bug Fixes
+
+- Fixed connection leak when retrying requests.
+- Enabled exponential back-off with a 2-minute cap when retrying on 429.
+- Fixed some cases where errors were inadvertently dropped.
+
+## v13.3.2
+
+### Bug Fixes
+
+- Updated `autorest.AsStringSlice()` to convert slice elements to their string representation.
+
+## v13.3.1
+
+- Updated external dependencies.
+
+### Bug Fixes
+
+## v13.3.0
+
+### New Features
+
+- Added support for shared key and shared access signature token authorization.
+ - `autorest.NewSharedKeyAuthorizer()` and dependent types.
+ - `autorest.NewSASTokenAuthorizer()` and dependent types.
+- Added `ServicePrincipalToken.SetCustomRefresh()` so a custom refresh function can be invoked when a token has expired.
+
+### Bug Fixes
+
+- Fixed `cli.AccessTokensPath()` to respect `AZURE_CONFIG_DIR` when set.
+- Support parsing error messages in XML responses.
+
+## v13.2.0
+
+### New Features
+
+- Added the following functions to replace their versions that don't take a context.
+ - `adal.InitiateDeviceAuthWithContext()`
+ - `adal.CheckForUserCompletionWithContext()`
+ - `adal.WaitForUserCompletionWithContext()`
+
+## v13.1.0
+
+### New Features
+
+- Added support for MSI authentication on Azure App Service and Azure Functions.
+
+## v13.0.2
+
+### Bug Fixes
+
+- Always retry a request even if the sender returns a non-nil error.
+
+## v13.0.1
+
+### Bug Fixes
+
+- Fixed `autorest.WithQueryParameters()` so that it properly encodes multi-value query parameters.
+
+## v13.0.0
+
+### Breaking Changes
+
+The `tracing` package has been rewritten to provide a common interface for consumers to wire in the tracing package of their choice.
+What this means is that by default no tracing provider will be compiled into your program and setting the `AZURE_SDK_TRACING_ENABLED`
+environment variable will have no effect. To enable this previous behavior you must now add the following import to your source file.
+```go
+ import _ "github.com/Azure/go-autorest/tracing/opencensus"
+```
+The APIs required by autorest-generated code have remained but some APIs have been removed and new ones added.
+The following APIs and variables have been removed (the majority of them were moved to the `opencensus` package).
+- tracing.Transport
+- tracing.Enable()
+- tracing.EnableWithAIForwarding()
+- tracing.Disable()
+
+The following APIs and types have been added
+- tracing.Tracer
+- tracing.Register()
+
+To hook up a tracer simply call `tracing.Register()` passing in a type that satisfies the `tracing.Tracer` interface.
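+
+As an illustration only, a no-op tracer might look like the following, assuming the `tracing.Tracer` interface exposes `NewTransport`, `StartSpan`, and `EndSpan` as in recent releases:
+
+```go
+package main
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/Azure/go-autorest/tracing"
+)
+
+// noopTracer satisfies tracing.Tracer but records nothing.
+type noopTracer struct{}
+
+func (noopTracer) NewTransport(base *http.Transport) http.RoundTripper { return base }
+func (noopTracer) StartSpan(ctx context.Context, name string) context.Context { return ctx }
+func (noopTracer) EndSpan(ctx context.Context, httpStatusCode int, err error) {}
+
+func main() {
+	// Register the tracer once at startup; autorest clients pick it up globally.
+	tracing.Register(noopTracer{})
+}
+```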
+
+## v12.4.3
+
+### Bug Fixes
+
+- `autorest.MultiTenantServicePrincipalTokenAuthorizer` will now properly add its auxiliary bearer tokens.
+
+## v12.4.2
+
+### Bug Fixes
+
+- Improvements to the fixes made in v12.4.1.
+ - Remove `override` stanza from Gopkg.toml and `replace` directive from go.mod as they don't apply when being consumed as a dependency.
+ - Switched to latest version of `ocagent` that still depends on protobuf v1.2.
+ - Add indirect dependencies to the `required` clause with matching `constraint` stanzas so that `dep` dependencies match go.sum.
+
+## v12.4.1
+
+### Bug Fixes
+
+- Updated OpenCensus and OCAgent versions to versions that don't depend on v1.3+ of protobuf as it was breaking kubernetes.
+- Pinned opencensus-proto to a version that's compatible with our versions of OpenCensus and OCAgent.
+
+## v12.4.0
+
+### New Features
+
+- Added `autorest.WithPrepareDecorators` and `autorest.GetPrepareDecorators` for adding and retrieving a custom chain of PrepareDecorators to the provided context.
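+
+A minimal sketch, assuming `WithPrepareDecorators` takes a `[]autorest.PrepareDecorator` and returns a derived context that clients consult via `GetPrepareDecorators`; the header name and value are purely illustrative:
+
+```go
+package example
+
+import (
+	"context"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+func withCustomHeader(ctx context.Context) context.Context {
+	// Attach an extra PrepareDecorator chain to the context.
+	return autorest.WithPrepareDecorators(ctx, []autorest.PrepareDecorator{
+		autorest.WithHeader("x-example-header", "example-value"),
+	})
+}
+```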
+
+## v12.3.0
+
+### New Features
+
+- Support for multi-tenant via x-ms-authorization-auxiliary header has been added for client credentials with
+ secret scenario; this basically bundles multiple OAuthConfig and ServicePrincipalToken types into corresponding
+ MultiTenant* types along with a new authorizer that adds the primary and auxiliary token headers to the request.
+ The authentication helpers have been updated to support this scenario; if environment var AZURE_AUXILIARY_TENANT_IDS
+ is set with a semicolon delimited list of tenants the multi-tenant codepath will kick in to create the appropriate authorizer.
+ See `adal.NewMultiTenantOAuthConfig`, `adal.NewMultiTenantServicePrincipalToken` and `autorest.NewMultiTenantServicePrincipalTokenAuthorizer`
+ along with their supporting types and methods.
+- Added `autorest.WithSendDecorators` and `autorest.GetSendDecorators` for adding and retrieving a custom chain of SendDecorators to the provided context.
+- Added `autorest.DoRetryForStatusCodesWithCap` and `autorest.DelayForBackoffWithCap` to enforce an upper bound on the duration between retries.
+
+## v12.2.0
+
+### New Features
+
+- Added `autorest.WithXML`, `autorest.AsMerge`, `autorest.WithBytes` preparer decorators.
+- Added `autorest.ByUnmarshallingBytes` response decorator.
+- Added `Response.IsHTTPStatus` and `Response.HasHTTPStatus` helper methods for inspecting HTTP status code in `autorest.Response` types.
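+
+A short, hedged example of the new response helpers (assuming `HasHTTPStatus` is variadic and `IsHTTPStatus` checks a single code):
+
+```go
+package example
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+func report(resp autorest.Response) {
+	// HasHTTPStatus reports whether the wrapped response matched any of the codes.
+	if resp.HasHTTPStatus(http.StatusOK, http.StatusCreated) {
+		fmt.Println("success")
+	}
+	// IsHTTPStatus checks exactly one code.
+	if resp.IsHTTPStatus(http.StatusNotFound) {
+		fmt.Println("not found")
+	}
+}
+```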
+
+### Bug Fixes
+
+- `autorest.DelayWithRetryAfter` now supports HTTP-Dates in the `Retry-After` header and is not limited to just 429 status codes.
+
+## v12.1.0
+
+### New Features
+
+- Added `to.ByteSlicePtr()`.
+- Added blob/queue storage resource ID to `azure.ResourceIdentifier`.
+
+## v12.0.0
+
+### Breaking Changes
+
+In preparation for modules the following deprecated content has been removed.
+
+ - async.NewFuture()
+ - async.Future.Done()
+ - async.Future.WaitForCompletion()
+ - async.DoPollForAsynchronous()
+ - The `utils` package
+ - validation.NewErrorWithValidationError()
+ - The `version` package
+
+## v11.9.0
+
+### New Features
+
+- Add `ResourceIdentifiers` field to `azure.Environment` containing resource IDs for public and sovereign clouds.
+
+## v11.8.0
+
+### New Features
+
+- Added `autorest.NewClientWithOptions()` to support endpoints that require free renegotiation.
+
+## v11.7.1
+
+### Bug Fixes
+
+- Fix missing support for http(s) proxy when using the default sender.
+
+## v11.7.0
+
+### New Features
+
+- Added methods to obtain a ServicePrincipalToken on the various credential configuration types in the `auth` package.
+
+## v11.6.1
+
+### Bug Fixes
+
+- Fix ACR DNS endpoint for government clouds.
+- Add Cosmos DB DNS endpoints.
+- Update dependencies to resolve build breaks in OpenCensus.
+
+## v11.6.0
+
+### New Features
+
+- Added type `autorest.BasicAuthorizer` to support Basic authentication.
+
+## v11.5.2
+
+### Bug Fixes
+
+- Fixed `GetTokenFromCLI` did not work with zsh.
+
+## v11.5.1
+
+### Bug Fixes
+
+- In `Client.sender()` set the minimum TLS version on HTTP clients to 1.2.
+
+## v11.5.0
+
+### New Features
+
+- The `auth` package has been refactored so that the environment and file settings are now available.
+- The methods used in `auth.NewAuthorizerFromEnvironment()` are now exported so that custom authorization chains can be created.
+- Added support for certificate authorization for file-based config.
+
+## v11.4.0
+
+### New Features
+
+- Added `adal.AddToUserAgent()` so callers can append custom data to the user-agent header used for ADAL requests.
+- Exported `adal.UserAgent()` for parity with `autorest.Client`.
+
+## v11.3.2
+
+### Bug Fixes
+
+- In `Future.WaitForCompletionRef()` if the provided context has a deadline don't add the default deadline.
+
+## v11.3.1
+
+### Bug Fixes
+
+- For an LRO PUT operation the final GET URL was incorrectly set to the Location polling header in some cases.
+
+## v11.3.0
+
+### New Features
+
+- Added method `ServicePrincipalToken()` to `DeviceFlowConfig` type.
+
+## v11.2.8
+
+### Bug Fixes
+
+- Deprecate content in the `version` package. The functionality has been superseded by content in the `autorest` package.
+
+## v11.2.7
+
+### Bug Fixes
+
+- Fix environment variable name for enabling tracing from `AZURE_SDK_TRACING_ENABELD` to `AZURE_SDK_TRACING_ENABLED`.
+ Note that for backward compatibility reasons, both will work until the next major version release of the package.
+
+## v11.2.6
+
+### Bug Fixes
+
+- If zero bytes are read from a polling response body don't attempt to unmarshal them.
+
+## v11.2.5
+
+### Bug Fixes
+
+- Removed race condition in `autorest.DoRetryForStatusCodes`.
+
+## v11.2.4
+
+### Bug Fixes
+
+- Function `cli.ProfilePath` now respects environment `AZURE_CONFIG_DIR` if available.
+
+## v11.2.1
+
+NOTE: Versions of Go prior to 1.10 have been removed from CI as they no
+longer work with golint.
+
+### Bug Fixes
+
+- Method `MSIConfig.Authorizer` now supports user-assigned identities.
+- The adal package now reports its own user-agent string.
+
+## v11.2.0
+
+### New Features
+
+- Added `tracing` package that enables instrumentation of HTTP and API calls.
+ Setting the env variable `AZURE_SDK_TRACING_ENABLED` or calling `tracing.Enable`
+ will start instrumenting the code for metrics and traces.
+ Additionally, setting the env variable `OCAGENT_TRACE_EXPORTER_ENDPOINT` or
+ calling `tracing.EnableWithAIForwarding` will start the instrumentation and connect to an
+ App Insights Local Forwarder that needs to be running. Note that if the
+ AI Local Forwarder is not running, tracing will still be enabled.
+ By default, instrumentation is disabled. Once enabled, instrumentation can also
+ be programmatically disabled by calling `Disable`.
+- Added `DoneWithContext` call for checking LRO status. `Done` has been deprecated.
+
+### Bug Fixes
+
+- Don't use the initial request's context for LRO polling.
+- Don't override the `refreshLock` and the `http.Client` when unmarshalling `ServicePrincipalToken` if
+ it is already set.
+
+## v11.1.1
+
+### Bug Fixes
+
+- When creating a future always include the polling tracker even if there's a failure; this allows the underlying response to be obtained by the caller.
+
+## v11.1.0
+
+### New Features
+
+- Added `auth.NewAuthorizerFromCLI` to create an authorizer configured from the Azure 2.0 CLI.
+- Added `adal.NewOAuthConfigWithAPIVersion` to create an OAuthConfig with the specified API version.
+
+## v11.0.1
+
+### New Features
+
+- Added `x5c` header to client assertion for certificate Issuer+Subject Name authentication.
+
+## v11.0.0
+
+### Breaking Changes
+
+- To handle differences between ADFS and AAD the following fields have had their types changed from `string` to `json.Number`
+ - ExpiresIn
+ - ExpiresOn
+ - NotBefore
+
+### New Features
+
+- Added `auth.NewAuthorizerFromFileWithResource` to create an authorizer from the config file with the specified resource.
+- Setting a client's `PollingDuration` to zero will use the provided context to control a LRO's polling duration.
+
+## v10.15.5
+
+### Bug Fixes
+
+- In `DoRetryForStatusCodes`, if a request's context is cancelled return the last response.
+
+## v10.15.4
+
+### Bug Fixes
+
+- If a polling operation returns a failure status code return the associated error.
+
+## v10.15.3
+
+### Bug Fixes
+
+- Initialize the polling URL and method for an LRO tracker on each iteration, favoring the Azure-AsyncOperation header.
+
+## v10.15.2
+
+### Bug Fixes
+
+- Use fmt.Fprint when printing request/response so that any escape sequences aren't treated as format specifiers.
+
+## v10.15.1
+
+### Bug Fixes
+
+- If an LRO API returns a `Failed` provisioning state in the initial response return an error at that point so the caller doesn't have to poll.
+- For failed LROs without an OData v4 error include the response body in the error's `AdditionalInfo` field to aid in diagnosing the failure.
+
+## v10.15.0
+
+### New Features
+
+- Add initial support for request/response logging via setting environment variables.
+ Setting `AZURE_GO_SDK_LOG_LEVEL` to `LogInfo` will log request/response
+ without their bodies. To include the bodies set the log level to `LogDebug`.
+ By default the logger writes to stderr; however, it can also write to stdout or a file
+ if specified in `AZURE_GO_SDK_LOG_FILE`. Note that if the specified file
+ already exists it will be truncated.
+ IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key
+ headers. Any other secrets will _not_ be redacted.
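+
+As a hedged illustration of the variables above (the tool name and log path are hypothetical; the variables are set for a child process so they are visible before the SDK initializes its logger):
+
+```go
+package main
+
+import (
+	"os"
+	"os/exec"
+)
+
+func main() {
+	// Run a program built against the SDK with request/response logging enabled.
+	cmd := exec.Command("./my-azure-tool")
+	cmd.Env = append(os.Environ(),
+		"AZURE_GO_SDK_LOG_LEVEL=LogDebug",          // include request/response bodies
+		"AZURE_GO_SDK_LOG_FILE=/tmp/azure-sdk.log", // truncated if it already exists
+	)
+	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
+	_ = cmd.Run()
+}
+```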
+
+## v10.14.0
+
+### New Features
+
+- Added package version that contains version constants and user-agent data.
+
+### Bug Fixes
+
+- Add the user-agent to token requests.
+
+## v10.13.0
+
+- Added support for additionalInfo in ServiceError type.
+
+## v10.12.0
+
+### New Features
+
+- Added field ServicePrincipalToken.MaxMSIRefreshAttempts to configure the maximum number of attempts to refresh an MSI token.
+
+## v10.11.4
+
+### Bug Fixes
+
+- If an LRO returns http.StatusOK on the initial response with no async headers return the response body from Future.GetResult().
+- If there is no "final GET URL" return an error from Future.GetResult().
+
+## v10.11.3
+
+### Bug Fixes
+
+- In IMDS retry logic, if we don't receive a response don't retry.
+ - Renamed the retry function so it's clear it's meant for IMDS only.
+- For error response bodies that aren't OData-v4 compliant stick the raw JSON in the ServiceError.Details field so the information isn't lost.
+ - Also add the raw HTTP response to the DetailedResponse.
+- Removed superfluous wrapping of response error in azure.DoRetryWithRegistration().
+
+## v10.11.2
+
+### Bug Fixes
+
+- Validation for integers handles int and int64 types.
+
+## v10.11.1
+
+### Bug Fixes
+
+- Adding User information to authorization config as parsed from CLI cache.
+
+## v10.11.0
+
+### New Features
+
+- Added NewServicePrincipalTokenFromManualTokenSecret for creating a new SPT using a manual token and secret
+- Added method ServicePrincipalToken.MarshalTokenJSON() to marshall the inner Token
+
+## v10.10.0
+
+### New Features
+
+- Most ServicePrincipalTokens can now be marshalled/unmarshall to/from JSON (ServicePrincipalCertificateSecret and ServicePrincipalMSISecret are not supported).
+- Added method ServicePrincipalToken.SetRefreshCallbacks().
+
+## v10.9.2
+
+### Bug Fixes
+
+- Refreshing a refresh token obtained from a web app authorization code now works.
+
+## v10.9.1
+
+### Bug Fixes
+
+- The retry logic for MSI token requests now uses exponential backoff per the guidelines.
+- IsTemporaryNetworkError() will return true for errors that don't implement the net.Error interface.
+
+## v10.9.0
+
+### Deprecated Methods
+
+| Old Method | New Method |
+| -------------------------: | :---------------------------: |
+| azure.NewFuture() | azure.NewFutureFromResponse() |
+| Future.WaitForCompletion() | Future.WaitForCompletionRef() |
+
+### New Features
+
+- Added azure.NewFutureFromResponse() for creating a Future from the initial response from an async operation.
+- Added Future.GetResult() for making the final GET call to retrieve the result from an async operation.
+
+### Bug Fixes
+
+- Some futures failed to return their results, this should now be fixed.
+
+## v10.8.2
+
+### Bug Fixes
+
+- Add nil-guard to token retry logic.
+
+## v10.8.1
+
+### Bug Fixes
+
+- Return a TokenRefreshError if the sender fails on the initial request.
+- Don't retry on non-temporary network errors.
+
+## v10.8.0
+
+- Added NewAuthorizerFromEnvironmentWithResource() helper function.
+
+## v10.7.0
+
+### New Features
+
+- Added \*WithContext() methods to ADAL token refresh operations.
+
+## v10.6.2
+
+- Fixed a bug on device authentication.
+
+## v10.6.1
+
+- Added retries to MSI token get request.
+
+## v10.6.0
+
+- Changed MSI token implementation. Now, the token endpoint is the IMDS endpoint.
+
+## v10.5.1
+
+### Bug Fixes
+
+- `DeviceFlowConfig.Authorizer()` now prints the device code message when running `go test`. `-v` flag is required.
+
+## v10.5.0
+
+### New Features
+
+- Added NewPollingRequestWithContext() for use with polling asynchronous operations.
+
+### Bug Fixes
+
+- Make retry logic use the request's context instead of the deprecated Cancel object.
+
+## v10.4.0
+
+### New Features
+
+- Added helper for parsing Azure Resource ID's.
+- Added deprecation message to utils.GetEnvVarOrExit()
+
+## v10.3.0
+
+### New Features
+
+- Added EnvironmentFromURL method to load an Environment from a given URL. This function is particularly useful in the private and hybrid Cloud model, where one may define their own endpoints
+- Added TokenAudience endpoint to Environment structure. This is useful in private and hybrid cloud models where TokenAudience endpoint can be different from ResourceManagerEndpoint
+
+## v10.2.0
+
+### New Features
+
+- Added endpoints for batch management.
+
+## v10.1.3
+
+### Bug Fixes
+
+- In Client.Do() invoke WithInspection() last so that it will inspect WithAuthorization().
+- Fixed authorization methods to invoke p.Prepare() first, aligning them with the other preparers.
+
+## v10.1.2
+
+- Corrected comment for auth.NewAuthorizerFromFile() function.
+
+## v10.1.1
+
+- Updated version number to match current release.
+
+## v10.1.0
+
+### New Features
+
+- Expose the polling URL for futures.
+
+### Bug Fixes
+
+- Add validation.NewErrorWithValidationError back to prevent breaking changes (it is deprecated).
+
+## v10.0.0
+
+### New Features
+
+- Added target and innererror fields to ServiceError to comply with OData v4 spec.
+- The Done() method on futures will now return a ServiceError object when available (it used to return a partial value of such errors).
+- Added helper methods for obtaining authorizers.
+- Expose the polling URL for futures.
+
+### Bug Fixes
+
+- Switched from glide to dep for dependency management.
+- Fixed unmarshaling of ServiceError for JSON bodies that don't conform to the OData spec.
+- Fixed a race condition in token refresh.
+
+### Breaking Changes
+
+- The ServiceError.Details field type has been changed to match the OData v4 spec.
+- Go v1.7 has been dropped from CI.
+- API parameter validation failures will now return a unique error type validation.Error.
+- The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race).
+
+## v9.10.0
+
+- Fix the Service Bus suffix in Azure public env
+- Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control)
+
+## v9.9.0
+
+### New Features
+
+- Added EventGridKeyAuthorizer for key authorization with event grid topics.
+
+### Bug Fixes
+
+- Fixed race condition when auto-refreshing service principal tokens.
+
+## v9.8.1
+
+### Bug Fixes
+
+- Added http.StatusNoContent (204) to the list of expected status codes for long-running operations.
+- Updated runtime version info so it's current.
+
+## v9.8.0
+
+### New Features
+
+- Added type azure.AsyncOpIncompleteError to be returned from a future's Result() method when the operation has not completed.
+
+## v9.7.1
+
+### Bug Fixes
+
+- Use correct AAD and Graph endpoints for US Gov environment.
+
+## v9.7.0
+
+### New Features
+
+- Added support for application/octet-stream MIME types.
+
+## v9.6.1
+
+### Bug Fixes
+
+- Ensure Authorization header is added to request when polling for registration status.
+
+## v9.6.0
+
+### New Features
+
+- Added support for acquiring tokens via MSI with a user assigned identity.
+
+## v9.5.3
+
+### Bug Fixes
+
+- Don't remove encoding of existing URL Query parameters when calling autorest.WithQueryParameters.
+- Set correct Content Type when using autorest.WithFormData.
+
+## v9.5.2
+
+### Bug Fixes
+
+- Check for nil \*http.Response before dereferencing it.
+
+## v9.5.1
+
+### Bug Fixes
+
+- Don't count http.StatusTooManyRequests (429) against the retry cap.
+- Use retry logic when SkipResourceProviderRegistration is set to true.
+
+## v9.5.0
+
+### New Features
+
+- Added support for username + password, API key, authorization code and cognitive services authentication.
+- Added field SkipResourceProviderRegistration to clients to provide a way to skip auto-registration of RPs.
+- Added utility function AsStringSlice() to convert its parameters to a string slice.
+
+### Bug Fixes
+
+- When checking for authentication failures look at the error type not the status code as it could vary.
+
+## v9.4.2
+
+### Bug Fixes
+
+- Validate parameters when creating credentials.
+- Don't retry requests if the returned status is a 401 (http.StatusUnauthorized) as it will never succeed.
+
+## v9.4.1
+
+### Bug Fixes
+
+- Update the AccessTokensPath() to read the access tokens path from AZURE_ACCESS_TOKEN_FILE. If this
+ environment variable is not set, it will fall back to the default path set by Azure CLI.
+- Use case-insensitive string comparison for polling states.
+
+## v9.4.0
+
+### New Features
+
+- Added WaitForCompletion() to Future as a default polling implementation.
+
+### Bug Fixes
+
+- Method Future.Done() shouldn't update polling status for unexpected HTTP status codes.
+
+## v9.3.1
+
+### Bug Fixes
+
+- DoRetryForStatusCodes will retry if sender.Do returns a non-nil error.
+
+## v9.3.0
+
+### New Features
+
+- Added PollingMethod() to Future so callers know what kind of polling mechanism is used.
+- Added azure.ChangeToGet() which transforms an http.Request into a GET (to be used with LROs).
+
+## v9.2.0
+
+### New Features
+
+- Added support for custom Azure Stack endpoints.
+- Added type azure.Future used to track the status of long-running operations.
+
+### Bug Fixes
+
+- Preserve the original error in DoRetryWithRegistration when registration fails.
+
+## v9.1.1
+
+- Fixes a bug regarding the cookie jar on `autorest.Client.Sender`.
+
+## v9.1.0
+
+### New Features
+
+- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error.
+- Support for loading Azure CLI Authentication files.
+- Automatically register your subscription with the Azure Resource Provider if it hadn't been previously.
+
+### Bug Fixes
+
+- RetriableRequest can now tolerate a ReadSeekable body being read but not reset.
+- Adding missing Apache Headers
+
+## v9.0.0
+
+> **IMPORTANT:** This release was initially labeled incorrectly as `v8.4.0`. From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We apologize for any inconvenience this causes.
+
+Adding MSI Endpoint Support and CLI token rehydration.
+
+## v8.3.1
+
+Pick up bug fix in adal for MSI support.
+
+## v8.3.0
+
+Updates to Error string formats for clarity. Also, adding a copy of the http.Response to errors for an improved debugging experience.
+
+## v8.2.0
+
+### New Features
+
+- Add support for bearer authentication callbacks
+- Support 429 response codes that include "Retry-After" header
+- Support validation constraint "Pattern" for map keys
+
+### Bug Fixes
+
+- Make RetriableRequest work with multiple versions of Go
+
+## v8.1.1
+
+Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8.
+
+## v8.1.0
+
+Adds RetriableRequest type for more efficient handling of retrying HTTP requests.
+
+## v8.0.0
+
+ADAL refactored into its own package.
+Support for UNIX time.
+
+## v7.3.1
+
+- Version Testing now removed from production bits that are shipped with the library.
+
+## v7.3.0
+
+- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations
+ to acknowledge that they do not need either the entire response body or a
+ trailing portion of it. In doing so, Go's http library can reuse HTTP
+ connections more readily.
+- Adding `PrepareDecorator` to target custom BaseURLs.
+- Adding ACR suffix to public cloud environment.
+- Updating Glide dependencies.
+
+## v7.2.5
+
+- Fixed the Active Directory endpoint for the China cloud.
+- Removes UTF-8 BOM if present in response payload.
+- Added telemetry.
+
+## v7.2.3
+
+- Fixing bug in calls to `DelayForBackoff` that caused doubling of delay
+ duration.
+
+## v7.2.2
+
+- autorest/azure: added ASM and ARM VM DNS suffixes.
+
+## v7.2.1
+
+- fixed parsing of UTC times that are not RFC3339 conformant.
+
+## v7.2.0
+
+- autorest/validation: Reformat validation error for better error message.
+
+## v7.1.0
+
+- preparer: Added support for multipart formdata - WithMultiPartFormdata()
+- preparer: Added support for sending file in request body - WithFile
+- client: Added RetryDuration parameter.
+- autorest/validation: new package for validation code for Azure Go SDK.
+
+## v7.0.7
+
+- Add trailing / to endpoint
+- azure: add EnvironmentFromName
+
+## v7.0.6
+
+- Add retry logic for 408, 500, 502, 503 and 504 status codes.
+- Change url path and query encoding logic.
+- Fix DelayForBackoff for proper exponential delay.
+- Add CookieJar in Client.
+
+## v7.0.5
+
+- Add check to start polling only when status is in [200,201,202].
+- Refactoring for unchecked errors.
+- azure/persist changes.
+- Fix 'file in use' issue in renewing token in deviceflow.
+- Store header RetryAfter for subsequent requests in polling.
+- Add attribute details in service error.
+
+## v7.0.4
+
+- Better error messages for long running operation failures
+
+## v7.0.3
+
+- Corrected DoPollForAsynchronous to properly handle the initial response
+
+## v7.0.2
+
+- Corrected DoPollForAsynchronous to continue using the polling method first discovered
+
+## v7.0.1
+
+- Fixed empty JSON input error in ByUnmarshallingJSON
+- Fixed polling support for GET calls
+- Changed format name from TimeRfc1123 to TimeRFC1123
+
+## v7.0.0
+
+- Added ByCopying responder with supporting TeeReadCloser
+- Rewrote Azure asynchronous handling
+- Reverted to only unmarshalling JSON
+- Corrected handling of RFC3339 time strings and added support for Rfc1123 time format
+
+The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. Since
+`encoding/json` successfully deserializes all core types, and extended types normally provide
+their custom JSON serialization handlers, the code has been reverted back to using
+`json.Unmarshal`. The original change to use `json.Decode` was made to reduce duplicate
+code; there is no loss of function, and there is a gain in accuracy, by reverting.
+
+Additionally, Azure services indicate requests to be polled by multiple means. The existing code
+only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header).
+The new code correctly covers all cases and aligns with the other Azure SDKs.
+
+## v6.1.0
+
+- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values.
+
+## v6.0.0
+
+- Completely reworked the handling of polled and asynchronous requests
+- Removed unnecessary routines
+- Reworked `mocks.Sender` to replay a series of `http.Response` objects
+- Added `PrepareDecorators` for primitive types (e.g., bool, int32)
+
+Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead new
+`SendDecorators` implement different styles of polled behavior. See `autorest.DoPollForStatusCodes`
+and `azure.DoPollForAsynchronous` for examples.
+
+## v5.0.0
+
+- Added new RespondDecorators unmarshalling primitive types
+- Corrected application of inspection and authorization PrependDecorators
+
+## v4.0.0
+
+- Added support for Azure long-running operations.
+- Added cancelation support to all decorators and functions that may delay.
+- Breaking: `DelayForBackoff` now accepts a channel, which may be nil.
+
+## v3.1.0
+
+- Add support for OAuth Device Flow authorization.
+- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material.
+- Add helpers for persisting and restoring Tokens.
+- Increased code coverage in the github.com/Azure/autorest/azure package
+
+## v3.0.0
+
+- Breaking: `NewErrorWithError` no longer takes `statusCode int`.
+- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`.
+- Breaking: `Client#Send()` no longer takes `codes ...int` argument.
+- Add: XML unmarshaling support with `ByUnmarshallingXML()`
+- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide).
+ Applications using this library should either use Glide or vendor dependencies locally some other way.
+- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors.
+- Fix: use `net/http.DefaultClient` as base client.
+- Fix: Missing inspection for polling responses added.
+- Add: CopyAndDecode helpers.
+- Improved `./autorest/to` with `[]string` helpers.
+- Removed golint suppressions in .travis.yml.
+
+## v2.1.0
+
+- Added `StatusCode` to `Error` for more easily obtaining the HTTP Response StatusCode (if any)
+
+## v2.0.0
+
+- Changed `to.StringMapPtr` method signature to return a pointer
+- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificate and private keys
+
+## v1.0.0
+
+- Added Logging inspectors to trace http.Request / Response
+- Added support for User-Agent header
+- Changed WithHeader PrepareDecorator to use set vs. add
+- Added JSON to error when unmarshalling fails
+- Added Client#Send method
+- Corrected case of "Azure" in package paths
+- Added "to" helpers, Azure helpers, and improved ease-of-use
+- Corrected golint issues
+
+## v1.0.1
+
+- Added CHANGELOG.md
+
+## v1.1.0
+
+- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT
+- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate
+
+## v1.1.1
+
+- Introduced godeps and vendored the dependencies introduced in v1.1.1
diff --git a/vendor/github.com/Azure/go-autorest/GNUmakefile b/vendor/github.com/Azure/go-autorest/GNUmakefile
new file mode 100644
index 000000000..a434e73ac
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/GNUmakefile
@@ -0,0 +1,23 @@
+DIR?=./autorest/
+
+default: build
+
+build: fmt
+ go install $(DIR)
+
+test:
+ go test $(DIR) || exit 1
+
+vet:
+ @echo "go vet ."
+ @go vet $(DIR)... ; if [ $$? -eq 1 ]; then \
+ echo ""; \
+ echo "Vet found suspicious constructs. Please check the reported constructs"; \
+ echo "and fix them if necessary before submitting the code for review."; \
+ exit 1; \
+ fi
+
+fmt:
+ gofmt -w $(DIR)
+
+.PHONY: build test vet fmt
diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.lock b/vendor/github.com/Azure/go-autorest/Gopkg.lock
new file mode 100644
index 000000000..dc6e3e633
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/Gopkg.lock
@@ -0,0 +1,324 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ digest = "1:892e39e5c083d0943f1e80ab8351690f183c6a5ab24e1d280adcad424c26255e"
+ name = "contrib.go.opencensus.io/exporter/ocagent"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea"
+ version = "v0.6.0"
+
+[[projects]]
+ digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20"
+ name = "github.com/census-instrumentation/opencensus-proto"
+ packages = [
+ "gen-go/agent/common/v1",
+ "gen-go/agent/metrics/v1",
+ "gen-go/agent/trace/v1",
+ "gen-go/metrics/v1",
+ "gen-go/resource/v1",
+ "gen-go/trace/v1",
+ ]
+ pruneopts = "UT"
+ revision = "d89fa54de508111353cb0b06403c00569be780d8"
+ version = "v0.2.1"
+
+[[projects]]
+ digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
+ name = "github.com/davecgh/go-spew"
+ packages = ["spew"]
+ pruneopts = "UT"
+ revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
+ version = "v1.1.1"
+
+[[projects]]
+ digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55"
+ name = "github.com/dgrijalva/jwt-go"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
+ version = "v3.2.0"
+
+[[projects]]
+ digest = "1:cf0d2e435fd4ce45b789e93ef24b5f08e86be0e9807a16beb3694e2d8c9af965"
+ name = "github.com/dimchansky/utfbom"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c"
+ version = "v1.1.0"
+
+[[projects]]
+ branch = "master"
+ digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8"
+ name = "github.com/golang/groupcache"
+ packages = ["lru"]
+ pruneopts = "UT"
+ revision = "611e8accdfc92c4187d399e95ce826046d4c8d73"
+
+[[projects]]
+ digest = "1:e3839df32927e8d3403cd5aa7253d966e8ff80fc8f10e2e35d146461cd83fcfa"
+ name = "github.com/golang/protobuf"
+ packages = [
+ "descriptor",
+ "jsonpb",
+ "proto",
+ "protoc-gen-go/descriptor",
+ "ptypes",
+ "ptypes/any",
+ "ptypes/duration",
+ "ptypes/struct",
+ "ptypes/timestamp",
+ "ptypes/wrappers",
+ ]
+ pruneopts = "UT"
+ revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7"
+ version = "v1.3.2"
+
+[[projects]]
+ digest = "1:c560cd79300fac84f124b96225181a637a70b60155919a3c36db50b7cca6b806"
+ name = "github.com/grpc-ecosystem/grpc-gateway"
+ packages = [
+ "internal",
+ "runtime",
+ "utilities",
+ ]
+ pruneopts = "UT"
+ revision = "f7120437bb4f6c71f7f5076ad65a45310de2c009"
+ version = "v1.12.1"
+
+[[projects]]
+ digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79"
+ name = "github.com/mitchellh/go-homedir"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "af06845cf3004701891bf4fdb884bfe4920b3727"
+ version = "v1.1.0"
+
+[[projects]]
+ digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
+ name = "github.com/pmezard/go-difflib"
+ packages = ["difflib"]
+ pruneopts = "UT"
+ revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+ version = "v1.0.0"
+
+[[projects]]
+ digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551"
+ name = "github.com/stretchr/testify"
+ packages = [
+ "assert",
+ "require",
+ ]
+ pruneopts = "UT"
+ revision = "221dbe5ed46703ee255b1da0dec05086f5035f62"
+ version = "v1.4.0"
+
+[[projects]]
+ digest = "1:7c5e00383399fe13de0b4b65c9fdde16275407ce8ac02d867eafeaa916edcc71"
+ name = "go.opencensus.io"
+ packages = [
+ ".",
+ "internal",
+ "internal/tagencoding",
+ "metric/metricdata",
+ "metric/metricproducer",
+ "plugin/ocgrpc",
+ "plugin/ochttp",
+ "plugin/ochttp/propagation/b3",
+ "plugin/ochttp/propagation/tracecontext",
+ "resource",
+ "stats",
+ "stats/internal",
+ "stats/view",
+ "tag",
+ "trace",
+ "trace/internal",
+ "trace/propagation",
+ "trace/tracestate",
+ ]
+ pruneopts = "UT"
+ revision = "aad2c527c5defcf89b5afab7f37274304195a6b2"
+ version = "v0.22.2"
+
+[[projects]]
+ branch = "master"
+ digest = "1:f604f5e2ee721b6757d962dfe7bab4f28aae50c456e39cfb2f3819762a44a6ae"
+ name = "golang.org/x/crypto"
+ packages = [
+ "pkcs12",
+ "pkcs12/internal/rc2",
+ ]
+ pruneopts = "UT"
+ revision = "e9b2fee46413994441b28dfca259d911d963dfed"
+
+[[projects]]
+ branch = "master"
+ digest = "1:334b27eac455cb6567ea28cd424230b07b1a64334a2f861a8075ac26ce10af43"
+ name = "golang.org/x/lint"
+ packages = [
+ ".",
+ "golint",
+ ]
+ pruneopts = "UT"
+ revision = "fdd1cda4f05fd1fd86124f0ef9ce31a0b72c8448"
+
+[[projects]]
+ branch = "master"
+ digest = "1:257a75d024975428ab9192bfc334c3490882f8cb21322ea5784ca8eca000a910"
+ name = "golang.org/x/net"
+ packages = [
+ "http/httpguts",
+ "http2",
+ "http2/hpack",
+ "idna",
+ "internal/timeseries",
+ "trace",
+ ]
+ pruneopts = "UT"
+ revision = "1ddd1de85cb0337b623b740a609d35817d516a8d"
+
+[[projects]]
+ branch = "master"
+ digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b"
+ name = "golang.org/x/sync"
+ packages = ["semaphore"]
+ pruneopts = "UT"
+ revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb"
+
+[[projects]]
+ branch = "master"
+ digest = "1:4da420ceda5f68e8d748aa2169d0ed44ffadb1bbd6537cf778a49563104189b8"
+ name = "golang.org/x/sys"
+ packages = ["unix"]
+ pruneopts = "UT"
+ revision = "ce4227a45e2eb77e5c847278dcc6a626742e2945"
+
+[[projects]]
+ digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405"
+ name = "golang.org/x/text"
+ packages = [
+ "collate",
+ "collate/build",
+ "internal/colltab",
+ "internal/gen",
+ "internal/language",
+ "internal/language/compact",
+ "internal/tag",
+ "internal/triegen",
+ "internal/ucd",
+ "language",
+ "secure/bidirule",
+ "transform",
+ "unicode/bidi",
+ "unicode/cldr",
+ "unicode/norm",
+ "unicode/rangetable",
+ ]
+ pruneopts = "UT"
+ revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
+ version = "v0.3.2"
+
+[[projects]]
+ branch = "master"
+ digest = "1:4eb5ea8395fb60212dd58b92c9db80bab59d5e99c7435f9a6a0a528c373b60e7"
+ name = "golang.org/x/tools"
+ packages = [
+ "go/ast/astutil",
+ "go/gcexportdata",
+ "go/internal/gcimporter",
+ "go/types/typeutil",
+ ]
+ pruneopts = "UT"
+ revision = "259af5ff87bdcd4abf2ecda8edc3f13f04f26a42"
+
+[[projects]]
+ digest = "1:964bb30febc27fabfbec4759fa530c6ec35e77a7c85fed90b9317ea39a054877"
+ name = "google.golang.org/api"
+ packages = ["support/bundler"]
+ pruneopts = "UT"
+ revision = "8a410c21381766a810817fd6200fce8838ecb277"
+ version = "v0.14.0"
+
+[[projects]]
+ branch = "master"
+ digest = "1:a8d5c2c6e746b3485e36908ab2a9e3d77b86b81f8156d88403c7d2b462431dfd"
+ name = "google.golang.org/genproto"
+ packages = [
+ "googleapis/api/httpbody",
+ "googleapis/rpc/status",
+ "protobuf/field_mask",
+ ]
+ pruneopts = "UT"
+ revision = "51378566eb590fa106d1025ea12835a4416dda84"
+
+[[projects]]
+ digest = "1:b59ce3ddb11daeeccccc9cb3183b58ebf8e9a779f1c853308cd91612e817a301"
+ name = "google.golang.org/grpc"
+ packages = [
+ ".",
+ "backoff",
+ "balancer",
+ "balancer/base",
+ "balancer/roundrobin",
+ "binarylog/grpc_binarylog_v1",
+ "codes",
+ "connectivity",
+ "credentials",
+ "credentials/internal",
+ "encoding",
+ "encoding/proto",
+ "grpclog",
+ "internal",
+ "internal/backoff",
+ "internal/balancerload",
+ "internal/binarylog",
+ "internal/buffer",
+ "internal/channelz",
+ "internal/envconfig",
+ "internal/grpcrand",
+ "internal/grpcsync",
+ "internal/resolver/dns",
+ "internal/resolver/passthrough",
+ "internal/syscall",
+ "internal/transport",
+ "keepalive",
+ "metadata",
+ "naming",
+ "peer",
+ "resolver",
+ "serviceconfig",
+ "stats",
+ "status",
+ "tap",
+ ]
+ pruneopts = "UT"
+ revision = "1a3960e4bd028ac0cec0a2afd27d7d8e67c11514"
+ version = "v1.25.1"
+
+[[projects]]
+ digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737"
+ name = "gopkg.in/yaml.v2"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce"
+ version = "v2.2.7"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ input-imports = [
+ "contrib.go.opencensus.io/exporter/ocagent",
+ "github.com/dgrijalva/jwt-go",
+ "github.com/dimchansky/utfbom",
+ "github.com/mitchellh/go-homedir",
+ "github.com/stretchr/testify/require",
+ "go.opencensus.io/plugin/ochttp",
+ "go.opencensus.io/plugin/ochttp/propagation/tracecontext",
+ "go.opencensus.io/stats/view",
+ "go.opencensus.io/trace",
+ "golang.org/x/crypto/pkcs12",
+ "golang.org/x/lint/golint",
+ ]
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.toml b/vendor/github.com/Azure/go-autorest/Gopkg.toml
new file mode 100644
index 000000000..1fc286596
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/Gopkg.toml
@@ -0,0 +1,59 @@
+# Gopkg.toml example
+#
+# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+#
+# [prune]
+# non-go = false
+# go-tests = true
+# unused-packages = true
+
+required = ["golang.org/x/lint/golint"]
+
+[prune]
+ go-tests = true
+ unused-packages = true
+
+[[constraint]]
+ name = "contrib.go.opencensus.io/exporter/ocagent"
+ version = "0.6.0"
+
+[[constraint]]
+ name = "github.com/dgrijalva/jwt-go"
+ version = "3.2.0"
+
+[[constraint]]
+ name = "github.com/dimchansky/utfbom"
+ version = "1.1.0"
+
+[[constraint]]
+ name = "github.com/mitchellh/go-homedir"
+ version = "1.1.0"
+
+[[constraint]]
+ name = "github.com/stretchr/testify"
+ version = "1.3.0"
+
+[[constraint]]
+ name = "go.opencensus.io"
+ version = "0.22.0"
+
+[[constraint]]
+ branch = "master"
+ name = "golang.org/x/crypto"
diff --git a/vendor/github.com/Azure/go-autorest/README.md b/vendor/github.com/Azure/go-autorest/README.md
new file mode 100644
index 000000000..de1e19a44
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/README.md
@@ -0,0 +1,165 @@
+# go-autorest
+
+[GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest)
+[Build Status](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=625&branchName=master)
+[Go Report Card](https://goreportcard.com/report/Azure/go-autorest)
+
+Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages.
+
+An authentication client tested with Azure Active Directory (AAD) is also
+provided in this repo in the package
+`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package
+is maintained only as part of the Azure Go SDK and is not related to other
+"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD).
+
+## Overview
+
+Package go-autorest implements an HTTP request pipeline suitable for use across
+multiple goroutines and provides the shared routines used by packages generated
+by [Autorest](https://github.com/Azure/autorest.go).
+
+The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
+and Responding. A typical pattern is:
+
+```go
+ req, err := Prepare(&http.Request{},
+ token.WithAuthorization())
+
+ resp, err := Send(req,
+ WithLogging(logger),
+ DoErrorIfStatusCode(http.StatusInternalServerError),
+ DoCloseIfError(),
+ DoRetryForAttempts(5, time.Second))
+
+ err = Respond(resp,
+ ByDiscardingBody(),
+ ByClosing())
+```
+
+Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
+and then pass the data along, pass the data first and then modify the result, or wrap themselves
+around passing the data (such as a logger might do). Decorators run in the order provided. For
+example, the following:
+
+```go
+ req, err := Prepare(&http.Request{},
+ WithBaseURL("https://microsoft.com/"),
+ WithPath("a"),
+ WithPath("b"),
+ WithPath("c"))
+```
+
+will set the URL to:
+
+```
+ https://microsoft.com/a/b/c
+```
+
+Preparers and Responders may be shared and re-used (assuming the underlying decorators support
+sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
+shared among multiple go-routines, and a single Sender shared among multiple sending go-routines,
+all bound together by means of input / output channels.
+
+Decorators hold their passed state within a closure (such as the path components in the example
+above). Be careful to share Preparers and Responders only in a context where such held state
+applies. For example, it may not make sense to share a Preparer that applies a query string from a
+fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
+struct (e.g., `ByUnmarshallingJson`) is likely incorrect.
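+
+A Preparer whose held state is the same for every request, by contrast, is safe to build once and
+reuse from any number of goroutines (a sketch; `CreatePreparer` is assumed to be the package's
+Preparer constructor, and the URL is illustrative):
+
+```go
+  // Closed-over state (base URL and path) is identical for every request,
+  // so this Preparer can be shared freely.
+  shared := CreatePreparer(
+    WithBaseURL("https://microsoft.com/"),
+    WithPath("a"))
+
+  req, err := shared.Prepare(&http.Request{})
+```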
+
+Errors raised by autorest objects and methods will conform to the `autorest.Error` interface.
+
+See the included examples for more detail. For details on the suggested use of this package by
+generated clients, see the Client described below.
+
+## Helpers
+
+### Handling Swagger Dates
+
+The Swagger specification (https://swagger.io) that drives AutoRest
+(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
+github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct
+parsing and formatting.
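+
+For example, a date-only value survives a JSON round trip without acquiring a time component
+(a sketch; it assumes the package exposes a `ParseDate` helper and marshals to the date-only form):
+
+```go
+  d, err := date.ParseDate("2001-02-03") // the Swagger "date" form
+  if err == nil {
+    b, _ := json.Marshal(d)
+    fmt.Println(string(b)) // expected: "2001-02-03", not a full RFC3339 timestamp
+  }
+```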
+
+### Handling Empty Values
+
+In JSON, missing values have different semantics than empty values. This is especially true for
+services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains
+only those values to modify. Missing values are to be left unchanged. Developers, then, require a
+means to both specify an empty value and to leave the value out of the submitted JSON.
+
+The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits
+empty values from the rendered JSON. Since Go defines default values for all base types (such as ""
+for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package
+treats default values as meaning empty, omitting them from the rendered JSON. This means that, using
+the Go base types encoded through the default JSON package, it is not possible to create JSON to
+clear a value at the server.
+
+The workaround within the Go community is to use pointers to base types in lieu of base types within
+structures that map to JSON. For example, instead of a value of type `string`, the workaround uses
+`*string`. While this enables distinguishing empty values from those to be unchanged, creating
+pointers to a base type (notably constant, in-line values) requires additional variables. This, for
+example,
+
+```go
+ s := struct {
+ S *string
+ }{ S: &"foo" }
+```
+fails, while this
+
+```go
+ v := "foo"
+ s := struct {
+ S *string
+ }{ S: &v }
+```
+succeeds.
+
+To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for
+Go base types which have Swagger analogs. It also provides a helper that converts between
+`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value
+associated with a key should be cleared. With the helpers, the previous example becomes
+
+```go
+ s := struct {
+ S *string
+ }{ S: to.StringPtr("foo") }
+```
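+
+The map helper works the same way; after converting, an individual entry can be set to `nil` when
+the service should clear that key (a sketch assuming the `to.StringMapPtr` helper mentioned in the
+changelog returns a `*map[string]*string`):
+
+```go
+  tags := to.StringMapPtr(map[string]string{"env": "prod"})
+  (*tags)["owner"] = nil // marshals as JSON null, asking the service to clear the value
+```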
+
+## Install
+
+```bash
+go get github.com/Azure/go-autorest/autorest
+go get github.com/Azure/go-autorest/autorest/azure
+go get github.com/Azure/go-autorest/autorest/date
+go get github.com/Azure/go-autorest/autorest/to
+```
+
+### Using with Go Modules
+In [v12.0.1](https://github.com/Azure/go-autorest/pull/386), this repository introduced the following modules.
+
+- autorest/adal
+- autorest/azure/auth
+- autorest/azure/cli
+- autorest/date
+- autorest/mocks
+- autorest/to
+- autorest/validation
+- autorest
+- logger
+- tracing
+
+Tagging cumulative SDK releases as a whole (e.g. `v12.3.0`) is still enabled to support consumers of this repo that have not yet migrated to modules.
+
+## License
+
+See LICENSE file.
+
+-----
+
+This project has adopted the [Microsoft Open Source Code of
+Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
+see the [Code of Conduct
+FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
+[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
+questions or comments.
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/LICENSE
similarity index 94%
rename from vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE
rename to vendor/github.com/Azure/go-autorest/autorest/LICENSE
index 261eeb9e9..b9d6a27ea 100644
--- a/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE
+++ b/vendor/github.com/Azure/go-autorest/autorest/LICENSE
@@ -1,3 +1,4 @@
+
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@@ -175,18 +176,7 @@
END OF TERMS AND CONDITIONS
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
+ Copyright 2015 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE
similarity index 94%
rename from vendor/github.com/census-instrumentation/opencensus-proto/LICENSE
rename to vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE
index d64569567..b9d6a27ea 100644
--- a/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE
@@ -176,18 +176,7 @@
END OF TERMS AND CONDITIONS
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
+ Copyright 2015 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
index 7b0c4bc4d..fec416a9c 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
@@ -135,7 +135,7 @@ resource := "https://management.core.windows.net/"
applicationSecret := "APPLICATION_SECRET"
spt, err := adal.NewServicePrincipalToken(
- oauthConfig,
+ *oauthConfig,
appliationID,
applicationSecret,
resource,
@@ -170,7 +170,7 @@ if err != nil {
}
spt, err := adal.NewServicePrincipalTokenFromCertificate(
- oauthConfig,
+ *oauthConfig,
applicationID,
certificate,
rsaPrivateKey,
@@ -195,7 +195,7 @@ oauthClient := &http.Client{}
// Acquire the device code
deviceCode, err := adal.InitiateDeviceAuth(
oauthClient,
- oauthConfig,
+ *oauthConfig,
applicationID,
resource)
if err != nil {
@@ -212,7 +212,7 @@ if err != nil {
}
spt, err := adal.NewServicePrincipalTokenFromManualToken(
- oauthConfig,
+ *oauthConfig,
applicationID,
resource,
*token,
@@ -227,7 +227,7 @@ if (err == nil) {
```Go
spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
- oauthConfig,
+ *oauthConfig,
applicationID,
username,
password,
@@ -243,11 +243,11 @@ if (err == nil) {
``` Go
spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
- oauthConfig,
+ *oauthConfig,
applicationID,
clientSecret,
- authorizationCode,
- redirectURI,
+ authorizationCode,
+ redirectURI,
resource,
callbacks...)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
index 8c83a917f..fa5964742 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
@@ -15,10 +15,15 @@ package adal
// limitations under the License.
import (
+ "errors"
"fmt"
"net/url"
)
+const (
+ activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
+)
+
// OAuthConfig represents the endpoints needed
// in OAuth operations
type OAuthConfig struct {
@@ -60,7 +65,6 @@ func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiV
}
api = fmt.Sprintf("?api-version=%s", *apiVersion)
}
- const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
u, err := url.Parse(activeDirectoryEndpoint)
if err != nil {
return nil, err
@@ -89,3 +93,59 @@ func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiV
DeviceCodeEndpoint: *deviceCodeURL,
}, nil
}
+
+// MultiTenantOAuthConfig provides endpoints for the primary and auxiliary tenant IDs.
+type MultiTenantOAuthConfig interface {
+ PrimaryTenant() *OAuthConfig
+ AuxiliaryTenants() []*OAuthConfig
+}
+
+// OAuthOptions contains optional OAuthConfig creation arguments.
+type OAuthOptions struct {
+ APIVersion string
+}
+
+func (c OAuthOptions) apiVersion() string {
+ if c.APIVersion != "" {
+ return fmt.Sprintf("?api-version=%s", c.APIVersion)
+ }
+ return "1.0"
+}
+
+// NewMultiTenantOAuthConfig creates an object that supports multi-tenant OAuth configuration.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
+func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) {
+ if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 {
+ return nil, errors.New("must specify one to three auxiliary tenants")
+ }
+ mtCfg := multiTenantOAuthConfig{
+ cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1),
+ }
+ apiVer := options.apiVersion()
+ pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err)
+ }
+ mtCfg.cfgs[0] = pri
+ for i := range auxiliaryTenantIDs {
+ aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i])
+ if err != nil {
+ return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err)
+ }
+ mtCfg.cfgs[i+1] = aux
+ }
+ return mtCfg, nil
+}
+
+type multiTenantOAuthConfig struct {
+ // first config in the slice is the primary tenant
+ cfgs []*OAuthConfig
+}
+
+func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig {
+ return m.cfgs[0]
+}
+
+func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig {
+ return m.cfgs[1:]
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
index b38f4c245..9daa4b58b 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
@@ -24,6 +24,7 @@ package adal
*/
import (
+ "context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -101,7 +102,14 @@ type deviceToken struct {
// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
// that can be used with CheckForUserCompletion or WaitForUserCompletion.
+// Deprecated: use InitiateDeviceAuthWithContext() instead.
func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
+ return InitiateDeviceAuthWithContext(context.Background(), sender, oauthConfig, clientID, resource)
+}
+
+// InitiateDeviceAuthWithContext initiates a device auth flow. It returns a DeviceCode
+// that can be used with CheckForUserCompletion or WaitForUserCompletion.
+func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
v := url.Values{
"client_id": []string{clientID},
"resource": []string{resource},
@@ -117,7 +125,7 @@ func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resour
req.ContentLength = int64(len(s))
req.Header.Set(contentType, mimeTypeFormPost)
- resp, err := sender.Do(req)
+ resp, err := sender.Do(req.WithContext(ctx))
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
}
@@ -151,7 +159,14 @@ func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resour
// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
// to see if the device flow has: been completed, timed out, or otherwise failed
+// Deprecated: use CheckForUserCompletionWithContext() instead.
func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
+ return CheckForUserCompletionWithContext(context.Background(), sender, code)
+}
+
+// CheckForUserCompletionWithContext takes a DeviceCode and checks with the Azure AD OAuth endpoint
+// to see if the device flow has: been completed, timed out, or otherwise failed
+func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) {
v := url.Values{
"client_id": []string{code.ClientID},
"code": []string{*code.DeviceCode},
@@ -169,7 +184,7 @@ func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
req.ContentLength = int64(len(s))
req.Header.Set(contentType, mimeTypeFormPost)
- resp, err := sender.Do(req)
+ resp, err := sender.Do(req.WithContext(ctx))
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
}
@@ -207,18 +222,29 @@ func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
case "code_expired":
return nil, ErrDeviceCodeExpired
default:
+ // return a more meaningful error message if available
+ if token.ErrorDescription != nil {
+ return nil, fmt.Errorf("%s %s: %s", logPrefix, *token.Error, *token.ErrorDescription)
+ }
return nil, ErrDeviceGeneric
}
}
// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs.
// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
+// Deprecated: use WaitForUserCompletionWithContext() instead.
func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
+ return WaitForUserCompletionWithContext(context.Background(), sender, code)
+}
+
+// WaitForUserCompletionWithContext calls CheckForUserCompletion repeatedly until a token is granted or an error
+// state occurs. This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
+func WaitForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) {
intervalDuration := time.Duration(*code.Interval) * time.Second
waitDuration := intervalDuration
for {
- token, err := CheckForUserCompletion(sender, code)
+ token, err := CheckForUserCompletionWithContext(ctx, sender, code)
if err == nil {
return token, nil
@@ -237,6 +263,11 @@ func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix)
}
- time.Sleep(waitDuration)
+ select {
+ case <-time.After(waitDuration):
+ // noop
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
}
}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod
new file mode 100644
index 000000000..bb53a1c23
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod
@@ -0,0 +1,12 @@
+module github.com/Azure/go-autorest/autorest/adal
+
+go 1.12
+
+require (
+ github.com/Azure/go-autorest v14.2.0+incompatible
+ github.com/Azure/go-autorest/autorest/date v0.3.0
+ github.com/Azure/go-autorest/autorest/mocks v0.4.1
+ github.com/Azure/go-autorest/tracing v0.6.0
+ github.com/dgrijalva/jwt-go v3.2.0+incompatible
+ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
+)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum
new file mode 100644
index 000000000..e31b57957
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum
@@ -0,0 +1,19 @@
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go
new file mode 100644
index 000000000..7551b7923
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go
@@ -0,0 +1,24 @@
+// +build modhack
+
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
index 9e15f2751..2a974a39b 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
@@ -15,11 +15,24 @@ package adal
// limitations under the License.
import (
+ "crypto/rsa"
+ "crypto/x509"
"encoding/json"
+ "errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
+
+ "golang.org/x/crypto/pkcs12"
+)
+
+var (
+ // ErrMissingCertificate is returned when no local certificate is found in the provided PFX data.
+ ErrMissingCertificate = errors.New("adal: certificate missing")
+
+ // ErrMissingPrivateKey is returned when no private key is found in the provided PFX data.
+ ErrMissingPrivateKey = errors.New("adal: private key missing")
)
// LoadToken restores a Token object from a file located at 'path'.
@@ -71,3 +84,52 @@ func SaveToken(path string, mode os.FileMode, token Token) error {
}
return nil
}
+
+// DecodePfxCertificateData extracts the x509 certificate and RSA private key from the provided PFX data.
+// The PFX data must contain a private key along with a certificate whose public key matches that of the
+// private key or an error is returned.
+// If the private key is not password protected pass the empty string for password.
+func DecodePfxCertificateData(pfxData []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
+ blocks, err := pkcs12.ToPEM(pfxData, password)
+ if err != nil {
+ return nil, nil, err
+ }
+ // first extract the private key
+ var priv *rsa.PrivateKey
+ for _, block := range blocks {
+ if block.Type == "PRIVATE KEY" {
+ priv, err = x509.ParsePKCS1PrivateKey(block.Bytes)
+ if err != nil {
+ return nil, nil, err
+ }
+ break
+ }
+ }
+ if priv == nil {
+ return nil, nil, ErrMissingPrivateKey
+ }
+ // now find the certificate with the matching public key of our private key
+ var cert *x509.Certificate
+ for _, block := range blocks {
+ if block.Type == "CERTIFICATE" {
+ pcert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, nil, err
+ }
+ certKey, ok := pcert.PublicKey.(*rsa.PublicKey)
+ if !ok {
+ // keep looking
+ continue
+ }
+ if priv.E == certKey.E && priv.N.Cmp(certKey.N) == 0 {
+ // found a match
+ cert = pcert
+ break
+ }
+ }
+ }
+ if cert == nil {
+ return nil, nil, ErrMissingCertificate
+ }
+ return cert, priv, nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
index 834401e00..d7e4372bb 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
@@ -15,7 +15,12 @@ package adal
// limitations under the License.
import (
+ "crypto/tls"
"net/http"
+ "net/http/cookiejar"
+ "sync"
+
+ "github.com/Azure/go-autorest/tracing"
)
const (
@@ -23,6 +28,9 @@ const (
mimeTypeFormPost = "application/x-www-form-urlencoded"
)
+var defaultSender Sender
+var defaultSenderInit = &sync.Once{}
+
// Sender is the interface that wraps the Do method to send HTTP requests.
//
// The standard http.Client conforms to this interface.
@@ -45,7 +53,7 @@ type SendDecorator func(Sender) Sender
// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
func CreateSender(decorators ...SendDecorator) Sender {
- return DecorateSender(&http.Client{}, decorators...)
+ return DecorateSender(sender(), decorators...)
}
// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to
@@ -58,3 +66,30 @@ func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
}
return s
}
+
+func sender() Sender {
+ // note that we can't init defaultSender in init() since it will
+ // execute before calling code has had a chance to enable tracing
+ defaultSenderInit.Do(func() {
+ // Use behaviour compatible with DefaultTransport, but require TLS minimum version.
+ defaultTransport := http.DefaultTransport.(*http.Transport)
+ transport := &http.Transport{
+ Proxy: defaultTransport.Proxy,
+ DialContext: defaultTransport.DialContext,
+ MaxIdleConns: defaultTransport.MaxIdleConns,
+ IdleConnTimeout: defaultTransport.IdleConnTimeout,
+ TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
+ ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+ TLSClientConfig: &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ },
+ }
+ var roundTripper http.RoundTripper = transport
+ if tracing.IsEnabled() {
+ roundTripper = tracing.NewTransport(transport)
+ }
+ j, _ := cookiejar.New(nil)
+ defaultSender = &http.Client{Jar: j, Transport: roundTripper}
+ })
+ return defaultSender
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
index effa87ab2..24d5ab26f 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -24,17 +24,17 @@ import (
"encoding/json"
"errors"
"fmt"
+ "io"
"io/ioutil"
"math"
- "net"
"net/http"
"net/url"
+ "os"
"strings"
"sync"
"time"
"github.com/Azure/go-autorest/autorest/date"
- "github.com/Azure/go-autorest/tracing"
"github.com/dgrijalva/jwt-go"
)
@@ -62,8 +62,20 @@ const (
// msiEndpoint is the well known endpoint for getting MSI authentications tokens
msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
+ // the API version to use for the MSI endpoint
+ msiAPIVersion = "2018-02-01"
+
// the default number of attempts to refresh an MSI authentication token
defaultMaxMSIRefreshAttempts = 5
+
+ // asMSIEndpointEnv is the environment variable used to store the endpoint on App Service and Functions
+ asMSIEndpointEnv = "MSI_ENDPOINT"
+
+ // asMSISecretEnv is the environment variable used to store the request secret on App Service and Functions
+ asMSISecretEnv = "MSI_SECRET"
+
+ // the API version to use for the App Service MSI endpoint
+ appServiceAPIVersion = "2017-09-01"
)
// OAuthTokenProvider is an interface which should be implemented by an access token retriever
@@ -71,6 +83,12 @@ type OAuthTokenProvider interface {
OAuthToken() string
}
+// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization.
+type MultitenantOAuthTokenProvider interface {
+ PrimaryOAuthToken() string
+ AuxiliaryOAuthTokens() []string
+}
+
// TokenRefreshError is an interface used by errors returned during token refresh.
type TokenRefreshError interface {
error
@@ -95,6 +113,9 @@ type RefresherWithContext interface {
// a successful token refresh
type TokenRefreshCallback func(Token) error
+// TokenRefresh is a type representing a custom callback to refresh a token
+type TokenRefresh func(ctx context.Context, resource string) (*Token, error)
+
// Token encapsulates the access token used to authorize Azure requests.
// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response
type Token struct {
@@ -234,7 +255,7 @@ func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalTo
"sub": spt.inner.ClientID,
"jti": base64.URLEncoding.EncodeToString(jti),
"nbf": time.Now().Unix(),
- "exp": time.Now().Add(time.Hour * 24).Unix(),
+ "exp": time.Now().Add(24 * time.Hour).Unix(),
}
signedString, err := token.SignedString(secret.PrivateKey)
@@ -333,11 +354,13 @@ func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, err
// ServicePrincipalToken encapsulates a Token created for a Service Principal.
type ServicePrincipalToken struct {
- inner servicePrincipalToken
- refreshLock *sync.RWMutex
- sender Sender
- refreshCallbacks []TokenRefreshCallback
+ inner servicePrincipalToken
+ refreshLock *sync.RWMutex
+ sender Sender
+ customRefreshFunc TokenRefresh
+ refreshCallbacks []TokenRefreshCallback
// MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token.
+	// Setting this to a value less than 1 will use the default value.
MaxMSIRefreshAttempts int
}
@@ -351,6 +374,11 @@ func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCa
spt.refreshCallbacks = callbacks
}
+// SetCustomRefreshFunc sets a custom refresh function used to refresh the token.
+func (spt *ServicePrincipalToken) SetCustomRefreshFunc(customRefreshFunc TokenRefresh) {
+ spt.customRefreshFunc = customRefreshFunc
+}
+
// MarshalJSON implements the json.Marshaler interface.
func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) {
return json.Marshal(spt.inner)
@@ -390,7 +418,7 @@ func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
spt.refreshLock = &sync.RWMutex{}
}
if spt.sender == nil {
- spt.sender = &http.Client{Transport: tracing.Transport}
+ spt.sender = sender()
}
return nil
}
@@ -438,7 +466,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso
RefreshWithin: defaultRefresh,
},
refreshLock: &sync.RWMutex{},
- sender: &http.Client{Transport: tracing.Transport},
+ sender: sender(),
refreshCallbacks: callbacks,
}
return spt, nil
@@ -629,19 +657,52 @@ func GetMSIVMEndpoint() (string, error) {
return msiEndpoint, nil
}
+// NOTE: this only indicates if the ASE environment credentials have been set
+// which does not necessarily mean that the caller is authenticating via ASE!
+func isAppService() bool {
+ _, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv)
+ _, asMSISecretEnvExists := os.LookupEnv(asMSISecretEnv)
+
+ return asMSIEndpointEnvExists && asMSISecretEnvExists
+}
+
+// GetMSIAppServiceEndpoint gets the MSI endpoint for App Service and Functions
+func GetMSIAppServiceEndpoint() (string, error) {
+ asMSIEndpoint, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv)
+
+ if asMSIEndpointEnvExists {
+ return asMSIEndpoint, nil
+ }
+ return "", errors.New("MSI endpoint not found")
+}
+
+// GetMSIEndpoint gets the appropriate MSI endpoint depending on the runtime environment
+func GetMSIEndpoint() (string, error) {
+ if isAppService() {
+ return GetMSIAppServiceEndpoint()
+ }
+ return GetMSIVMEndpoint()
+}
+
// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the system assigned identity when creating the token.
func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
- return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...)
+ return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, nil, callbacks...)
}
// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
-// It will use the specified user assigned identity when creating the token.
+// It will use the clientID of the specified user assigned identity when creating the token.
func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
- return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...)
+ return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, nil, callbacks...)
}
-func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+// NewServicePrincipalTokenFromMSIWithIdentityResourceID creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the Azure resource ID of the specified user assigned identity when creating the token.
+func NewServicePrincipalTokenFromMSIWithIdentityResourceID(msiEndpoint, resource string, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+ return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, &identityResourceID, callbacks...)
+}
+
+func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, identityResourceID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil {
return nil, err
}
@@ -653,6 +714,11 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
return nil, err
}
}
+ if identityResourceID != nil {
+ if err := validateStringParam(*identityResourceID, "identityResourceID"); err != nil {
+ return nil, err
+ }
+ }
// We set the oauth config token endpoint to be MSI's endpoint
msiEndpointURL, err := url.Parse(msiEndpoint)
if err != nil {
@@ -661,10 +727,18 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
v := url.Values{}
v.Set("resource", resource)
- v.Set("api-version", "2018-02-01")
+ // App Service MSI currently only supports token API version 2017-09-01
+ if isAppService() {
+ v.Set("api-version", appServiceAPIVersion)
+ } else {
+ v.Set("api-version", msiAPIVersion)
+ }
if userAssignedID != nil {
v.Set("client_id", *userAssignedID)
}
+ if identityResourceID != nil {
+ v.Set("mi_res_id", *identityResourceID)
+ }
msiEndpointURL.RawQuery = v.Encode()
spt := &ServicePrincipalToken{
@@ -679,7 +753,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
RefreshWithin: defaultRefresh,
},
refreshLock: &sync.RWMutex{},
- sender: &http.Client{Transport: tracing.Transport},
+ sender: sender(),
refreshCallbacks: callbacks,
MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts,
}
@@ -720,8 +794,9 @@ func (spt *ServicePrincipalToken) EnsureFresh() error {
// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
- if spt.inner.AutoRefresh && spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) {
- // take the write lock then check to see if the token was already refreshed
+ // must take the read lock when initially checking the token's expiration
+ if spt.inner.AutoRefresh && spt.Token().WillExpireIn(spt.inner.RefreshWithin) {
+ // take the write lock then check again to see if the token was already refreshed
spt.refreshLock.Lock()
defer spt.refreshLock.Unlock()
if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) {
@@ -745,13 +820,13 @@ func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
}
// Refresh obtains a fresh token for the Service Principal.
-// This method is not safe for concurrent use and should be syncrhonized.
+// This method is safe for concurrent use.
func (spt *ServicePrincipalToken) Refresh() error {
return spt.RefreshWithContext(context.Background())
}
// RefreshWithContext obtains a fresh token for the Service Principal.
-// This method is not safe for concurrent use and should be syncrhonized.
+// This method is safe for concurrent use.
func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
spt.refreshLock.Lock()
defer spt.refreshLock.Unlock()
@@ -759,13 +834,13 @@ func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error
}
// RefreshExchange refreshes the token, but for a different resource.
-// This method is not safe for concurrent use and should be syncrhonized.
+// This method is safe for concurrent use.
func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
return spt.RefreshExchangeWithContext(context.Background(), resource)
}
// RefreshExchangeWithContext refreshes the token, but for a different resource.
-// This method is not safe for concurrent use and should be syncrhonized.
+// This method is safe for concurrent use.
func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
spt.refreshLock.Lock()
defer spt.refreshLock.Unlock()
@@ -784,19 +859,50 @@ func (spt *ServicePrincipalToken) getGrantType() string {
}
func isIMDS(u url.URL) bool {
- imds, err := url.Parse(msiEndpoint)
+	return isMSIEndpoint(u) || isASEEndpoint(u)
+}
+
+func isMSIEndpoint(endpoint url.URL) bool {
+ msi, err := url.Parse(msiEndpoint)
if err != nil {
return false
}
- return u.Host == imds.Host && u.Path == imds.Path
+ return endpoint.Host == msi.Host && endpoint.Path == msi.Path
+}
+
+func isASEEndpoint(endpoint url.URL) bool {
+ aseEndpoint, err := GetMSIAppServiceEndpoint()
+ if err != nil {
+ // app service environment isn't enabled
+ return false
+ }
+ ase, err := url.Parse(aseEndpoint)
+ if err != nil {
+ return false
+ }
+ return endpoint.Host == ase.Host && endpoint.Path == ase.Path
}
func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error {
+ if spt.customRefreshFunc != nil {
+ token, err := spt.customRefreshFunc(ctx, resource)
+ if err != nil {
+ return err
+ }
+ spt.inner.Token = *token
+ return spt.InvokeRefreshCallbacks(spt.inner.Token)
+ }
+
req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil)
if err != nil {
return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
}
req.Header.Add("User-Agent", UserAgent())
+ // Add header when runtime is on App Service or Functions
+ if isASEEndpoint(spt.inner.OauthConfig.TokenEndpoint) {
+ asMSISecret, _ := os.LookupEnv(asMSISecretEnv)
+ req.Header.Add("Secret", asMSISecret)
+ }
req = req.WithContext(ctx)
if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
v := url.Values{}
@@ -835,13 +941,18 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
}
var resp *http.Response
+ if isMSIEndpoint(spt.inner.OauthConfig.TokenEndpoint) && !MSIAvailable(ctx, spt.sender) {
+ // return a TokenRefreshError here so that we don't keep retrying
+ return newTokenRefreshError("the MSI endpoint is not available", nil)
+ }
if isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts)
} else {
resp, err = spt.sender.Do(req)
}
if err != nil {
- return newTokenRefreshError(fmt.Sprintf("adal: Failed to execute the refresh request. Error = '%v'", err), nil)
+ // don't return a TokenRefreshError here; this will allow retry logic to apply
+ return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err)
}
defer resp.Body.Close()
@@ -906,12 +1017,19 @@ func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http
attempt := 0
delay := time.Duration(0)
+ // maxAttempts is user-specified, ensure that its value is greater than zero else no request will be made
+ if maxAttempts < 1 {
+ maxAttempts = defaultMaxMSIRefreshAttempts
+ }
+
for attempt < maxAttempts {
+ if resp != nil && resp.Body != nil {
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+ }
resp, err = sender.Do(req)
- // retry on temporary network errors, e.g. transient network failures.
- // if we don't receive a response then assume we can't connect to the
- // endpoint so we're likely not running on an Azure VM so don't retry.
- if (err != nil && !isTemporaryNetworkError(err)) || resp == nil || resp.StatusCode == http.StatusOK || !containsInt(retries, resp.StatusCode) {
+ // we want to retry if err is not nil or the status code is in the list of retry codes
+ if err == nil && !responseHasStatusCode(resp, retries...) {
return
}
@@ -935,20 +1053,12 @@ func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http
return
}
-// returns true if the specified error is a temporary network error or false if it's not.
-// if the error doesn't implement the net.Error interface the return value is true.
-func isTemporaryNetworkError(err error) bool {
- if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) {
- return true
- }
- return false
-}
-
-// returns true if slice ints contains the value n
-func containsInt(ints []int, n int) bool {
- for _, i := range ints {
- if i == n {
- return true
+func responseHasStatusCode(resp *http.Response, codes ...int) bool {
+ if resp != nil {
+ for _, i := range codes {
+ if i == resp.StatusCode {
+ return true
+ }
}
}
return false
@@ -983,3 +1093,107 @@ func (spt *ServicePrincipalToken) Token() Token {
defer spt.refreshLock.RUnlock()
return spt.inner.Token
}
+
+// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization.
+type MultiTenantServicePrincipalToken struct {
+ PrimaryToken *ServicePrincipalToken
+ AuxiliaryTokens []*ServicePrincipalToken
+}
+
+// PrimaryOAuthToken returns the primary authorization token.
+func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string {
+ return mt.PrimaryToken.OAuthToken()
+}
+
+// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens.
+func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string {
+ tokens := make([]string, len(mt.AuxiliaryTokens))
+ for i := range mt.AuxiliaryTokens {
+ tokens[i] = mt.AuxiliaryTokens[i].OAuthToken()
+ }
+ return tokens
+}
+
+// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
+// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
+func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
+ if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil {
+ return fmt.Errorf("failed to refresh primary token: %v", err)
+ }
+ for _, aux := range mt.AuxiliaryTokens {
+ if err := aux.EnsureFreshWithContext(ctx); err != nil {
+ return fmt.Errorf("failed to refresh auxiliary token: %v", err)
+ }
+ }
+ return nil
+}
+
+// RefreshWithContext obtains a fresh token for the Service Principal.
+func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
+ if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil {
+ return fmt.Errorf("failed to refresh primary token: %v", err)
+ }
+ for _, aux := range mt.AuxiliaryTokens {
+ if err := aux.RefreshWithContext(ctx); err != nil {
+ return fmt.Errorf("failed to refresh auxiliary token: %v", err)
+ }
+ }
+ return nil
+}
+
+// RefreshExchangeWithContext refreshes the token, but for a different resource.
+func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
+ if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil {
+ return fmt.Errorf("failed to refresh primary token: %v", err)
+ }
+ for _, aux := range mt.AuxiliaryTokens {
+ if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil {
+ return fmt.Errorf("failed to refresh auxiliary token: %v", err)
+ }
+ }
+ return nil
+}
+
+// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource.
+func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) {
+ if err := validateStringParam(clientID, "clientID"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(secret, "secret"); err != nil {
+ return nil, err
+ }
+ if err := validateStringParam(resource, "resource"); err != nil {
+ return nil, err
+ }
+ auxTenants := multiTenantCfg.AuxiliaryTenants()
+ m := MultiTenantServicePrincipalToken{
+ AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)),
+ }
+ primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err)
+ }
+ m.PrimaryToken = primary
+ for i := range auxTenants {
+ aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err)
+ }
+ m.AuxiliaryTokens[i] = aux
+ }
+ return &m, nil
+}
+
+// MSIAvailable returns true if the MSI endpoint is available for authentication.
+func MSIAvailable(ctx context.Context, sender Sender) bool {
+ // this cannot fail, the return sig is due to legacy reasons
+ msiEndpoint, _ := GetMSIVMEndpoint()
+ tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
+ defer cancel()
+ req, _ := http.NewRequestWithContext(tempCtx, http.MethodGet, msiEndpoint, nil)
+ q := req.URL.Query()
+ q.Add("api-version", msiAPIVersion)
+ req.URL.RawQuery = q.Encode()
+ _, err := sender.Do(req)
+ return err == nil
+}
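
A minimal sketch of how the new multi-tenant token type and the MSIAvailable probe added above are used; the tenant, client, and secret values below are placeholders, not real credentials:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest/adal"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Placeholder tenant/client values for illustration only.
	primaryTenant := "primary-tenant-id"
	auxTenants := []string{"aux-tenant-1", "aux-tenant-2"}

	cfg, err := adal.NewMultiTenantOAuthConfig(
		azure.PublicCloud.ActiveDirectoryEndpoint, primaryTenant, auxTenants, adal.OAuthOptions{})
	if err != nil {
		panic(err)
	}

	mtSPT, err := adal.NewMultiTenantServicePrincipalToken(
		cfg, "client-id", "client-secret", azure.PublicCloud.ResourceManagerEndpoint)
	if err != nil {
		panic(err)
	}

	// Refresh the primary and all auxiliary tokens before use.
	if err := mtSPT.EnsureFreshWithContext(context.Background()); err != nil {
		panic(err)
	}
	fmt.Println(mtSPT.PrimaryOAuthToken(), mtSPT.AuxiliaryOAuthTokens())

	// MSIAvailable probes the IMDS endpoint with a short internal timeout to see
	// whether managed-identity authentication is possible on this host.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println("MSI available:", adal.MSIAvailable(ctx, &http.Client{}))
}
```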
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go
index 2e24b4b39..15138b642 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go
@@ -15,6 +15,7 @@ package autorest
// limitations under the License.
import (
+ "crypto/tls"
"encoding/base64"
"fmt"
"net/http"
@@ -22,7 +23,6 @@ import (
"strings"
"github.com/Azure/go-autorest/autorest/adal"
- "github.com/Azure/go-autorest/tracing"
)
const (
@@ -138,6 +138,11 @@ func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
}
}
+// TokenProvider returns OAuthTokenProvider so that it can be used for authorization outside the REST.
+func (ba *BearerAuthorizer) TokenProvider() adal.OAuthTokenProvider {
+ return ba.tokenProvider
+}
+
// BearerAuthorizerCallbackFunc is the authentication callback signature.
type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error)
@@ -149,11 +154,11 @@ type BearerAuthorizerCallback struct {
// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback
// is invoked when the HTTP request is submitted.
-func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
- if sender == nil {
- sender = &http.Client{Transport: tracing.Transport}
+func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
+ if s == nil {
+ s = sender(tls.RenegotiateNever)
}
- return &BearerAuthorizerCallback{sender: sender, callback: callback}
+ return &BearerAuthorizerCallback{sender: s, callback: callback}
}
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value
@@ -171,20 +176,21 @@ func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator {
removeRequestBody(&rCopy)
resp, err := bacb.sender.Do(&rCopy)
- if err == nil && resp.StatusCode == 401 {
- defer resp.Body.Close()
- if hasBearerChallenge(resp) {
- bc, err := newBearerChallenge(resp)
+ if err != nil {
+ return r, err
+ }
+ DrainResponseBody(resp)
+ if resp.StatusCode == 401 && hasBearerChallenge(resp.Header) {
+ bc, err := newBearerChallenge(resp.Header)
+ if err != nil {
+ return r, err
+ }
+ if bacb.callback != nil {
+ ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
if err != nil {
return r, err
}
- if bacb.callback != nil {
- ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
- if err != nil {
- return r, err
- }
- return Prepare(r, ba.WithAuthorization())
- }
+ return Prepare(r, ba.WithAuthorization())
}
}
}
@@ -194,8 +200,8 @@ func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator {
}
// returns true if the HTTP response contains a bearer challenge
-func hasBearerChallenge(resp *http.Response) bool {
- authHeader := resp.Header.Get(bearerChallengeHeader)
+func hasBearerChallenge(header http.Header) bool {
+ authHeader := header.Get(bearerChallengeHeader)
if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 {
return false
}
@@ -206,8 +212,8 @@ type bearerChallenge struct {
values map[string]string
}
-func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) {
- challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader))
+func newBearerChallenge(header http.Header) (bc bearerChallenge, err error) {
+ challenge := strings.TrimSpace(header.Get(bearerChallengeHeader))
trimmedChallenge := challenge[len(bearer)+1:]
// challenge is a set of key=value pairs that are comma delimited
@@ -285,3 +291,52 @@ func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator {
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
}
+
+// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants.
+type MultiTenantServicePrincipalTokenAuthorizer interface {
+ WithAuthorization() PrepareDecorator
+}
+
+// NewMultiTenantServicePrincipalTokenAuthorizer creates an authorizer using the given token provider
+func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer {
+ return &multiTenantSPTAuthorizer{tp: tp}
+}
+
+type multiTenantSPTAuthorizer struct {
+ tp adal.MultitenantOAuthTokenProvider
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the
+// primary token along with the auxiliary authorization header using the auxiliary tokens.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
+func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err != nil {
+ return r, err
+ }
+ if refresher, ok := mt.tp.(adal.RefresherWithContext); ok {
+ err = refresher.EnsureFreshWithContext(r.Context())
+ if err != nil {
+ var resp *http.Response
+ if tokError, ok := err.(adal.TokenRefreshError); ok {
+ resp = tokError.Response()
+ }
+ return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp,
+ "Failed to refresh one or more Tokens for request to %s", r.URL)
+ }
+ }
+ r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken())))
+ if err != nil {
+ return r, err
+ }
+ auxTokens := mt.tp.AuxiliaryOAuthTokens()
+ for i := range auxTokens {
+ auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i])
+ }
+ return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, ", ")))
+ })
+ }
+}
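
A sketch of how the new multi-tenant authorizer attaches headers to an outgoing request, assuming a MultiTenantServicePrincipalToken created via adal.NewMultiTenantServicePrincipalToken as shown earlier; the base URL is a placeholder:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/adal"
)

// buildRequest shows the decorator in use: it sets "Authorization: Bearer <primary>"
// plus an auxiliary authorization header carrying the auxiliary tenant tokens.
func buildRequest(mtSPT *adal.MultiTenantServicePrincipalToken) (*http.Request, error) {
	authorizer := autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT)
	return autorest.Prepare(&http.Request{},
		autorest.AsGet(),
		autorest.WithBaseURL("https://management.azure.com/"), // placeholder URL
		authorizer.WithAuthorization())
}

func main() { fmt.Println("see buildRequest") }
```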
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go
new file mode 100644
index 000000000..66501493b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go
@@ -0,0 +1,66 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+)
+
+// SASTokenAuthorizer implements authorization for SAS Token Authentication.
+// It can be used for interaction with Blob Storage Endpoints.
+type SASTokenAuthorizer struct {
+ sasToken string
+}
+
+// NewSASTokenAuthorizer creates a SASTokenAuthorizer using the given credentials
+func NewSASTokenAuthorizer(sasToken string) (*SASTokenAuthorizer, error) {
+ if strings.TrimSpace(sasToken) == "" {
+ return nil, fmt.Errorf("sasToken cannot be empty")
+ }
+
+ token := sasToken
+ if strings.HasPrefix(sasToken, "?") {
+ token = strings.TrimPrefix(sasToken, "?")
+ }
+
+ return &SASTokenAuthorizer{
+ sasToken: token,
+ }, nil
+}
+
+// WithAuthorization returns a PrepareDecorator that adds a shared access signature token to the
+// URI's query parameters. This can be used for the Blob, Queue, and File Services.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/delegate-access-with-shared-access-signature
+func (sas *SASTokenAuthorizer) WithAuthorization() PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err != nil {
+ return r, err
+ }
+
+ if r.URL.RawQuery == "" {
+ r.URL.RawQuery = sas.sasToken
+ } else if !strings.Contains(r.URL.RawQuery, sas.sasToken) {
+ r.URL.RawQuery = fmt.Sprintf("%s&%s", r.URL.RawQuery, sas.sasToken)
+ }
+
+ return Prepare(r)
+ })
+ }
+}
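
A small sketch of the new SAS token authorizer; the token and URL are placeholders, not valid signatures:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// A SAS token as copied from the portal or CLI; the leading '?' is stripped by the constructor.
	// The token below is a placeholder, not a real signature.
	sas, err := autorest.NewSASTokenAuthorizer("?sv=2019-12-12&ss=b&sig=PLACEHOLDER")
	if err != nil {
		panic(err)
	}

	// The decorator appends the token to the request's query string (and only once).
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsGet(),
		autorest.WithBaseURL("https://example.blob.core.windows.net/container/blob.txt"),
		sas.WithAuthorization())
	if err != nil {
		panic(err)
	}
	fmt.Println(req.URL.String())
}
```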
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
new file mode 100644
index 000000000..2af5030a1
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
@@ -0,0 +1,307 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/url"
+ "sort"
+ "strings"
+ "time"
+)
+
+// SharedKeyType defines the enumeration for the various shared key types.
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key for details on the shared key types.
+type SharedKeyType string
+
+const (
+ // SharedKey is used to authorize against blobs, files and queues services.
+ SharedKey SharedKeyType = "sharedKey"
+
+ // SharedKeyForTable is used to authorize against the table service.
+ SharedKeyForTable SharedKeyType = "sharedKeyTable"
+
+ // SharedKeyLite is used to authorize against blobs, files and queues services. It's provided for
+ // backwards compatibility with API versions before 2009-09-19. Prefer SharedKey instead.
+ SharedKeyLite SharedKeyType = "sharedKeyLite"
+
+ // SharedKeyLiteForTable is used to authorize against the table service. It's provided for
+ // backwards compatibility with older table API versions. Prefer SharedKeyForTable instead.
+ SharedKeyLiteForTable SharedKeyType = "sharedKeyLiteTable"
+)
+
+const (
+ headerAccept = "Accept"
+ headerAcceptCharset = "Accept-Charset"
+ headerContentEncoding = "Content-Encoding"
+ headerContentLength = "Content-Length"
+ headerContentMD5 = "Content-MD5"
+ headerContentLanguage = "Content-Language"
+ headerIfModifiedSince = "If-Modified-Since"
+ headerIfMatch = "If-Match"
+ headerIfNoneMatch = "If-None-Match"
+ headerIfUnmodifiedSince = "If-Unmodified-Since"
+ headerDate = "Date"
+ headerXMSDate = "X-Ms-Date"
+ headerXMSVersion = "x-ms-version"
+ headerRange = "Range"
+)
+
+const storageEmulatorAccountName = "devstoreaccount1"
+
+// SharedKeyAuthorizer implements authorization for Shared Key.
+// It can be used for interaction with Blob, File and Queue Storage Endpoints.
+type SharedKeyAuthorizer struct {
+ accountName string
+ accountKey []byte
+ keyType SharedKeyType
+}
+
+// NewSharedKeyAuthorizer creates a SharedKeyAuthorizer using the provided credentials and shared key type.
+func NewSharedKeyAuthorizer(accountName, accountKey string, keyType SharedKeyType) (*SharedKeyAuthorizer, error) {
+ key, err := base64.StdEncoding.DecodeString(accountKey)
+ if err != nil {
+ return nil, fmt.Errorf("malformed storage account key: %v", err)
+ }
+ return &SharedKeyAuthorizer{
+ accountName: accountName,
+ accountKey: key,
+ keyType: keyType,
+ }, nil
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "&lt;SharedKeyType&gt; " followed by the computed key.
+// This can be used for the Blob, Queue, and File Services
+//
+// from: https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key
+// You may use Shared Key authorization to authorize a request made against the
+// 2009-09-19 version and later of the Blob and Queue services,
+// and version 2014-02-14 and later of the File services.
+func (sk *SharedKeyAuthorizer) WithAuthorization() PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err != nil {
+ return r, err
+ }
+
+ sk, err := buildSharedKey(sk.accountName, sk.accountKey, r, sk.keyType)
+ if err != nil {
+ return r, err
+ }
+ return Prepare(r, WithHeader(headerAuthorization, sk))
+ })
+ }
+}
+
+func buildSharedKey(accName string, accKey []byte, req *http.Request, keyType SharedKeyType) (string, error) {
+ canRes, err := buildCanonicalizedResource(accName, req.URL.String(), keyType)
+ if err != nil {
+ return "", err
+ }
+
+ if req.Header == nil {
+ req.Header = http.Header{}
+ }
+
+ // ensure date is set
+ if req.Header.Get(headerDate) == "" && req.Header.Get(headerXMSDate) == "" {
+ date := time.Now().UTC().Format(http.TimeFormat)
+ req.Header.Set(headerXMSDate, date)
+ }
+ canString, err := buildCanonicalizedString(req.Method, req.Header, canRes, keyType)
+ if err != nil {
+ return "", err
+ }
+ return createAuthorizationHeader(accName, accKey, canString, keyType), nil
+}
+
+func buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType) (string, error) {
+ errMsg := "buildCanonicalizedResource error: %s"
+ u, err := url.Parse(uri)
+ if err != nil {
+ return "", fmt.Errorf(errMsg, err.Error())
+ }
+
+ cr := bytes.NewBufferString("")
+ if accountName != storageEmulatorAccountName {
+ cr.WriteString("/")
+ cr.WriteString(getCanonicalizedAccountName(accountName))
+ }
+
+ if len(u.Path) > 0 {
+ // Any portion of the CanonicalizedResource string that is derived from
+ // the resource's URI should be encoded exactly as it is in the URI.
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
+ cr.WriteString(u.EscapedPath())
+ } else {
+ // a slash is required to indicate the root path
+ cr.WriteString("/")
+ }
+
+ params, err := url.ParseQuery(u.RawQuery)
+ if err != nil {
+ return "", fmt.Errorf(errMsg, err.Error())
+ }
+
+ // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277
+ if keyType == SharedKey {
+ if len(params) > 0 {
+ cr.WriteString("\n")
+
+ keys := []string{}
+ for key := range params {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+
+ completeParams := []string{}
+ for _, key := range keys {
+ if len(params[key]) > 1 {
+ sort.Strings(params[key])
+ }
+
+ completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")))
+ }
+ cr.WriteString(strings.Join(completeParams, "\n"))
+ }
+ } else {
+ // search for "comp" parameter, if exists then add it to canonicalizedresource
+ if v, ok := params["comp"]; ok {
+ cr.WriteString("?comp=" + v[0])
+ }
+ }
+
+ return string(cr.Bytes()), nil
+}
+
+func getCanonicalizedAccountName(accountName string) string {
+ // since we may be trying to access a secondary storage account, we need to
+ // remove the -secondary part of the storage name
+ return strings.TrimSuffix(accountName, "-secondary")
+}
+
+func buildCanonicalizedString(verb string, headers http.Header, canonicalizedResource string, keyType SharedKeyType) (string, error) {
+ contentLength := headers.Get(headerContentLength)
+ if contentLength == "0" {
+ contentLength = ""
+ }
+ date := headers.Get(headerDate)
+ if v := headers.Get(headerXMSDate); v != "" {
+ if keyType == SharedKey || keyType == SharedKeyLite {
+ date = ""
+ } else {
+ date = v
+ }
+ }
+ var canString string
+ switch keyType {
+ case SharedKey:
+ canString = strings.Join([]string{
+ verb,
+ headers.Get(headerContentEncoding),
+ headers.Get(headerContentLanguage),
+ contentLength,
+ headers.Get(headerContentMD5),
+ headers.Get(headerContentType),
+ date,
+ headers.Get(headerIfModifiedSince),
+ headers.Get(headerIfMatch),
+ headers.Get(headerIfNoneMatch),
+ headers.Get(headerIfUnmodifiedSince),
+ headers.Get(headerRange),
+ buildCanonicalizedHeader(headers),
+ canonicalizedResource,
+ }, "\n")
+ case SharedKeyForTable:
+ canString = strings.Join([]string{
+ verb,
+ headers.Get(headerContentMD5),
+ headers.Get(headerContentType),
+ date,
+ canonicalizedResource,
+ }, "\n")
+ case SharedKeyLite:
+ canString = strings.Join([]string{
+ verb,
+ headers.Get(headerContentMD5),
+ headers.Get(headerContentType),
+ date,
+ buildCanonicalizedHeader(headers),
+ canonicalizedResource,
+ }, "\n")
+ case SharedKeyLiteForTable:
+ canString = strings.Join([]string{
+ date,
+ canonicalizedResource,
+ }, "\n")
+ default:
+ return "", fmt.Errorf("key type '%s' is not supported", keyType)
+ }
+ return canString, nil
+}
+
+func buildCanonicalizedHeader(headers http.Header) string {
+ cm := make(map[string]string)
+
+ for k := range headers {
+ headerName := strings.TrimSpace(strings.ToLower(k))
+ if strings.HasPrefix(headerName, "x-ms-") {
+ cm[headerName] = headers.Get(k)
+ }
+ }
+
+ if len(cm) == 0 {
+ return ""
+ }
+
+ keys := []string{}
+ for key := range cm {
+ keys = append(keys, key)
+ }
+
+ sort.Strings(keys)
+
+ ch := bytes.NewBufferString("")
+
+ for _, key := range keys {
+ ch.WriteString(key)
+ ch.WriteRune(':')
+ ch.WriteString(cm[key])
+ ch.WriteRune('\n')
+ }
+
+ return strings.TrimSuffix(string(ch.Bytes()), "\n")
+}
+
+func createAuthorizationHeader(accountName string, accountKey []byte, canonicalizedString string, keyType SharedKeyType) string {
+ h := hmac.New(sha256.New, accountKey)
+ h.Write([]byte(canonicalizedString))
+ signature := base64.StdEncoding.EncodeToString(h.Sum(nil))
+ var key string
+ switch keyType {
+ case SharedKey, SharedKeyForTable:
+ key = "SharedKey"
+ case SharedKeyLite, SharedKeyLiteForTable:
+ key = "SharedKeyLite"
+ }
+ return fmt.Sprintf("%s %s:%s", key, getCanonicalizedAccountName(accountName), signature)
+}
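
A sketch of the new shared key authorizer in use; the account name and base64 key are placeholders, and the container URL is illustrative only:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Placeholder account name and key; a real key comes from the storage account.
	auth, err := autorest.NewSharedKeyAuthorizer("myaccount", "c2VjcmV0LWtleQ==", autorest.SharedKey)
	if err != nil {
		panic(err)
	}

	// The decorator sets x-ms-date if absent, canonicalizes the headers and resource,
	// signs the result with HMAC-SHA256, and sets "Authorization: SharedKey myaccount:<signature>".
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsGet(),
		autorest.WithBaseURL("https://myaccount.blob.core.windows.net/mycontainer?restype=container&comp=list"),
		auth.WithAuthorization())
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("Authorization"))
}
```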
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
index 0041eacf7..5326f1fd3 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
@@ -45,14 +45,7 @@ var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.Stat
// Future provides a mechanism to access the status and results of an asynchronous request.
// Since futures are stateful they should be passed by value to avoid race conditions.
type Future struct {
- req *http.Request // legacy
- pt pollingTracker
-}
-
-// NewFuture returns a new Future object initialized with the specified request.
-// Deprecated: Please use NewFutureFromResponse instead.
-func NewFuture(req *http.Request) Future {
- return Future{req: req}
+ pt pollingTracker
}
// NewFutureFromResponse returns a new Future object initialized
@@ -86,12 +79,6 @@ func (f Future) PollingMethod() PollingMethodType {
return f.pt.pollingMethod()
}
-// Done queries the service to see if the operation has completed.
-// Deprecated: Use DoneWithContext()
-func (f *Future) Done(sender autorest.Sender) (bool, error) {
- return f.DoneWithContext(context.Background(), sender)
-}
-
// DoneWithContext queries the service to see if the operation has completed.
func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) {
ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext")
@@ -104,20 +91,6 @@ func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (d
tracing.EndSpan(ctx, sc, err)
}()
- // support for legacy Future implementation
- if f.req != nil {
- resp, err := sender.Do(f.req)
- if err != nil {
- return false, err
- }
- pt, err := createPollingTracker(resp)
- if err != nil {
- return false, err
- }
- f.pt = pt
- f.req = nil
- }
- // end legacy
if f.pt == nil {
return false, autorest.NewError("Future", "Done", "future is not initialized")
}
@@ -168,15 +141,6 @@ func (f Future) GetPollingDelay() (time.Duration, bool) {
return d, true
}
-// WaitForCompletion will return when one of the following conditions is met: the long
-// running operation has completed, the provided context is cancelled, or the client's
-// polling duration has been exceeded. It will retry failed polling attempts based on
-// the retry value defined in the client up to the maximum retry attempts.
-// Deprecated: Please use WaitForCompletionRef() instead.
-func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) error {
- return f.WaitForCompletionRef(ctx, client)
-}
-
// WaitForCompletionRef will return when one of the following conditions is met: the long
// running operation has completed, the provided context is cancelled, or the client's
// polling duration has been exceeded. It will retry failed polling attempts based on
@@ -203,7 +167,13 @@ func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Clien
cancelCtx, cancel = context.WithTimeout(ctx, d)
defer cancel()
}
-
+ // if the initial response has a Retry-After, sleep for the specified amount of time before starting to poll
+ if delay, ok := f.GetPollingDelay(); ok {
+ if delayElapsed := autorest.DelayForBackoff(delay, 0, cancelCtx.Done()); !delayElapsed {
+ err = cancelCtx.Err()
+ return
+ }
+ }
done, err := f.DoneWithContext(ctx, client)
for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) {
if attempts >= client.RetryAttempts {
@@ -294,7 +264,17 @@ func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) {
if err != nil {
return nil, err
}
- return sender.Do(req)
+ resp, err := sender.Do(req)
+ if err == nil && resp.Body != nil {
+ // copy the body and close it so callers don't have to
+ defer resp.Body.Close()
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return resp, err
+ }
+ resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+ }
+ return resp, err
}
type pollingTracker interface {
@@ -453,6 +433,11 @@ func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest
}
req = req.WithContext(ctx)
+ preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...)
+ req, err = preparer.Prepare(req)
+ if err != nil {
+ return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request")
+ }
pt.resp, err = sender.Do(req)
if err != nil {
return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request")
@@ -919,43 +904,6 @@ func isValidURL(s string) bool {
return err == nil && u.IsAbs()
}
-// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure
-// long-running operation. It will delay between requests for the duration specified in the
-// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled via
-// the context associated with the http.Request.
-// Deprecated: Prefer using Futures to allow for non-blocking async operations.
-func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator {
- return func(s autorest.Sender) autorest.Sender {
- return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
- resp, err := s.Do(r)
- if err != nil {
- return resp, err
- }
- if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) {
- return resp, nil
- }
- future, err := NewFutureFromResponse(resp)
- if err != nil {
- return resp, err
- }
- // retry until either the LRO completes or we receive an error
- var done bool
- for done, err = future.Done(s); !done && err == nil; done, err = future.Done(s) {
- // check for Retry-After delay, if not present use the specified polling delay
- if pd, ok := future.GetPollingDelay(); ok {
- delay = pd
- }
- // wait until the delay elapses or the context is cancelled
- if delayElapsed := autorest.DelayForBackoff(delay, 0, r.Context().Done()); !delayElapsed {
- return future.Response(),
- autorest.NewErrorWithError(r.Context().Err(), "azure", "DoPollForAsynchronous", future.Response(), "context has been cancelled")
- }
- }
- return future.Response(), err
- })
- }
-}
-
// PollingMethodType defines a type used for enumerating polling mechanisms.
type PollingMethodType string
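
With the deprecated Done/WaitForCompletion/DoPollForAsynchronous paths removed, a rough sketch of the remaining long-running-operation flow (the client and initial response are assumed to come from an earlier request):

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// waitForLRO shows the non-deprecated polling flow: build a Future from the initial
// 201/202 response, block until it finishes, then fetch the terminal response.
func waitForLRO(ctx context.Context, client autorest.Client, initial *http.Response) (*http.Response, error) {
	future, err := azure.NewFutureFromResponse(initial)
	if err != nil {
		return nil, err
	}
	// Honors a Retry-After on the initial response before the first poll, then polls
	// until done, the context is cancelled, or the client's polling duration elapses.
	if err := future.WaitForCompletionRef(ctx, client); err != nil {
		return nil, err
	}
	// GetResult now buffers and closes the body so callers can read it safely.
	return future.GetResult(client)
}

func main() { fmt.Println("see waitForLRO") }
```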
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE
new file mode 100644
index 000000000..b9d6a27ea
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go
index 20855d4ab..596b9f577 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go
@@ -16,8 +16,6 @@ package auth
import (
"bytes"
- "crypto/rsa"
- "crypto/x509"
"encoding/binary"
"encoding/json"
"errors"
@@ -33,13 +31,13 @@ import (
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/azure/cli"
"github.com/dimchansky/utfbom"
- "golang.org/x/crypto/pkcs12"
)
// The possible keys in the Values map.
const (
SubscriptionID = "AZURE_SUBSCRIPTION_ID"
TenantID = "AZURE_TENANT_ID"
+ AuxiliaryTenantIDs = "AZURE_AUXILIARY_TENANT_IDS"
ClientID = "AZURE_CLIENT_ID"
ClientSecret = "AZURE_CLIENT_SECRET"
CertificatePath = "AZURE_CERTIFICATE_PATH"
@@ -96,6 +94,7 @@ func GetSettingsFromEnvironment() (s EnvironmentSettings, err error) {
}
s.setValue(SubscriptionID)
s.setValue(TenantID)
+ s.setValue(AuxiliaryTenantIDs)
s.setValue(ClientID)
s.setValue(ClientSecret)
s.setValue(CertificatePath)
@@ -145,6 +144,12 @@ func (settings EnvironmentSettings) GetClientCredentials() (ClientCredentialsCon
config := NewClientCredentialsConfig(clientID, secret, tenantID)
config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
config.Resource = settings.Values[Resource]
+ if auxTenants, ok := settings.Values[AuxiliaryTenantIDs]; ok {
+ config.AuxTenants = strings.Split(auxTenants, ";")
+ for i := range config.AuxTenants {
+ config.AuxTenants[i] = strings.TrimSpace(config.AuxTenants[i])
+ }
+ }
return config, nil
}
@@ -458,7 +463,7 @@ func decode(b []byte) ([]byte, error) {
}
func (settings FileSettings) getResourceForToken(baseURI string) (string, error) {
- // Compare dafault base URI from the SDK to the endpoints from the public cloud
+ // Compare default base URI from the SDK to the endpoints from the public cloud
// Base URI and token resource are the same string. This func finds the authentication
// file field that matches the SDK base URI. The SDK defines the public cloud
// endpoint as its default base URI
@@ -546,6 +551,7 @@ type ClientCredentialsConfig struct {
ClientID string
ClientSecret string
TenantID string
+ AuxTenants []string
AADEndpoint string
Resource string
}
@@ -559,13 +565,29 @@ func (ccc ClientCredentialsConfig) ServicePrincipalToken() (*adal.ServicePrincip
return adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)
}
+// MultiTenantServicePrincipalToken creates a MultiTenantServicePrincipalToken from client credentials.
+func (ccc ClientCredentialsConfig) MultiTenantServicePrincipalToken() (*adal.MultiTenantServicePrincipalToken, error) {
+ oauthConfig, err := adal.NewMultiTenantOAuthConfig(ccc.AADEndpoint, ccc.TenantID, ccc.AuxTenants, adal.OAuthOptions{})
+ if err != nil {
+ return nil, err
+ }
+ return adal.NewMultiTenantServicePrincipalToken(oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)
+}
+
// Authorizer gets the authorizer from client credentials.
func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) {
- spToken, err := ccc.ServicePrincipalToken()
- if err != nil {
- return nil, fmt.Errorf("failed to get oauth token from client credentials: %v", err)
+ if len(ccc.AuxTenants) == 0 {
+ spToken, err := ccc.ServicePrincipalToken()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get SPT from client credentials: %v", err)
+ }
+ return autorest.NewBearerAuthorizer(spToken), nil
}
- return autorest.NewBearerAuthorizer(spToken), nil
+ mtSPT, err := ccc.MultiTenantServicePrincipalToken()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get multitenant SPT from client credentials: %v", err)
+ }
+ return autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT), nil
}
// ClientCertificateConfig provides the options to get a bearer authorizer from a client certificate.
@@ -588,7 +610,7 @@ func (ccc ClientCertificateConfig) ServicePrincipalToken() (*adal.ServicePrincip
if err != nil {
return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err)
}
- certificate, rsaPrivateKey, err := decodePkcs12(certData, ccc.CertificatePassword)
+ certificate, rsaPrivateKey, err := adal.DecodePfxCertificateData(certData, ccc.CertificatePassword)
if err != nil {
return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
}
@@ -640,20 +662,6 @@ func (dfc DeviceFlowConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken
return adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token)
}
-func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
- privateKey, certificate, err := pkcs12.Decode(pkcs, password)
- if err != nil {
- return nil, nil, err
- }
-
- rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey)
- if !isRsaKey {
- return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key")
- }
-
- return certificate, rsaPrivateKey, nil
-}
-
// UsernamePasswordConfig provides the options to get a bearer authorizer from a username and a password.
type UsernamePasswordConfig struct {
ClientID string
@@ -688,9 +696,9 @@ type MSIConfig struct {
ClientID string
}
-// Authorizer gets the authorizer from MSI.
-func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) {
- msiEndpoint, err := adal.GetMSIVMEndpoint()
+// ServicePrincipalToken creates a ServicePrincipalToken from MSI.
+func (mc MSIConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
+ msiEndpoint, err := adal.GetMSIEndpoint()
if err != nil {
return nil, err
}
@@ -708,5 +716,15 @@ func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) {
}
}
+ return spToken, nil
+}
+
+// Authorizer gets the authorizer from MSI.
+func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) {
+ spToken, err := mc.ServicePrincipalToken()
+ if err != nil {
+ return nil, err
+ }
+
return autorest.NewBearerAuthorizer(spToken), nil
}
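
A sketch of the new auxiliary-tenant and MSI plumbing from the environment; the tenant and client values are placeholders, and no network call is made until the token is actually refreshed:

```go
package main

import (
	"fmt"
	"os"

	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// With AZURE_AUXILIARY_TENANT_IDS set (semicolon separated), GetClientCredentials
	// returns a config whose Authorizer() produces a multi-tenant authorizer;
	// without it, the usual single-tenant bearer authorizer is returned.
	os.Setenv("AZURE_TENANT_ID", "primary-tenant") // placeholders
	os.Setenv("AZURE_AUXILIARY_TENANT_IDS", "aux-1; aux-2")
	os.Setenv("AZURE_CLIENT_ID", "client-id")
	os.Setenv("AZURE_CLIENT_SECRET", "client-secret")

	settings, err := auth.GetSettingsFromEnvironment()
	if err != nil {
		panic(err)
	}
	ccc, err := settings.GetClientCredentials()
	if err != nil {
		panic(err)
	}
	authorizer, err := ccc.Authorizer()
	if err != nil {
		panic(err)
	}
	fmt.Printf("authorizer type: %T\n", authorizer)

	// MSIConfig now exposes ServicePrincipalToken() directly; Authorizer() is built on top of it.
	mc := auth.MSIConfig{Resource: settings.Environment.ResourceManagerEndpoint}
	spt, err := mc.ServicePrincipalToken()
	fmt.Println("MSI token source created:", spt != nil, err)
}
```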
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod
new file mode 100644
index 000000000..b92707a9c
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod
@@ -0,0 +1,11 @@
+module github.com/Azure/go-autorest/autorest/azure/auth
+
+go 1.12
+
+require (
+ github.com/Azure/go-autorest v14.2.0+incompatible
+ github.com/Azure/go-autorest/autorest v0.11.0
+ github.com/Azure/go-autorest/autorest/adal v0.9.2
+ github.com/Azure/go-autorest/autorest/azure/cli v0.4.0
+ github.com/dimchansky/utfbom v1.1.0
+)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum
new file mode 100644
index 000000000..27656df78
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum
@@ -0,0 +1,33 @@
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.0 h1:tnO41Uo+/0sxTMFY/U7aKg2abek3JOnnXcuSuba74jI=
+github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest/adal v0.9.0 h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.2 h1:Aze/GQeAN1RRbGmnUJvUj+tFGBzFdIg3293/A9rbxC4=
+github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.0 h1:Ml+UCrnlKD+cJmSzrZ/RDcDw86NjkRUpnFh7V5JUhzU=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
+github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go
new file mode 100644
index 000000000..38e4900ad
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go
@@ -0,0 +1,24 @@
+// +build modhack
+
+package auth
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
index 3a0a439ff..26be936b7 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
@@ -17,6 +17,7 @@ package azure
// limitations under the License.
import (
+ "bytes"
"encoding/json"
"fmt"
"io/ioutil"
@@ -143,7 +144,7 @@ type RequestError struct {
autorest.DetailedError
// The error returned by the Azure service.
- ServiceError *ServiceError `json:"error"`
+ ServiceError *ServiceError `json:"error" xml:"Error"`
// The request id (from the x-ms-request-id-header) of the request.
RequestID string
@@ -285,26 +286,34 @@ func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
var e RequestError
defer resp.Body.Close()
+ encodedAs := autorest.EncodedAsJSON
+ if strings.Contains(resp.Header.Get("Content-Type"), "xml") {
+ encodedAs = autorest.EncodedAsXML
+ }
+
// Copy and replace the Body in case it does not contain an error object.
// This will leave the Body available to the caller.
- b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e)
+ b, decodeErr := autorest.CopyAndDecode(encodedAs, resp.Body, &e)
resp.Body = ioutil.NopCloser(&b)
if decodeErr != nil {
return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr)
}
if e.ServiceError == nil {
// Check if error is unwrapped ServiceError
- if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil {
+ decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes()))
+ if err := decoder.Decode(&e.ServiceError); err != nil {
return err
}
}
if e.ServiceError.Message == "" {
// if we're here it means the returned error wasn't OData v4 compliant.
- // try to unmarshal the body as raw JSON in hopes of getting something.
+ // try to unmarshal the body in hopes of getting something.
rawBody := map[string]interface{}{}
- if err := json.Unmarshal(b.Bytes(), &rawBody); err != nil {
+ decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes()))
+ if err := decoder.Decode(&rawBody); err != nil {
return err
}
+
e.ServiceError = &ServiceError{
Code: "Unknown",
Message: "Unknown service error",
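
A rough sketch of what the Content-Type sniffing above enables: XML error bodies (e.g. from storage endpoints) are now decoded instead of being rejected as malformed JSON. The test server and error payload below are fabricated for illustration:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Fake storage-style failure: an XML error body with an XML Content-Type.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/xml")
		w.WriteHeader(http.StatusNotFound)
		fmt.Fprint(w, `<Error><Code>BlobNotFound</Code><Message>blob does not exist</Message></Error>`)
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Because the responder sniffs the Content-Type, the XML error body can be
	// decoded into the RequestError instead of failing JSON parsing.
	err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK))
	fmt.Println(err)
}
```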
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE
new file mode 100644
index 000000000..b9d6a27ea
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod
new file mode 100644
index 000000000..087f737e9
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod
@@ -0,0 +1,11 @@
+module github.com/Azure/go-autorest/autorest/azure/cli
+
+go 1.12
+
+require (
+ github.com/Azure/go-autorest v14.2.0+incompatible
+ github.com/Azure/go-autorest/autorest/adal v0.9.0
+ github.com/Azure/go-autorest/autorest/date v0.3.0
+ github.com/dimchansky/utfbom v1.1.0
+ github.com/mitchellh/go-homedir v1.1.0
+)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum
new file mode 100644
index 000000000..90d0dd239
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum
@@ -0,0 +1,23 @@
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest/adal v0.9.0 h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
+github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go
new file mode 100644
index 000000000..861ce2984
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go
@@ -0,0 +1,24 @@
+// +build modhack
+
+package cli
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go
index a336b958d..f45c3a516 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go
@@ -51,9 +51,13 @@ type User struct {
const azureProfileJSON = "azureProfile.json"
+func configDir() string {
+ return os.Getenv("AZURE_CONFIG_DIR")
+}
+
// ProfilePath returns the path where the Azure Profile is stored from the Azure CLI
func ProfilePath() (string, error) {
- if cfgDir := os.Getenv("AZURE_CONFIG_DIR"); cfgDir != "" {
+ if cfgDir := configDir(); cfgDir != "" {
return filepath.Join(cfgDir, azureProfileJSON), nil
}
return homedir.Expand("~/.azure/" + azureProfileJSON)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go
index 810075ba6..44ff446f6 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go
@@ -20,6 +20,7 @@ import (
"fmt"
"os"
"os/exec"
+ "path/filepath"
"regexp"
"runtime"
"strconv"
@@ -44,6 +45,8 @@ type Token struct {
UserID string `json:"userId"`
}
+const accessTokensJSON = "accessTokens.json"
+
// ToADALToken converts an Azure CLI `Token`` to an `adal.Token``
func (t Token) ToADALToken() (converted adal.Token, err error) {
tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn)
@@ -68,17 +71,19 @@ func (t Token) ToADALToken() (converted adal.Token, err error) {
// AccessTokensPath returns the path where access tokens are stored from the Azure CLI
// TODO(#199): add unit test.
func AccessTokensPath() (string, error) {
- // Azure-CLI allows user to customize the path of access tokens thorugh environment variable.
- var accessTokenPath = os.Getenv("AZURE_ACCESS_TOKEN_FILE")
- var err error
+ // Azure-CLI allows user to customize the path of access tokens through environment variable.
+ if accessTokenPath := os.Getenv("AZURE_ACCESS_TOKEN_FILE"); accessTokenPath != "" {
+ return accessTokenPath, nil
+ }
+
+ // Azure-CLI allows user to customize the path to Azure config directory through environment variable.
+ if cfgDir := configDir(); cfgDir != "" {
+ return filepath.Join(cfgDir, accessTokensJSON), nil
+ }
// Fallback logic to default path on non-cloud-shell environment.
// TODO(#200): remove the dependency on hard-coding path.
- if accessTokenPath == "" {
- accessTokenPath, err = homedir.Expand("~/.azure/accessTokens.json")
- }
-
- return accessTokenPath, err
+ return homedir.Expand("~/.azure/" + accessTokensJSON)
}
// ParseExpirationDate parses either a Azure CLI or CloudShell date into a time object
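With the refactor above, ProfilePath and AccessTokensPath resolve their files the same way: an explicit override (AZURE_ACCESS_TOKEN_FILE for tokens), then the shared AZURE_CONFIG_DIR via configDir(), then the ~/.azure defaults. A minimal sketch of how a caller might exercise that precedence; the directory value is illustrative:

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/Azure/go-autorest/autorest/azure/cli"
    )

    func main() {
    	// Point both lookups at a custom Azure CLI configuration directory.
    	os.Setenv("AZURE_CONFIG_DIR", "/tmp/azure-cli")

    	profile, err := cli.ProfilePath() // /tmp/azure-cli/azureProfile.json
    	if err != nil {
    		fmt.Println("profile path:", err)
    		return
    	}
    	tokens, err := cli.AccessTokensPath() // /tmp/azure-cli/accessTokens.json
    	if err != nil {
    		fmt.Println("access tokens path:", err)
    		return
    	}
    	fmt.Println(profile, tokens)
    }
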
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
index 85d3202af..efbb57617 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
@@ -22,9 +22,14 @@ import (
"strings"
)
-// EnvironmentFilepathName captures the name of the environment variable containing the path to the file
-// to be used while populating the Azure Environment.
-const EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH"
+const (
+ // EnvironmentFilepathName captures the name of the environment variable containing the path to the file
+ // to be used while populating the Azure Environment.
+ EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH"
+
+ // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud.
+ NotAvailable = "N/A"
+)
var environments = map[string]Environment{
"AZURECHINACLOUD": ChinaCloud,
@@ -33,29 +38,43 @@ var environments = map[string]Environment{
"AZUREUSGOVERNMENTCLOUD": USGovernmentCloud,
}
+// ResourceIdentifier contains a set of Azure resource IDs.
+type ResourceIdentifier struct {
+ Graph string `json:"graph"`
+ KeyVault string `json:"keyVault"`
+ Datalake string `json:"datalake"`
+ Batch string `json:"batch"`
+ OperationalInsights string `json:"operationalInsights"`
+ Storage string `json:"storage"`
+ Synapse string `json:"synapse"`
+}
+
// Environment represents a set of endpoints for each of Azure's Clouds.
type Environment struct {
- Name string `json:"name"`
- ManagementPortalURL string `json:"managementPortalURL"`
- PublishSettingsURL string `json:"publishSettingsURL"`
- ServiceManagementEndpoint string `json:"serviceManagementEndpoint"`
- ResourceManagerEndpoint string `json:"resourceManagerEndpoint"`
- ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"`
- GalleryEndpoint string `json:"galleryEndpoint"`
- KeyVaultEndpoint string `json:"keyVaultEndpoint"`
- GraphEndpoint string `json:"graphEndpoint"`
- ServiceBusEndpoint string `json:"serviceBusEndpoint"`
- BatchManagementEndpoint string `json:"batchManagementEndpoint"`
- StorageEndpointSuffix string `json:"storageEndpointSuffix"`
- SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"`
- TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"`
- KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"`
- ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"`
- ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
- ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"`
- ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
- CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"`
- TokenAudience string `json:"tokenAudience"`
+ Name string `json:"name"`
+ ManagementPortalURL string `json:"managementPortalURL"`
+ PublishSettingsURL string `json:"publishSettingsURL"`
+ ServiceManagementEndpoint string `json:"serviceManagementEndpoint"`
+ ResourceManagerEndpoint string `json:"resourceManagerEndpoint"`
+ ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"`
+ GalleryEndpoint string `json:"galleryEndpoint"`
+ KeyVaultEndpoint string `json:"keyVaultEndpoint"`
+ GraphEndpoint string `json:"graphEndpoint"`
+ ServiceBusEndpoint string `json:"serviceBusEndpoint"`
+ BatchManagementEndpoint string `json:"batchManagementEndpoint"`
+ StorageEndpointSuffix string `json:"storageEndpointSuffix"`
+ SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"`
+ TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"`
+ KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"`
+ ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"`
+ ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
+ ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"`
+ ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
+ CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"`
+ TokenAudience string `json:"tokenAudience"`
+ APIManagementHostNameSuffix string `json:"apiManagementHostNameSuffix"`
+ SynapseEndpointSuffix string `json:"synapseEndpointSuffix"`
+ ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"`
}
var (
@@ -82,6 +101,17 @@ var (
ContainerRegistryDNSSuffix: "azurecr.io",
CosmosDBDNSSuffix: "documents.azure.com",
TokenAudience: "https://management.azure.com/",
+ APIManagementHostNameSuffix: "azure-api.net",
+ SynapseEndpointSuffix: "dev.azuresynapse.net",
+ ResourceIdentifiers: ResourceIdentifier{
+ Graph: "https://graph.windows.net/",
+ KeyVault: "https://vault.azure.net",
+ Datalake: "https://datalake.azure.net/",
+ Batch: "https://batch.core.windows.net/",
+ OperationalInsights: "https://api.loganalytics.io",
+ Storage: "https://storage.azure.com/",
+ Synapse: "https://dev.azuresynapse.net",
+ },
}
// USGovernmentCloud is the cloud environment for the US Government
@@ -103,10 +133,21 @@ var (
KeyVaultDNSSuffix: "vault.usgovcloudapi.net",
ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net",
ServiceManagementVMDNSSuffix: "usgovcloudapp.net",
- ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us",
+ ResourceManagerVMDNSSuffix: "cloudapp.usgovcloudapi.net",
ContainerRegistryDNSSuffix: "azurecr.us",
CosmosDBDNSSuffix: "documents.azure.us",
TokenAudience: "https://management.usgovcloudapi.net/",
+ APIManagementHostNameSuffix: "azure-api.us",
+ SynapseEndpointSuffix: NotAvailable,
+ ResourceIdentifiers: ResourceIdentifier{
+ Graph: "https://graph.windows.net/",
+ KeyVault: "https://vault.usgovcloudapi.net",
+ Datalake: NotAvailable,
+ Batch: "https://batch.core.usgovcloudapi.net/",
+ OperationalInsights: "https://api.loganalytics.us",
+ Storage: "https://storage.azure.com/",
+ Synapse: NotAvailable,
+ },
}
// ChinaCloud is the cloud environment operated in China
@@ -128,10 +169,21 @@ var (
KeyVaultDNSSuffix: "vault.azure.cn",
ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn",
ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
- ResourceManagerVMDNSSuffix: "cloudapp.azure.cn",
+ ResourceManagerVMDNSSuffix: "cloudapp.chinacloudapi.cn",
ContainerRegistryDNSSuffix: "azurecr.cn",
CosmosDBDNSSuffix: "documents.azure.cn",
TokenAudience: "https://management.chinacloudapi.cn/",
+ APIManagementHostNameSuffix: "azure-api.cn",
+ SynapseEndpointSuffix: "dev.azuresynapse.azure.cn",
+ ResourceIdentifiers: ResourceIdentifier{
+ Graph: "https://graph.chinacloudapi.cn/",
+ KeyVault: "https://vault.azure.cn",
+ Datalake: NotAvailable,
+ Batch: "https://batch.chinacloudapi.cn/",
+ OperationalInsights: NotAvailable,
+ Storage: "https://storage.azure.com/",
+ Synapse: "https://dev.azuresynapse.net",
+ },
}
// GermanCloud is the cloud environment operated in Germany
@@ -154,9 +206,20 @@ var (
ServiceBusEndpointSuffix: "servicebus.cloudapi.de",
ServiceManagementVMDNSSuffix: "azurecloudapp.de",
ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de",
- // ContainerRegistryDNSSuffix: "", ACR not present yet in the German Cloud
- CosmosDBDNSSuffix: "documents.microsoftazure.de",
- TokenAudience: "https://management.microsoftazure.de/",
+ ContainerRegistryDNSSuffix: NotAvailable,
+ CosmosDBDNSSuffix: "documents.microsoftazure.de",
+ TokenAudience: "https://management.microsoftazure.de/",
+ APIManagementHostNameSuffix: NotAvailable,
+ SynapseEndpointSuffix: NotAvailable,
+ ResourceIdentifiers: ResourceIdentifier{
+ Graph: "https://graph.cloudapi.de/",
+ KeyVault: "https://vault.microsoftazure.de",
+ Datalake: NotAvailable,
+ Batch: "https://batch.cloudapi.de/",
+ OperationalInsights: NotAvailable,
+ Storage: "https://storage.azure.com/",
+ Synapse: NotAvailable,
+ },
}
)
@@ -194,3 +257,8 @@ func EnvironmentFromFile(location string) (unmarshaled Environment, err error) {
return
}
+
+// SetEnvironment updates the environment map with the specified values.
+func SetEnvironment(name string, env Environment) {
+ environments[strings.ToUpper(name)] = env
+}
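The new SetEnvironment helper registers a custom Environment under the upper-cased name in the package-level environments map, alongside the built-in clouds. A hedged sketch of registering and then looking up a private cloud; the endpoint values are placeholders, and EnvironmentFromName is the package's existing lookup helper:

    package main

    import (
    	"fmt"

    	"github.com/Azure/go-autorest/autorest/azure"
    )

    func main() {
    	// Placeholder endpoints for an illustrative private cloud.
    	custom := azure.Environment{
    		Name:                    "AzureCustomCloud",
    		ActiveDirectoryEndpoint: "https://login.custom.example/",
    		ResourceManagerEndpoint: "https://management.custom.example/",
    		TokenAudience:           "https://management.custom.example/",
    		SynapseEndpointSuffix:   azure.NotAvailable,
    	}

    	// SetEnvironment stores the entry under strings.ToUpper(name).
    	azure.SetEnvironment("AzureCustomCloud", custom)

    	env, err := azure.EnvironmentFromName("AZURECUSTOMCLOUD")
    	if err != nil {
    		fmt.Println("lookup failed:", err)
    		return
    	}
    	fmt.Println(env.ResourceManagerEndpoint)
    }
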
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go
index 86ce9f2b5..c6d39f686 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go
@@ -47,11 +47,15 @@ func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator {
if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration {
return resp, err
}
+
var re RequestError
- err = autorest.Respond(
- resp,
- autorest.ByUnmarshallingJSON(&re),
- )
+ if strings.Contains(r.Header.Get("Content-Type"), "xml") {
+ // XML errors (e.g. Storage Data Plane) only return the inner object
+ err = autorest.Respond(resp, autorest.ByUnmarshallingXML(&re.ServiceError))
+ } else {
+ err = autorest.Respond(resp, autorest.ByUnmarshallingJSON(&re))
+ }
+
if err != nil {
return resp, err
}
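The change above only swaps how the 409 error body is decoded (XML for data-plane style errors, JSON otherwise); the decorator is still attached to requests in the usual way. A rough sketch of that call pattern, with the URL standing in for a real ARM request and no credentials attached:

    package main

    import (
    	"context"
    	"fmt"
    	"net/http"

    	"github.com/Azure/go-autorest/autorest"
    	"github.com/Azure/go-autorest/autorest/azure"
    )

    func main() {
    	client := autorest.NewClientWithUserAgent("example/0.0.1")

    	// A stand-in request; real callers build this with the autorest preparer chain.
    	req, err := http.NewRequestWithContext(context.Background(), http.MethodGet,
    		"https://management.azure.com/subscriptions?api-version=2020-01-01", nil)
    	if err != nil {
    		fmt.Println(err)
    		return
    	}

    	// DoRetryWithRegistration retries 409 responses after trying to register the
    	// missing resource provider; the hunk above only changes how that 409 body
    	// is decoded (XML vs. JSON) before inspecting it.
    	resp, err := autorest.SendWithSender(client, req, azure.DoRetryWithRegistration(client))
    	if err != nil {
    		fmt.Println("request failed:", err)
    		return
    	}
    	defer resp.Body.Close()
    	fmt.Println(resp.Status)
    }
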
diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go
index 9520001fc..e04f9fd4e 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/client.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/client.go
@@ -22,12 +22,10 @@ import (
"io/ioutil"
"log"
"net/http"
- "net/http/cookiejar"
"strings"
"time"
"github.com/Azure/go-autorest/logger"
- "github.com/Azure/go-autorest/tracing"
)
const (
@@ -73,6 +71,22 @@ type Response struct {
*http.Response `json:"-"`
}
+// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code.
+// If there was no response (i.e. the underlying http.Response is nil) the return value is false.
+func (r Response) IsHTTPStatus(statusCode int) bool {
+ if r.Response == nil {
+ return false
+ }
+ return r.Response.StatusCode == statusCode
+}
+
+// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes.
+// If there was no response (i.e. the underlying http.Response is nil) or no status codes are provided
+// the return value is false.
+func (r Response) HasHTTPStatus(statusCodes ...int) bool {
+ return ResponseHasStatusCode(r.Response, statusCodes...)
+}
+
// LoggingInspector implements request and response inspectors that log the full request and
// response to a supplied log.
type LoggingInspector struct {
@@ -165,11 +179,34 @@ type Client struct {
// Set to true to skip attempted registration of resource providers (false by default).
SkipResourceProviderRegistration bool
+
+ // SendDecorators can be used to override the default chain of SendDecorators.
+ // This can be used to specify things like a custom retry SendDecorator.
+ // Set this to an empty slice to use no SendDecorators.
+ SendDecorators []SendDecorator
}
// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
// string.
func NewClientWithUserAgent(ua string) Client {
+ return newClient(ua, tls.RenegotiateNever)
+}
+
+// ClientOptions contains various Client configuration options.
+type ClientOptions struct {
+ // UserAgent is an optional user-agent string to append to the default user agent.
+ UserAgent string
+
+ // Renegotiation is an optional setting to control client-side TLS renegotiation.
+ Renegotiation tls.RenegotiationSupport
+}
+
+// NewClientWithOptions returns an instance of a Client with the specified values.
+func NewClientWithOptions(options ClientOptions) Client {
+ return newClient(options.UserAgent, options.Renegotiation)
+}
+
+func newClient(ua string, renegotiation tls.RenegotiationSupport) Client {
c := Client{
PollingDelay: DefaultPollingDelay,
PollingDuration: DefaultPollingDuration,
@@ -177,7 +214,7 @@ func NewClientWithUserAgent(ua string) Client {
RetryDuration: DefaultRetryDuration,
UserAgent: UserAgent(),
}
- c.Sender = c.sender()
+ c.Sender = c.sender(renegotiation)
c.AddToUserAgent(ua)
return c
}
@@ -221,34 +258,17 @@ func (c Client) Do(r *http.Request) (*http.Response, error) {
return true, v
},
})
- resp, err := SendWithSender(c.sender(), r)
+ resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r)
logger.Instance.WriteResponse(resp, logger.Filter{})
Respond(resp, c.ByInspecting())
return resp, err
}
// sender returns the Sender to which to send requests.
-func (c Client) sender() Sender {
+func (c Client) sender(renengotiation tls.RenegotiationSupport) Sender {
if c.Sender == nil {
- // Use behaviour compatible with DefaultTransport, but require TLS minimum version.
- var defaultTransport = http.DefaultTransport.(*http.Transport)
-
- tracing.Transport.Base = &http.Transport{
- Proxy: defaultTransport.Proxy,
- DialContext: defaultTransport.DialContext,
- MaxIdleConns: defaultTransport.MaxIdleConns,
- IdleConnTimeout: defaultTransport.IdleConnTimeout,
- TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
- ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
- TLSClientConfig: &tls.Config{
- MinVersion: tls.VersionTLS12,
- },
- }
-
- j, _ := cookiejar.New(nil)
- return &http.Client{Jar: j, Transport: tracing.Transport}
+ return sender(renengotiation)
}
-
return c.Sender
}
@@ -283,3 +303,21 @@ func (c Client) ByInspecting() RespondDecorator {
}
return c.ResponseInspector
}
+
+// Send sends the provided http.Request using the client's Sender or the default sender.
+// It returns the http.Response and possible error. It also accepts a, possibly empty,
+// default set of SendDecorators used when sending the request.
+// SendDecorators have the following precedence:
+// 1. In a request's context via WithSendDecorators()
+// 2. Specified on the client in SendDecorators
+// 3. The default values specified in this method
+func (c Client) Send(req *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+ if c.SendDecorators != nil {
+ decorators = c.SendDecorators
+ }
+ inCtx := req.Context().Value(ctxSendDecorators{})
+ if sd, ok := inCtx.([]SendDecorator); ok {
+ decorators = sd
+ }
+ return SendWithSender(c, req, decorators...)
+}
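NewClientWithOptions and the new SendDecorators field make both TLS renegotiation and the retry chain configurable per client, and Send picks decorators from the request context first, then the client, then its own defaults. A minimal sketch, assuming an arbitrary endpoint and retry policy:

    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"net/http"
    	"time"

    	"github.com/Azure/go-autorest/autorest"
    )

    func main() {
    	// Allow client-side TLS renegotiation for endpoints that request it.
    	client := autorest.NewClientWithOptions(autorest.ClientOptions{
    		UserAgent:     "example/0.0.1",
    		Renegotiation: tls.RenegotiateOnceAsClient,
    	})

    	// Replace the default retry chain: retry 5xx responses three times,
    	// starting from a two-second exponential backoff.
    	client.SendDecorators = []autorest.SendDecorator{
    		autorest.DoRetryForStatusCodes(3, 2*time.Second,
    			http.StatusInternalServerError,
    			http.StatusBadGateway,
    			http.StatusServiceUnavailable),
    	}

    	req, err := http.NewRequest(http.MethodGet, "https://example.com/", nil)
    	if err != nil {
    		fmt.Println(err)
    		return
    	}

    	// Send prefers decorators from the request context, then client.SendDecorators,
    	// then the defaults passed to Send itself.
    	resp, err := client.Send(req)
    	if err != nil {
    		fmt.Println("send failed:", err)
    		return
    	}
    	defer resp.Body.Close()
    	fmt.Println(resp.Status)
    }
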
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE
new file mode 100644
index 000000000..b9d6a27ea
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod
new file mode 100644
index 000000000..f88ecc402
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod
@@ -0,0 +1,5 @@
+module github.com/Azure/go-autorest/autorest/date
+
+go 1.12
+
+require github.com/Azure/go-autorest v14.2.0+incompatible
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.sum b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum
new file mode 100644
index 000000000..1fc56a962
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum
@@ -0,0 +1,2 @@
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go
new file mode 100644
index 000000000..4e0543207
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go
@@ -0,0 +1,24 @@
+// +build modhack
+
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod
new file mode 100644
index 000000000..b66c78da2
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/go.mod
@@ -0,0 +1,12 @@
+module github.com/Azure/go-autorest/autorest
+
+go 1.12
+
+require (
+ github.com/Azure/go-autorest v14.2.0+incompatible
+ github.com/Azure/go-autorest/autorest/adal v0.9.0
+ github.com/Azure/go-autorest/autorest/mocks v0.4.0
+ github.com/Azure/go-autorest/logger v0.2.0
+ github.com/Azure/go-autorest/tracing v0.6.0
+ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
+)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum
new file mode 100644
index 000000000..96d2ad0fc
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/go.sum
@@ -0,0 +1,23 @@
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest/adal v0.9.0 h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go
new file mode 100644
index 000000000..da65e1041
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go
@@ -0,0 +1,24 @@
+// +build modhack
+
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go
index 6d67bd733..6e8ed64eb 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go
@@ -16,7 +16,9 @@ package autorest
import (
"bytes"
+ "context"
"encoding/json"
+ "encoding/xml"
"fmt"
"io"
"io/ioutil"
@@ -31,11 +33,33 @@ const (
mimeTypeOctetStream = "application/octet-stream"
mimeTypeFormPost = "application/x-www-form-urlencoded"
- headerAuthorization = "Authorization"
- headerContentType = "Content-Type"
- headerUserAgent = "User-Agent"
+ headerAuthorization = "Authorization"
+ headerAuxAuthorization = "x-ms-authorization-auxiliary"
+ headerContentType = "Content-Type"
+ headerUserAgent = "User-Agent"
)
+// used as a key type in context.WithValue()
+type ctxPrepareDecorators struct{}
+
+// WithPrepareDecorators adds the specified PrepareDecorators to the provided context.
+// If no PrepareDecorators are provided the context is unchanged.
+func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context {
+ if len(prepareDecorator) == 0 {
+ return ctx
+ }
+ return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator)
+}
+
+// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators.
+func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator {
+ inCtx := ctx.Value(ctxPrepareDecorators{})
+ if pd, ok := inCtx.([]PrepareDecorator); ok {
+ return pd
+ }
+ return defaultPrepareDecorators
+}
+
// Preparer is the interface that wraps the Prepare method.
//
// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations
@@ -190,6 +214,9 @@ func AsGet() PrepareDecorator { return WithMethod("GET") }
// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD.
func AsHead() PrepareDecorator { return WithMethod("HEAD") }
+// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE.
+func AsMerge() PrepareDecorator { return WithMethod("MERGE") }
+
// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS.
func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") }
@@ -225,6 +252,25 @@ func WithBaseURL(baseURL string) PrepareDecorator {
}
}
+// WithBytes returns a PrepareDecorator that takes a list of bytes
+// which passes the bytes directly to the body
+func WithBytes(input *[]byte) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ if input == nil {
+ return r, fmt.Errorf("Input Bytes was nil")
+ }
+
+ r.ContentLength = int64(len(*input))
+ r.Body = ioutil.NopCloser(bytes.NewReader(*input))
+ }
+ return r, err
+ })
+ }
+}
+
// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
@@ -377,6 +423,28 @@ func WithJSON(v interface{}) PrepareDecorator {
}
}
+// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the
+// request and sets the Content-Length header.
+func WithXML(v interface{}) PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err == nil {
+ b, err := xml.Marshal(v)
+ if err == nil {
+ // we have to tack on an XML header
+ withHeader := xml.Header + string(b)
+ bytesWithHeader := []byte(withHeader)
+
+ r.ContentLength = int64(len(bytesWithHeader))
+ r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader))
+ }
+ }
+ return r, err
+ })
+ }
+}
+
// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path
// is absolute (that is, it begins with a "/"), it replaces the existing path.
func WithPath(path string) PrepareDecorator {
@@ -455,7 +523,7 @@ func parseURL(u *url.URL, path string) (*url.URL, error) {
// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters
// given in the supplied map (i.e., key=value).
func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator {
- parameters := ensureValueStrings(queryParameters)
+ parameters := MapToValues(queryParameters)
return func(p Preparer) Preparer {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
@@ -463,14 +531,16 @@ func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorato
if r.URL == nil {
return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
}
-
v := r.URL.Query()
for key, value := range parameters {
- d, err := url.QueryUnescape(value)
- if err != nil {
- return r, err
+ for i := range value {
+ d, err := url.QueryUnescape(value[i])
+ if err != nil {
+ return r, err
+ }
+ value[i] = d
}
- v.Add(key, d)
+ v[key] = value
}
r.URL.RawQuery = v.Encode()
}
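WithXML and the reworked WithQueryParameters (which now preserves multi-valued parameters instead of flattening them) drop into the usual preparer chain. A hedged sketch; Widget is a made-up payload type, not part of the package:

    package main

    import (
    	"context"
    	"encoding/xml"
    	"fmt"
    	"net/http"

    	"github.com/Azure/go-autorest/autorest"
    )

    // Widget is an illustrative XML payload, not part of the vendored package.
    type Widget struct {
    	XMLName xml.Name `xml:"Widget"`
    	Name    string   `xml:"Name"`
    }

    func main() {
    	preparer := autorest.CreatePreparer(
    		autorest.AsPut(),
    		autorest.WithBaseURL("https://example.com"),
    		autorest.WithPath("/widgets"),
    		// Multi-valued parameters are now kept as repeated keys (?tag=a&tag=b).
    		autorest.WithQueryParameters(map[string]interface{}{"tag": []string{"a", "b"}}),
    		// WithXML marshals the body and prepends the standard XML header.
    		autorest.WithXML(Widget{Name: "example"}),
    	)

    	req, err := preparer.Prepare((&http.Request{}).WithContext(context.Background()))
    	if err != nil {
    		fmt.Println("prepare failed:", err)
    		return
    	}
    	fmt.Println(req.Method, req.URL.String(), req.ContentLength)
    }
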
diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go
index a908a0adb..349e1963a 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/responder.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go
@@ -153,6 +153,25 @@ func ByClosingIfError() RespondDecorator {
}
}
+// ByUnmarshallingBytes returns a RespondDecorator that copies the Bytes returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingBytes(v *[]byte) RespondDecorator {
+ return func(r Responder) Responder {
+ return ResponderFunc(func(resp *http.Response) error {
+ err := r.Respond(resp)
+ if err == nil {
+ bytes, errInner := ioutil.ReadAll(resp.Body)
+ if errInner != nil {
+ err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+ } else {
+ *v = bytes
+ }
+ }
+ return err
+ })
+ }
+}
+
// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
// response Body into the value pointed to by v.
func ByUnmarshallingJSON(v interface{}) RespondDecorator {
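ByUnmarshallingBytes fills a caller-supplied byte slice instead of decoding the body as JSON or XML, which suits opaque payloads. A minimal sketch of a responder chain using it; the target URL is illustrative:

    package main

    import (
    	"fmt"
    	"net/http"

    	"github.com/Azure/go-autorest/autorest"
    )

    func main() {
    	resp, err := http.Get("https://example.com/")
    	if err != nil {
    		fmt.Println("request failed:", err)
    		return
    	}

    	// Capture the raw body instead of decoding it as JSON or XML.
    	var raw []byte
    	err = autorest.Respond(resp,
    		autorest.WithErrorUnlessStatusCode(http.StatusOK),
    		autorest.ByUnmarshallingBytes(&raw),
    		autorest.ByClosing())
    	if err != nil {
    		fmt.Println("respond failed:", err)
    		return
    	}
    	fmt.Println(len(raw), "bytes")
    }
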
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go
index 6665d7c00..704f3e55e 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/sender.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -15,16 +15,40 @@ package autorest
// limitations under the License.
import (
+ "context"
+ "crypto/tls"
"fmt"
"log"
"math"
"net/http"
+ "net/http/cookiejar"
"strconv"
"time"
"github.com/Azure/go-autorest/tracing"
)
+// used as a key type in context.WithValue()
+type ctxSendDecorators struct{}
+
+// WithSendDecorators adds the specified SendDecorators to the provided context.
+// If no SendDecorators are provided the context is unchanged.
+func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context {
+ if len(sendDecorator) == 0 {
+ return ctx
+ }
+ return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator)
+}
+
+// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators.
+func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator {
+ inCtx := ctx.Value(ctxSendDecorators{})
+ if sd, ok := inCtx.([]SendDecorator); ok {
+ return sd
+ }
+ return defaultSendDecorators
+}
+
// Sender is the interface that wraps the Do method to send HTTP requests.
//
// The standard http.Client conforms to this interface.
@@ -47,7 +71,7 @@ type SendDecorator func(Sender) Sender
// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
func CreateSender(decorators ...SendDecorator) Sender {
- return DecorateSender(&http.Client{}, decorators...)
+ return DecorateSender(sender(tls.RenegotiateNever), decorators...)
}
// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to
@@ -70,7 +94,7 @@ func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
//
// Send will not poll or retry requests.
func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
- return SendWithSender(&http.Client{Transport: tracing.Transport}, r, decorators...)
+ return SendWithSender(sender(tls.RenegotiateNever), r, decorators...)
}
// SendWithSender sends the passed http.Request, through the provided Sender, returning the
@@ -82,6 +106,29 @@ func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*ht
return DecorateSender(s, decorators...).Do(r)
}
+func sender(renengotiation tls.RenegotiationSupport) Sender {
+ // Use behaviour compatible with DefaultTransport, but require TLS minimum version.
+ defaultTransport := http.DefaultTransport.(*http.Transport)
+ transport := &http.Transport{
+ Proxy: defaultTransport.Proxy,
+ DialContext: defaultTransport.DialContext,
+ MaxIdleConns: defaultTransport.MaxIdleConns,
+ IdleConnTimeout: defaultTransport.IdleConnTimeout,
+ TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
+ ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+ TLSClientConfig: &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ Renegotiation: renengotiation,
+ },
+ }
+ var roundTripper http.RoundTripper = transport
+ if tracing.IsEnabled() {
+ roundTripper = tracing.NewTransport(transport)
+ }
+ j, _ := cookiejar.New(nil)
+ return &http.Client{Jar: j, Transport: roundTripper}
+}
+
// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
// invoking the Sender. The delay may be terminated by closing the optional channel on the
// http.Request. If canceled, no further Senders are invoked.
@@ -196,6 +243,7 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
if err != nil {
return resp, err
}
+ DrainResponseBody(resp)
resp, err = s.Do(rr.Request())
if err == nil {
return resp, err
@@ -209,55 +257,90 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
}
}
+// Count429AsRetry indicates that a 429 response should be included as a retry attempt.
+var Count429AsRetry = true
+
+// Max429Delay is the maximum duration to wait between retries on a 429 if no Retry-After header was received.
+var Max429Delay time.Duration
+
// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified
// number of attempts, exponentially backing off between requests using the supplied backoff
-// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on
-// the http.Request.
+// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request.
+// NOTE: Code http.StatusTooManyRequests (429) is counted against the number of attempts unless Count429AsRetry is set to false.
func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
return func(s Sender) Sender {
- return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
- rr := NewRetriableRequest(r)
- // Increment to add the first call (attempts denotes number of retries)
- for attempt := 0; attempt < attempts+1; {
- err = rr.Prepare()
- if err != nil {
- return resp, err
- }
- resp, err = s.Do(rr.Request())
- // if the error isn't temporary don't bother retrying
- if err != nil && !IsTemporaryNetworkError(err) {
- return nil, err
- }
- // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication
- // resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
- if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
- return resp, err
- }
- delayed := DelayWithRetryAfter(resp, r.Context().Done())
- if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) {
- return resp, r.Context().Err()
- }
- // don't count a 429 against the number of attempts
- // so that we continue to retry until it succeeds
- if resp == nil || resp.StatusCode != http.StatusTooManyRequests {
- attempt++
- }
- }
- return resp, err
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, 0, codes...)
})
}
}
-// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in
-// responses with status code 429
+// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the
+// specified number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater
+// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request.
+func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator {
+ return func(s Sender) Sender {
+ return SenderFunc(func(r *http.Request) (*http.Response, error) {
+ return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, cap, codes...)
+ })
+ }
+}
+
+func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) {
+ rr := NewRetriableRequest(r)
+ // Increment to add the first call (attempts denotes number of retries)
+ for attempt, delayCount := 0, 0; attempt < attempts+1; {
+ err = rr.Prepare()
+ if err != nil {
+ return
+ }
+ DrainResponseBody(resp)
+ resp, err = s.Do(rr.Request())
+ // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication
+ // resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
+ if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
+ return resp, err
+ }
+ delayed := DelayWithRetryAfter(resp, r.Context().Done())
+ // if this was a 429 set the delay cap as specified.
+ // applicable only in the absence of a retry-after header.
+ if resp != nil && resp.StatusCode == http.StatusTooManyRequests {
+ cap = Max429Delay
+ }
+ if !delayed && !DelayForBackoffWithCap(backoff, cap, delayCount, r.Context().Done()) {
+ return resp, r.Context().Err()
+ }
+ // when count429 == false don't count a 429 against the number
+ // of attempts so that we continue to retry until it succeeds
+ if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) {
+ attempt++
+ }
+ // delay count is tracked separately from attempts to
+ // ensure that 429 participates in exponential back-off
+ delayCount++
+ }
+ return resp, err
+}
+
+// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header.
+// The value of Retry-After can be either the number of seconds or a date in RFC1123 format.
+// The function returns true after successfully waiting for the specified duration. If there is
+// no Retry-After header or the wait is cancelled the return value is false.
func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool {
if resp == nil {
return false
}
- retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After"))
- if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 {
+ var dur time.Duration
+ ra := resp.Header.Get("Retry-After")
+ if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
+ dur = time.Duration(retryAfter) * time.Second
+ } else if t, err := time.Parse(time.RFC1123, ra); err == nil {
+ dur = t.Sub(time.Now())
+ }
+ if dur > 0 {
select {
- case <-time.After(time.Duration(retryAfter) * time.Second):
+ case <-time.After(dur):
return true
case <-cancel:
return false
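
A small usage sketch (values are illustrative) of the two header formats the reworked function accepts, integer seconds and an RFC1123 date. Note the old status-code check was dropped, so any response carrying a usable Retry-After header is honoured.

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	cancel := make(chan struct{})
	resp := &http.Response{StatusCode: http.StatusTooManyRequests, Header: http.Header{}}

	// Integer form: wait the given number of seconds.
	resp.Header.Set("Retry-After", "1")
	fmt.Println(autorest.DelayWithRetryAfter(resp, cancel)) // true, after ~1s

	// RFC1123 form: wait until the given absolute time.
	resp.Header.Set("Retry-After", time.Now().Add(time.Second).UTC().Format(time.RFC1123))
	fmt.Println(autorest.DelayWithRetryAfter(resp, cancel)) // true, after ~1s

	// No header (or an unparseable value) means no delay.
	resp.Header.Del("Retry-After")
	fmt.Println(autorest.DelayWithRetryAfter(resp, cancel)) // false
}
```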
@@ -280,6 +363,7 @@ func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
if err != nil {
return resp, err
}
+ DrainResponseBody(resp)
resp, err = s.Do(rr.Request())
if err == nil {
return resp, err
@@ -317,8 +401,22 @@ func WithLogging(logger *log.Logger) SendDecorator {
// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
// count.
func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
+ return DelayForBackoffWithCap(backoff, 0, attempt, cancel)
+}
+
+// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of
+// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be set
+// to zero for no delay. To cap the maximum possible delay, specify a value greater than zero for cap.
+// The delay may be canceled by closing the passed channel. If terminated early, returns false.
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
+// count.
+func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool {
+ d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second
+ if cap > 0 && d > cap {
+ d = cap
+ }
select {
- case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second):
+ case <-time.After(d):
return true
case <-cancel:
return false
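
A quick worked example (illustrative durations) of the schedule the capped formula produces, mirroring the computation above: the delay doubles per attempt until it hits the cap.

```go
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	backoff := 2 * time.Second
	cap := 10 * time.Second

	for attempt := 0; attempt < 5; attempt++ {
		// Same formula as DelayForBackoffWithCap: backoff seconds * 2^attempt, then capped.
		d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second
		if cap > 0 && d > cap {
			d = cap
		}
		fmt.Printf("attempt %d -> %v\n", attempt, d)
	}
	// Prints: 2s, 4s, 8s, 10s, 10s
}
```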
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE
new file mode 100644
index 000000000..b9d6a27ea
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go
index fdda2ce1a..86694bd25 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go
@@ -145,3 +145,8 @@ func Float64(i *float64) float64 {
func Float64Ptr(i float64) *float64 {
return &i
}
+
+// ByteSlicePtr returns a pointer to the passed byte slice.
+func ByteSlicePtr(b []byte) *[]byte {
+ return &b
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go.mod b/vendor/github.com/Azure/go-autorest/autorest/to/go.mod
new file mode 100644
index 000000000..8fd041e2b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/go.mod
@@ -0,0 +1,5 @@
+module github.com/Azure/go-autorest/autorest/to
+
+go 1.12
+
+require github.com/Azure/go-autorest v14.2.0+incompatible
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go.sum b/vendor/github.com/Azure/go-autorest/autorest/to/go.sum
new file mode 100644
index 000000000..1fc56a962
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/go.sum
@@ -0,0 +1,2 @@
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go
new file mode 100644
index 000000000..b7310f6b8
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go
@@ -0,0 +1,24 @@
+// +build modhack
+
+package to
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go
index 08cf11c11..67baab2ce 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/utility.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go
@@ -20,6 +20,7 @@ import (
"encoding/xml"
"fmt"
"io"
+ "io/ioutil"
"net"
"net/http"
"net/url"
@@ -140,18 +141,18 @@ func MapToValues(m map[string]interface{}) url.Values {
return v
}
-// AsStringSlice method converts interface{} to []string. This expects a
-//that the parameter passed to be a slice or array of a type that has the underlying
-//type a string.
+// AsStringSlice method converts interface{} to []string.
+// s must be of type slice or array or an error is returned.
+// Each element of s will be converted to its string representation.
func AsStringSlice(s interface{}) ([]string, error) {
v := reflect.ValueOf(s)
if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
- return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.")
+ return nil, NewError("autorest", "AsStringSlice", "the value's type is not a slice or array.")
}
stringSlice := make([]string, 0, v.Len())
for i := 0; i < v.Len(); i++ {
- stringSlice = append(stringSlice, v.Index(i).String())
+ stringSlice = append(stringSlice, fmt.Sprintf("%v", v.Index(i)))
}
return stringSlice, nil
}
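
A short sketch of what the %v-based conversion changes in practice: previously, non-string element kinds came back as reflect's placeholder text (e.g. "<int Value>"), whereas fmt unwraps a reflect.Value and renders the concrete value it holds.

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Non-string elements now convert via fmt.Sprintf("%v", ...).
	s, err := autorest.AsStringSlice([]int{1, 2, 3})
	fmt.Println(s, err) // [1 2 3] <nil>

	// Anything that is not a slice or array is still rejected.
	_, err = autorest.AsStringSlice("not a slice")
	fmt.Println(err != nil) // true
}
```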
@@ -226,3 +227,13 @@ func IsTemporaryNetworkError(err error) bool {
}
return false
}
+
+// DrainResponseBody reads the response body then closes it.
+func DrainResponseBody(resp *http.Response) error {
+ if resp != nil && resp.Body != nil {
+ _, err := io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+ return err
+ }
+ return nil
+}
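
This helper exists so retry loops (see doRetryForStatusCodesImpl above) can discard the previous body before re-sending, which lets the HTTP transport reuse the underlying connection. A hedged sketch of the same pattern outside the decorator machinery; the helper name and retry condition are illustrative only.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// retryOnce re-issues the request a single time on a 5xx response,
// draining the first body so the connection can be reused.
func retryOnce(c *http.Client, req *http.Request) (*http.Response, error) {
	resp, err := c.Do(req)
	if err == nil && resp.StatusCode >= 500 {
		_ = autorest.DrainResponseBody(resp)
		resp, err = c.Do(req)
	}
	return resp, err
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
	resp, err := retryOnce(http.DefaultClient, req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```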
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE
new file mode 100644
index 000000000..b9d6a27ea
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod b/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod
new file mode 100644
index 000000000..a0a69e9ae
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod
@@ -0,0 +1,8 @@
+module github.com/Azure/go-autorest/autorest/validation
+
+go 1.12
+
+require (
+ github.com/Azure/go-autorest v14.2.0+incompatible
+ github.com/stretchr/testify v1.3.0
+)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum b/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum
new file mode 100644
index 000000000..6c1119aab
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum
@@ -0,0 +1,9 @@
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go
new file mode 100644
index 000000000..cf1436291
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go
@@ -0,0 +1,24 @@
+// +build modhack
+
+package validation
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
index ae987f8fa..65899b69b 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
@@ -398,11 +398,3 @@ func toInt64(v interface{}) (int64, bool) {
}
return 0, false
}
-
-// NewErrorWithValidationError appends package type and method name in
-// validation error.
-//
-// Deprecated: Please use validation.NewError() instead.
-func NewErrorWithValidationError(err error, packageType, method string) error {
- return NewError(packageType, method, err.Error())
-}
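
The deprecated wrapper is dropped outright; per its removed body, callers can substitute validation.NewError directly. A minimal migration sketch follows, assuming the usual NewError(packageType, method, message, args...) signature; the package and method names are hypothetical.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/Azure/go-autorest/autorest/validation"
)

func main() {
	err := errors.New("parameter 'name' cannot be empty")

	// before (removed): validation.NewErrorWithValidationError(err, "hypothetical.Client", "Create")
	// after, equivalent per the deleted implementation:
	wrapped := validation.NewError("hypothetical.Client", "Create", err.Error())
	fmt.Println(wrapped)
}
```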
diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go
index 773fb9612..713e23581 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/version.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/version.go
@@ -19,7 +19,7 @@ import (
"runtime"
)
-const number = "v11.7.1"
+const number = "v14.2.1"
var (
userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
diff --git a/vendor/github.com/Azure/go-autorest/azure-pipelines.yml b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml
new file mode 100644
index 000000000..6fb8404fd
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml
@@ -0,0 +1,105 @@
+variables:
+ GOPATH: '$(system.defaultWorkingDirectory)/work'
+ sdkPath: '$(GOPATH)/src/github.com/$(build.repository.name)'
+
+jobs:
+ - job: 'goautorest'
+ displayName: 'Run go-autorest CI Checks'
+
+ strategy:
+ matrix:
+ Linux_Go113:
+ vm.image: 'ubuntu-18.04'
+ go.version: '1.13'
+ Linux_Go114:
+ vm.image: 'ubuntu-18.04'
+ go.version: '1.14'
+
+ pool:
+ vmImage: '$(vm.image)'
+
+ steps:
+ - task: GoTool@0
+ inputs:
+ version: '$(go.version)'
+ displayName: "Select Go Version"
+
+ - script: |
+ set -e
+ mkdir -p '$(GOPATH)/bin'
+ mkdir -p '$(sdkPath)'
+ shopt -s extglob
+ mv !(work) '$(sdkPath)'
+ echo '##vso[task.prependpath]$(GOPATH)/bin'
+ displayName: 'Create Go Workspace'
+
+ - script: |
+ set -e
+ curl -sSL https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
+ dep ensure -v
+ go install ./vendor/golang.org/x/lint/golint
+ go get github.com/jstemmer/go-junit-report
+ go get github.com/axw/gocov/gocov
+ go get github.com/AlekSi/gocov-xml
+ go get -u github.com/matm/gocov-html
+ workingDirectory: '$(sdkPath)'
+ displayName: 'Install Dependencies'
+
+ - script: |
+ go vet ./autorest/...
+ go vet ./logger/...
+ go vet ./tracing/...
+ workingDirectory: '$(sdkPath)'
+ displayName: 'Vet'
+
+ - script: |
+ go build -v ./autorest/...
+ go build -v ./logger/...
+ go build -v ./tracing/...
+ workingDirectory: '$(sdkPath)'
+ displayName: 'Build'
+
+ - script: |
+ set -e
+ go test -race -v -coverprofile=coverage.txt -covermode atomic ./autorest/... ./logger/... ./tracing/... 2>&1 | go-junit-report > report.xml
+ gocov convert coverage.txt > coverage.json
+ gocov-xml < coverage.json > coverage.xml
+ gocov-html < coverage.json > coverage.html
+ workingDirectory: '$(sdkPath)'
+ displayName: 'Run Tests'
+
+ - script: grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee >&2
+ workingDirectory: '$(sdkPath)'
+ displayName: 'Copyright Header Check'
+ failOnStderr: true
+ condition: succeededOrFailed()
+
+ - script: |
+ gofmt -s -l -w ./autorest/. >&2
+ gofmt -s -l -w ./logger/. >&2
+ gofmt -s -l -w ./tracing/. >&2
+ workingDirectory: '$(sdkPath)'
+ displayName: 'Format Check'
+ failOnStderr: true
+ condition: succeededOrFailed()
+
+ - script: |
+ golint ./autorest/... >&2
+ golint ./logger/... >&2
+ golint ./tracing/... >&2
+ workingDirectory: '$(sdkPath)'
+ displayName: 'Linter Check'
+ failOnStderr: true
+ condition: succeededOrFailed()
+
+ - task: PublishTestResults@2
+ inputs:
+ testRunner: JUnit
+ testResultsFiles: $(sdkPath)/report.xml
+ failTaskOnFailedTests: true
+
+ - task: PublishCodeCoverageResults@1
+ inputs:
+ codeCoverageTool: Cobertura
+ summaryFileLocation: $(sdkPath)/coverage.xml
+ additionalCodeCoverageFiles: $(sdkPath)/coverage.html
diff --git a/vendor/github.com/Azure/go-autorest/doc.go b/vendor/github.com/Azure/go-autorest/doc.go
new file mode 100644
index 000000000..99ae6ca98
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/doc.go
@@ -0,0 +1,18 @@
+/*
+Package go-autorest provides an HTTP request client for use with Autorest-generated API client packages.
+*/
+package go_autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE
new file mode 100644
index 000000000..b9d6a27ea
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/logger/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/logger/go.mod b/vendor/github.com/Azure/go-autorest/logger/go.mod
new file mode 100644
index 000000000..bedeaee03
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/logger/go.mod
@@ -0,0 +1,5 @@
+module github.com/Azure/go-autorest/logger
+
+go 1.12
+
+require github.com/Azure/go-autorest v14.2.0+incompatible
diff --git a/vendor/github.com/Azure/go-autorest/logger/go.sum b/vendor/github.com/Azure/go-autorest/logger/go.sum
new file mode 100644
index 000000000..1fc56a962
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/logger/go.sum
@@ -0,0 +1,2 @@
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
diff --git a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go
new file mode 100644
index 000000000..0aa27680d
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go
@@ -0,0 +1,24 @@
+// +build modhack
+
+package logger
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest"
diff --git a/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/vendor/github.com/Azure/go-autorest/tracing/LICENSE
new file mode 100644
index 000000000..b9d6a27ea
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/tracing/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Microsoft Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.mod b/vendor/github.com/Azure/go-autorest/tracing/go.mod
new file mode 100644
index 000000000..a2cdec78c
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/tracing/go.mod
@@ -0,0 +1,5 @@
+module github.com/Azure/go-autorest/tracing
+
+go 1.12
+
+require github.com/Azure/go-autorest v14.2.0+incompatible
diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.sum b/vendor/github.com/Azure/go-autorest/tracing/go.sum
new file mode 100644
index 000000000..1fc56a962
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/tracing/go.sum
@@ -0,0 +1,2 @@
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
diff --git a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go
new file mode 100644
index 000000000..e163975cd
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go
@@ -0,0 +1,24 @@
+// +build modhack
+
+package tracing
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the github.com/Azure/go-autorest import, won't actually become part of
+// the resultant binary.
+
+// Necessary for safely adding multi-module repo.
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "github.com/Azure/go-autorest"
diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go
index cd61cb18b..0e7a6e962 100644
--- a/vendor/github.com/Azure/go-autorest/tracing/tracing.go
+++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go
@@ -16,175 +16,52 @@ package tracing
import (
"context"
- "fmt"
"net/http"
- "os"
-
- "contrib.go.opencensus.io/exporter/ocagent"
- "go.opencensus.io/plugin/ochttp"
- "go.opencensus.io/plugin/ochttp/propagation/tracecontext"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/trace"
)
+// Tracer represents an HTTP tracing facility.
+type Tracer interface {
+ NewTransport(base *http.Transport) http.RoundTripper
+ StartSpan(ctx context.Context, name string) context.Context
+ EndSpan(ctx context.Context, httpStatusCode int, err error)
+}
+
var (
- // Transport is the default tracing RoundTripper. The custom options setter will control
- // if traces are being emitted or not.
- Transport = &ochttp.Transport{
- Propagation: &tracecontext.HTTPFormat{},
- GetStartOptions: getStartOptions,
- }
-
- // enabled is the flag for marking if tracing is enabled.
- enabled = false
-
- // Sampler is the tracing sampler. If tracing is disabled it will never sample. Otherwise
- // it will be using the parent sampler or the default.
- sampler = trace.NeverSample()
-
- // Views for metric instrumentation.
- views = map[string]*view.View{}
-
- // the trace exporter
- traceExporter trace.Exporter
+ tracer Tracer
)
-func init() {
- enableFromEnv()
+// Register will register the provided Tracer. Pass nil to unregister a Tracer.
+func Register(t Tracer) {
+ tracer = t
}
-func enableFromEnv() {
- _, ok := os.LookupEnv("AZURE_SDK_TRACING_ENABLED")
- _, legacyOk := os.LookupEnv("AZURE_SDK_TRACING_ENABELD")
- if ok || legacyOk {
- agentEndpoint, ok := os.LookupEnv("OCAGENT_TRACE_EXPORTER_ENDPOINT")
-
- if ok {
- EnableWithAIForwarding(agentEndpoint)
- } else {
- Enable()
- }
- }
-}
-
-// IsEnabled returns true if monitoring is enabled for the sdk.
+// IsEnabled returns true if a Tracer has been registered.
func IsEnabled() bool {
- return enabled
+ return tracer != nil
}
-// Enable will start instrumentation for metrics and traces.
-func Enable() error {
- enabled = true
- sampler = nil
-
- err := initStats()
- return err
+// NewTransport creates a new instrumenting http.RoundTripper for the
+// registered Tracer. If no Tracer has been registered it returns nil.
+func NewTransport(base *http.Transport) http.RoundTripper {
+ if tracer != nil {
+ return tracer.NewTransport(base)
+ }
+ return nil
}
-// Disable will disable instrumentation for metrics and traces.
-func Disable() {
- disableStats()
- sampler = trace.NeverSample()
- if traceExporter != nil {
- trace.UnregisterExporter(traceExporter)
- }
- enabled = false
-}
-
-// EnableWithAIForwarding will start instrumentation and will connect to app insights forwarder
-// exporter making the metrics and traces available in app insights.
-func EnableWithAIForwarding(agentEndpoint string) (err error) {
- err = Enable()
- if err != nil {
- return err
- }
-
- traceExporter, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithAddress(agentEndpoint))
- if err != nil {
- return err
- }
- trace.RegisterExporter(traceExporter)
- return
-}
-
-// getStartOptions is the custom options setter for the ochttp package.
-func getStartOptions(*http.Request) trace.StartOptions {
- return trace.StartOptions{
- Sampler: sampler,
- }
-}
-
-// initStats registers the views for the http metrics
-func initStats() (err error) {
- clientViews := []*view.View{
- ochttp.ClientCompletedCount,
- ochttp.ClientRoundtripLatencyDistribution,
- ochttp.ClientReceivedBytesDistribution,
- ochttp.ClientSentBytesDistribution,
- }
- for _, cv := range clientViews {
- vn := fmt.Sprintf("Azure/go-autorest/tracing-%s", cv.Name)
- views[vn] = cv.WithName(vn)
- err = view.Register(views[vn])
- if err != nil {
- return err
- }
- }
- return
-}
-
-// disableStats will unregister the previously registered metrics
-func disableStats() {
- for _, v := range views {
- view.Unregister(v)
- }
-}
-
-// StartSpan starts a trace span
+// StartSpan starts a trace span with the specified name, associating it with the
+// provided context. Has no effect if a Tracer has not been registered.
func StartSpan(ctx context.Context, name string) context.Context {
- ctx, _ = trace.StartSpan(ctx, name, trace.WithSampler(sampler))
+ if tracer != nil {
+ return tracer.StartSpan(ctx, name)
+ }
return ctx
}
-// EndSpan ends a previously started span stored in the context
+// EndSpan ends a previously started span stored in the context.
+// Has no effect if a Tracer has not been registered.
func EndSpan(ctx context.Context, httpStatusCode int, err error) {
- span := trace.FromContext(ctx)
-
- if span == nil {
- return
- }
-
- if err != nil {
- span.SetStatus(trace.Status{Message: err.Error(), Code: toTraceStatusCode(httpStatusCode)})
- }
- span.End()
-}
-
-// toTraceStatusCode converts HTTP Codes to OpenCensus codes as defined
-// at https://github.com/census-instrumentation/opencensus-specs/blob/master/trace/HTTP.md#status
-func toTraceStatusCode(httpStatusCode int) int32 {
- switch {
- case http.StatusOK <= httpStatusCode && httpStatusCode < http.StatusBadRequest:
- return trace.StatusCodeOK
- case httpStatusCode == http.StatusBadRequest:
- return trace.StatusCodeInvalidArgument
- case httpStatusCode == http.StatusUnauthorized: // 401 is actually unauthenticated.
- return trace.StatusCodeUnauthenticated
- case httpStatusCode == http.StatusForbidden:
- return trace.StatusCodePermissionDenied
- case httpStatusCode == http.StatusNotFound:
- return trace.StatusCodeNotFound
- case httpStatusCode == http.StatusTooManyRequests:
- return trace.StatusCodeResourceExhausted
- case httpStatusCode == 499:
- return trace.StatusCodeCancelled
- case httpStatusCode == http.StatusNotImplemented:
- return trace.StatusCodeUnimplemented
- case httpStatusCode == http.StatusServiceUnavailable:
- return trace.StatusCodeUnavailable
- case httpStatusCode == http.StatusGatewayTimeout:
- return trace.StatusCodeDeadlineExceeded
- default:
- return trace.StatusCodeUnknown
+ if tracer != nil {
+ tracer.EndSpan(ctx, httpStatusCode, err)
}
}
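
The hunk above replaces go-autorest's built-in OpenCensus instrumentation with a pluggable `Tracer` interface: the SDK now only forwards `StartSpan`, `EndSpan`, and `NewTransport` to whatever implementation is registered via `tracing.Register`, and does nothing when none is. A minimal sketch of what a consumer-side registration could look like is below; only the `Tracer` interface, `Register`, and the import path come from the diff itself, while `logTracer`, `spanNameKey`, and the `main` wiring are hypothetical illustrations, not part of this change.

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/Azure/go-autorest/tracing"
)

// logTracer is a trivial Tracer that logs span boundaries and returns the
// base transport unchanged. A real implementation would delegate to an
// OpenCensus, OpenTelemetry, or other tracing backend.
type logTracer struct{}

// spanNameKey is a private context key used to carry the span name from
// StartSpan to EndSpan in this sketch.
type spanNameKey struct{}

func (logTracer) NewTransport(base *http.Transport) http.RoundTripper {
	// No instrumentation here; *http.Transport already satisfies RoundTripper.
	return base
}

func (logTracer) StartSpan(ctx context.Context, name string) context.Context {
	log.Printf("start span: %s", name)
	// Stash the span name so EndSpan can report it.
	return context.WithValue(ctx, spanNameKey{}, name)
}

func (logTracer) EndSpan(ctx context.Context, httpStatusCode int, err error) {
	name, _ := ctx.Value(spanNameKey{}).(string)
	log.Printf("end span: %s (status=%d, err=%v)", name, httpStatusCode, err)
}

func main() {
	// Register makes the SDK's StartSpan/EndSpan/NewTransport calls forward
	// to our tracer; passing nil would unregister it again, after which the
	// package-level helpers become no-ops.
	tracing.Register(logTracer{})

	ctx := tracing.StartSpan(context.Background(), "example")
	tracing.EndSpan(ctx, http.StatusOK, nil)
}
```

This decoupling is what lets the vendor tree below drop the opencensus-proto and ocagent packages entirely: the tracing facade no longer depends on any concrete exporter.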
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS b/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS
deleted file mode 100644
index e068e731e..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS
+++ /dev/null
@@ -1 +0,0 @@
-Google Inc.
\ No newline at end of file
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go
deleted file mode 100644
index a6f0febe2..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go
+++ /dev/null
@@ -1,361 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: opencensus/proto/agent/common/v1/common.proto
-
-package v1
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- timestamp "github.com/golang/protobuf/ptypes/timestamp"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type LibraryInfo_Language int32
-
-const (
- LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0
- LibraryInfo_CPP LibraryInfo_Language = 1
- LibraryInfo_C_SHARP LibraryInfo_Language = 2
- LibraryInfo_ERLANG LibraryInfo_Language = 3
- LibraryInfo_GO_LANG LibraryInfo_Language = 4
- LibraryInfo_JAVA LibraryInfo_Language = 5
- LibraryInfo_NODE_JS LibraryInfo_Language = 6
- LibraryInfo_PHP LibraryInfo_Language = 7
- LibraryInfo_PYTHON LibraryInfo_Language = 8
- LibraryInfo_RUBY LibraryInfo_Language = 9
- LibraryInfo_WEB_JS LibraryInfo_Language = 10
-)
-
-var LibraryInfo_Language_name = map[int32]string{
- 0: "LANGUAGE_UNSPECIFIED",
- 1: "CPP",
- 2: "C_SHARP",
- 3: "ERLANG",
- 4: "GO_LANG",
- 5: "JAVA",
- 6: "NODE_JS",
- 7: "PHP",
- 8: "PYTHON",
- 9: "RUBY",
- 10: "WEB_JS",
-}
-
-var LibraryInfo_Language_value = map[string]int32{
- "LANGUAGE_UNSPECIFIED": 0,
- "CPP": 1,
- "C_SHARP": 2,
- "ERLANG": 3,
- "GO_LANG": 4,
- "JAVA": 5,
- "NODE_JS": 6,
- "PHP": 7,
- "PYTHON": 8,
- "RUBY": 9,
- "WEB_JS": 10,
-}
-
-func (x LibraryInfo_Language) String() string {
- return proto.EnumName(LibraryInfo_Language_name, int32(x))
-}
-
-func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_126c72ed8a252c84, []int{2, 0}
-}
-
-// Identifier metadata of the Node that produces the span or tracing data.
-// Note, this is not the metadata about the Node or service that is described by associated spans.
-// In the future we plan to extend the identifier proto definition to support
-// additional information (e.g cloud id, etc.)
-type Node struct {
- // Identifier that uniquely identifies a process within a VM/container.
- Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
- // Information on the OpenCensus Library that initiates the stream.
- LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"`
- // Additional information on service.
- ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"`
- // Additional attributes.
- Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Node) Reset() { *m = Node{} }
-func (m *Node) String() string { return proto.CompactTextString(m) }
-func (*Node) ProtoMessage() {}
-func (*Node) Descriptor() ([]byte, []int) {
- return fileDescriptor_126c72ed8a252c84, []int{0}
-}
-
-func (m *Node) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Node.Unmarshal(m, b)
-}
-func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Node.Marshal(b, m, deterministic)
-}
-func (m *Node) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Node.Merge(m, src)
-}
-func (m *Node) XXX_Size() int {
- return xxx_messageInfo_Node.Size(m)
-}
-func (m *Node) XXX_DiscardUnknown() {
- xxx_messageInfo_Node.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Node proto.InternalMessageInfo
-
-func (m *Node) GetIdentifier() *ProcessIdentifier {
- if m != nil {
- return m.Identifier
- }
- return nil
-}
-
-func (m *Node) GetLibraryInfo() *LibraryInfo {
- if m != nil {
- return m.LibraryInfo
- }
- return nil
-}
-
-func (m *Node) GetServiceInfo() *ServiceInfo {
- if m != nil {
- return m.ServiceInfo
- }
- return nil
-}
-
-func (m *Node) GetAttributes() map[string]string {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-// Identifier that uniquely identifies a process within a VM/container.
-type ProcessIdentifier struct {
- // The host name. Usually refers to the machine/container name.
- // For example: os.Hostname() in Go, socket.gethostname() in Python.
- HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"`
- // Process id.
- Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
- // Start time of this ProcessIdentifier. Represented in epoch time.
- StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ProcessIdentifier) Reset() { *m = ProcessIdentifier{} }
-func (m *ProcessIdentifier) String() string { return proto.CompactTextString(m) }
-func (*ProcessIdentifier) ProtoMessage() {}
-func (*ProcessIdentifier) Descriptor() ([]byte, []int) {
- return fileDescriptor_126c72ed8a252c84, []int{1}
-}
-
-func (m *ProcessIdentifier) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ProcessIdentifier.Unmarshal(m, b)
-}
-func (m *ProcessIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ProcessIdentifier.Marshal(b, m, deterministic)
-}
-func (m *ProcessIdentifier) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ProcessIdentifier.Merge(m, src)
-}
-func (m *ProcessIdentifier) XXX_Size() int {
- return xxx_messageInfo_ProcessIdentifier.Size(m)
-}
-func (m *ProcessIdentifier) XXX_DiscardUnknown() {
- xxx_messageInfo_ProcessIdentifier.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ProcessIdentifier proto.InternalMessageInfo
-
-func (m *ProcessIdentifier) GetHostName() string {
- if m != nil {
- return m.HostName
- }
- return ""
-}
-
-func (m *ProcessIdentifier) GetPid() uint32 {
- if m != nil {
- return m.Pid
- }
- return 0
-}
-
-func (m *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.StartTimestamp
- }
- return nil
-}
-
-// Information on OpenCensus Library.
-type LibraryInfo struct {
- // Language of OpenCensus Library.
- Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"`
- // Version of Agent exporter of Library.
- ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"`
- // Version of OpenCensus Library.
- CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LibraryInfo) Reset() { *m = LibraryInfo{} }
-func (m *LibraryInfo) String() string { return proto.CompactTextString(m) }
-func (*LibraryInfo) ProtoMessage() {}
-func (*LibraryInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_126c72ed8a252c84, []int{2}
-}
-
-func (m *LibraryInfo) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LibraryInfo.Unmarshal(m, b)
-}
-func (m *LibraryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LibraryInfo.Marshal(b, m, deterministic)
-}
-func (m *LibraryInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LibraryInfo.Merge(m, src)
-}
-func (m *LibraryInfo) XXX_Size() int {
- return xxx_messageInfo_LibraryInfo.Size(m)
-}
-func (m *LibraryInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_LibraryInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LibraryInfo proto.InternalMessageInfo
-
-func (m *LibraryInfo) GetLanguage() LibraryInfo_Language {
- if m != nil {
- return m.Language
- }
- return LibraryInfo_LANGUAGE_UNSPECIFIED
-}
-
-func (m *LibraryInfo) GetExporterVersion() string {
- if m != nil {
- return m.ExporterVersion
- }
- return ""
-}
-
-func (m *LibraryInfo) GetCoreLibraryVersion() string {
- if m != nil {
- return m.CoreLibraryVersion
- }
- return ""
-}
-
-// Additional service information.
-type ServiceInfo struct {
- // Name of the service.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ServiceInfo) Reset() { *m = ServiceInfo{} }
-func (m *ServiceInfo) String() string { return proto.CompactTextString(m) }
-func (*ServiceInfo) ProtoMessage() {}
-func (*ServiceInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_126c72ed8a252c84, []int{3}
-}
-
-func (m *ServiceInfo) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ServiceInfo.Unmarshal(m, b)
-}
-func (m *ServiceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ServiceInfo.Marshal(b, m, deterministic)
-}
-func (m *ServiceInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceInfo.Merge(m, src)
-}
-func (m *ServiceInfo) XXX_Size() int {
- return xxx_messageInfo_ServiceInfo.Size(m)
-}
-func (m *ServiceInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceInfo proto.InternalMessageInfo
-
-func (m *ServiceInfo) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func init() {
- proto.RegisterEnum("opencensus.proto.agent.common.v1.LibraryInfo_Language", LibraryInfo_Language_name, LibraryInfo_Language_value)
- proto.RegisterType((*Node)(nil), "opencensus.proto.agent.common.v1.Node")
- proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.agent.common.v1.Node.AttributesEntry")
- proto.RegisterType((*ProcessIdentifier)(nil), "opencensus.proto.agent.common.v1.ProcessIdentifier")
- proto.RegisterType((*LibraryInfo)(nil), "opencensus.proto.agent.common.v1.LibraryInfo")
- proto.RegisterType((*ServiceInfo)(nil), "opencensus.proto.agent.common.v1.ServiceInfo")
-}
-
-func init() {
- proto.RegisterFile("opencensus/proto/agent/common/v1/common.proto", fileDescriptor_126c72ed8a252c84)
-}
-
-var fileDescriptor_126c72ed8a252c84 = []byte{
- // 618 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4f, 0x6e, 0xda, 0x4e,
- 0x14, 0xc7, 0x7f, 0xc6, 0x24, 0x81, 0xe7, 0x5f, 0x13, 0x77, 0x94, 0x05, 0x4a, 0x17, 0xa5, 0x74,
- 0x93, 0x2e, 0xb0, 0x9b, 0x44, 0xaa, 0xaa, 0x4a, 0x5d, 0x18, 0xe2, 0x26, 0x44, 0x11, 0x58, 0x26,
- 0xa1, 0x4a, 0x37, 0x96, 0x21, 0x83, 0x33, 0x2a, 0x9e, 0x41, 0xe3, 0x31, 0x2a, 0x27, 0xe8, 0x09,
- 0xda, 0x03, 0xf4, 0x50, 0x3d, 0x44, 0x4f, 0x51, 0xcd, 0x8c, 0x01, 0xab, 0x59, 0x90, 0xdd, 0xfb,
- 0xf3, 0xfd, 0x7e, 0x9e, 0xf5, 0xe6, 0xc9, 0xd0, 0x66, 0x73, 0x4c, 0x27, 0x98, 0x66, 0x79, 0xe6,
- 0xce, 0x39, 0x13, 0xcc, 0x8d, 0x13, 0x4c, 0x85, 0x3b, 0x61, 0x69, 0xca, 0xa8, 0xbb, 0x38, 0x29,
- 0x22, 0x47, 0x35, 0x51, 0x73, 0x23, 0xd7, 0x15, 0x47, 0xc9, 0x9d, 0x42, 0xb4, 0x38, 0x39, 0x7a,
- 0x99, 0x30, 0x96, 0xcc, 0xb0, 0x86, 0x8d, 0xf3, 0xa9, 0x2b, 0x48, 0x8a, 0x33, 0x11, 0xa7, 0x73,
- 0x6d, 0x68, 0xfd, 0x34, 0xa1, 0xda, 0x67, 0xf7, 0x18, 0x0d, 0x01, 0xc8, 0x3d, 0xa6, 0x82, 0x4c,
- 0x09, 0xe6, 0x0d, 0xa3, 0x69, 0x1c, 0x5b, 0xa7, 0x67, 0xce, 0xb6, 0x01, 0x4e, 0xc0, 0xd9, 0x04,
- 0x67, 0x59, 0x6f, 0x6d, 0x0d, 0x4b, 0x18, 0x14, 0xc0, 0xff, 0x33, 0x32, 0xe6, 0x31, 0x5f, 0x46,
- 0x84, 0x4e, 0x59, 0xa3, 0xa2, 0xb0, 0xed, 0xed, 0xd8, 0x6b, 0xed, 0xea, 0xd1, 0x29, 0x0b, 0xad,
- 0xd9, 0x26, 0x91, 0xc4, 0x0c, 0xf3, 0x05, 0x99, 0x60, 0x4d, 0x34, 0x9f, 0x4a, 0x1c, 0x6a, 0x97,
- 0x26, 0x66, 0x9b, 0x04, 0x8d, 0x00, 0x62, 0x21, 0x38, 0x19, 0xe7, 0x02, 0x67, 0x8d, 0x6a, 0xd3,
- 0x3c, 0xb6, 0x4e, 0xdf, 0x6d, 0xe7, 0xc9, 0xa5, 0x39, 0xde, 0xda, 0xe8, 0x53, 0xc1, 0x97, 0x61,
- 0x89, 0x74, 0xf4, 0x11, 0x0e, 0xfe, 0x69, 0x23, 0x1b, 0xcc, 0xaf, 0x78, 0xa9, 0x96, 0x5b, 0x0f,
- 0x65, 0x88, 0x0e, 0x61, 0x67, 0x11, 0xcf, 0x72, 0xac, 0x36, 0x53, 0x0f, 0x75, 0xf2, 0xa1, 0xf2,
- 0xde, 0x68, 0x7d, 0x37, 0xe0, 0xf9, 0xa3, 0xe5, 0xa2, 0x17, 0x50, 0x7f, 0x60, 0x99, 0x88, 0x68,
- 0x9c, 0xe2, 0x82, 0x53, 0x93, 0x85, 0x7e, 0x9c, 0x62, 0x89, 0x9f, 0x93, 0x7b, 0x85, 0x7a, 0x16,
- 0xca, 0x10, 0x75, 0xe1, 0x20, 0x13, 0x31, 0x17, 0xd1, 0xfa, 0xd9, 0x8b, 0x85, 0x1d, 0x39, 0xfa,
- 0x30, 0x9c, 0xd5, 0x61, 0x38, 0x37, 0x2b, 0x45, 0xb8, 0xaf, 0x2c, 0xeb, 0xbc, 0xf5, 0xbb, 0x02,
- 0x56, 0xe9, 0x3d, 0x50, 0x08, 0xb5, 0x59, 0x4c, 0x93, 0x3c, 0x4e, 0xf4, 0x27, 0xec, 0x3f, 0x65,
- 0x5d, 0x25, 0x80, 0x73, 0x5d, 0xb8, 0xc3, 0x35, 0x07, 0xbd, 0x01, 0x1b, 0x7f, 0x9b, 0x33, 0x2e,
- 0x30, 0x8f, 0x16, 0x98, 0x67, 0x84, 0xd1, 0x62, 0x25, 0x07, 0xab, 0xfa, 0x48, 0x97, 0xd1, 0x5b,
- 0x38, 0x9c, 0x30, 0x8e, 0xa3, 0xd5, 0x61, 0xad, 0xe4, 0xa6, 0x92, 0x23, 0xd9, 0x2b, 0x86, 0x15,
- 0x8e, 0xd6, 0x0f, 0x03, 0x6a, 0xab, 0x99, 0xa8, 0x01, 0x87, 0xd7, 0x5e, 0xff, 0xe2, 0xd6, 0xbb,
- 0xf0, 0xa3, 0xdb, 0xfe, 0x30, 0xf0, 0xbb, 0xbd, 0x4f, 0x3d, 0xff, 0xdc, 0xfe, 0x0f, 0xed, 0x81,
- 0xd9, 0x0d, 0x02, 0xdb, 0x40, 0x16, 0xec, 0x75, 0xa3, 0xe1, 0xa5, 0x17, 0x06, 0x76, 0x05, 0x01,
- 0xec, 0xfa, 0xa1, 0x74, 0xd8, 0xa6, 0x6c, 0x5c, 0x0c, 0x22, 0x95, 0x54, 0x51, 0x0d, 0xaa, 0x57,
- 0xde, 0xc8, 0xb3, 0x77, 0x64, 0xb9, 0x3f, 0x38, 0xf7, 0xa3, 0xab, 0xa1, 0xbd, 0x2b, 0x29, 0xc1,
- 0x65, 0x60, 0xef, 0x49, 0x63, 0x70, 0x77, 0x73, 0x39, 0xe8, 0xdb, 0x35, 0xa9, 0x0d, 0x6f, 0x3b,
- 0x77, 0x76, 0x5d, 0x56, 0x3f, 0xfb, 0x1d, 0x29, 0x85, 0xd6, 0x2b, 0xb0, 0x4a, 0x57, 0x89, 0x10,
- 0x54, 0x4b, 0xcf, 0xaa, 0xe2, 0xce, 0x2f, 0x03, 0x5e, 0x13, 0xb6, 0x75, 0xbd, 0x1d, 0xab, 0xab,
- 0xc2, 0x40, 0x36, 0x03, 0xe3, 0x4b, 0x2f, 0x21, 0xe2, 0x21, 0x1f, 0x4b, 0x81, 0xab, 0x7d, 0x6d,
- 0x42, 0x33, 0xc1, 0xf3, 0x14, 0x53, 0x11, 0x0b, 0xc2, 0xa8, 0xbb, 0x41, 0xb6, 0xf5, 0x9f, 0x26,
- 0xc1, 0xb4, 0x9d, 0x3c, 0xfa, 0xe1, 0xfc, 0xa9, 0x34, 0x07, 0x73, 0x4c, 0xbb, 0x7a, 0xb8, 0xe2,
- 0x3b, 0x9e, 0x1a, 0xae, 0x27, 0x3a, 0xa3, 0x93, 0xf1, 0xae, 0x02, 0x9c, 0xfd, 0x0d, 0x00, 0x00,
- 0xff, 0xff, 0xe3, 0x53, 0x74, 0x5e, 0xbe, 0x04, 0x00, 0x00,
-}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go
deleted file mode 100644
index 5f222b473..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go
+++ /dev/null
@@ -1,275 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: opencensus/proto/agent/metrics/v1/metrics_service.proto
-
-package v1
-
-import (
- context "context"
- fmt "fmt"
- v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
- v11 "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
- v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
- proto "github.com/golang/protobuf/proto"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type ExportMetricsServiceRequest struct {
- // This is required only in the first message on the stream or if the
- // previous sent ExportMetricsServiceRequest message has a different Node (e.g.
- // when the same RPC is used to send Metrics from multiple Applications).
- Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
- // A list of metrics that belong to the last received Node.
- Metrics []*v11.Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
- // The resource for the metrics in this message that do not have an explicit
- // resource set.
- // If unset, the most recently set resource in the RPC stream applies. It is
- // valid to never be set within a stream, e.g. when no resource info is known
- // at all or when all sent metrics have an explicit resource set.
- Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} }
-func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) }
-func (*ExportMetricsServiceRequest) ProtoMessage() {}
-func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_47e253a956287d04, []int{0}
-}
-
-func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ExportMetricsServiceRequest.Unmarshal(m, b)
-}
-func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic)
-}
-func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src)
-}
-func (m *ExportMetricsServiceRequest) XXX_Size() int {
- return xxx_messageInfo_ExportMetricsServiceRequest.Size(m)
-}
-func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo
-
-func (m *ExportMetricsServiceRequest) GetNode() *v1.Node {
- if m != nil {
- return m.Node
- }
- return nil
-}
-
-func (m *ExportMetricsServiceRequest) GetMetrics() []*v11.Metric {
- if m != nil {
- return m.Metrics
- }
- return nil
-}
-
-func (m *ExportMetricsServiceRequest) GetResource() *v12.Resource {
- if m != nil {
- return m.Resource
- }
- return nil
-}
-
-type ExportMetricsServiceResponse struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} }
-func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ExportMetricsServiceResponse) ProtoMessage() {}
-func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_47e253a956287d04, []int{1}
-}
-
-func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ExportMetricsServiceResponse.Unmarshal(m, b)
-}
-func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic)
-}
-func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src)
-}
-func (m *ExportMetricsServiceResponse) XXX_Size() int {
- return xxx_messageInfo_ExportMetricsServiceResponse.Size(m)
-}
-func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceRequest")
- proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceResponse")
-}
-
-func init() {
- proto.RegisterFile("opencensus/proto/agent/metrics/v1/metrics_service.proto", fileDescriptor_47e253a956287d04)
-}
-
-var fileDescriptor_47e253a956287d04 = []byte{
- // 361 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0x41, 0x4a, 0xc3, 0x40,
- 0x14, 0x86, 0x9d, 0x56, 0xaa, 0x4c, 0xc1, 0x45, 0xdc, 0x94, 0x2a, 0xd2, 0x56, 0x91, 0x8a, 0x64,
- 0x62, 0xea, 0x42, 0x10, 0x54, 0xac, 0xb8, 0x11, 0xd4, 0x12, 0xc1, 0x85, 0x1b, 0x69, 0xd3, 0x47,
- 0xcc, 0x22, 0x33, 0x71, 0x66, 0x12, 0xbc, 0x85, 0x77, 0x70, 0xef, 0x8d, 0x3c, 0x81, 0xa7, 0x90,
- 0xe4, 0x4d, 0x5a, 0x4a, 0x8c, 0x05, 0x77, 0x8f, 0xe4, 0xff, 0xfe, 0xf7, 0xff, 0x33, 0x43, 0x4f,
- 0x44, 0x0c, 0xdc, 0x07, 0xae, 0x12, 0xe5, 0xc4, 0x52, 0x68, 0xe1, 0x8c, 0x03, 0xe0, 0xda, 0x89,
- 0x40, 0xcb, 0xd0, 0x57, 0x4e, 0xea, 0x16, 0xe3, 0xb3, 0x02, 0x99, 0x86, 0x3e, 0xb0, 0x5c, 0x66,
- 0x75, 0xe7, 0x20, 0x7e, 0x61, 0x39, 0xc8, 0x8c, 0x9a, 0xa5, 0x6e, 0xdb, 0xae, 0xf0, 0xf6, 0x45,
- 0x14, 0x09, 0x9e, 0x59, 0xe3, 0x84, 0x7c, 0xfb, 0xa0, 0x24, 0x2f, 0x87, 0x30, 0xd2, 0xc3, 0x92,
- 0x54, 0x82, 0x12, 0x89, 0xf4, 0x21, 0xd3, 0x16, 0x33, 0x8a, 0x7b, 0x5f, 0x84, 0x6e, 0x5d, 0xbf,
- 0xc5, 0x42, 0xea, 0x5b, 0x34, 0x79, 0xc0, 0x22, 0x1e, 0xbc, 0x26, 0xa0, 0xb4, 0x75, 0x4a, 0x57,
- 0xb9, 0x98, 0x42, 0x8b, 0x74, 0x48, 0xbf, 0x39, 0xd8, 0x67, 0x15, 0xc5, 0x4c, 0xd6, 0xd4, 0x65,
- 0x77, 0x62, 0x0a, 0x5e, 0xce, 0x58, 0x67, 0x74, 0xcd, 0x24, 0x6b, 0xd5, 0x3a, 0xf5, 0x7e, 0x73,
- 0xb0, 0x5b, 0xc6, 0xe7, 0x27, 0xc2, 0x30, 0x80, 0x57, 0x30, 0xd6, 0x90, 0xae, 0x17, 0x61, 0x5b,
- 0xf5, 0xaa, 0xf5, 0xb3, 0x3a, 0xa9, 0xcb, 0x3c, 0x33, 0x7b, 0x33, 0xae, 0xb7, 0x43, 0xb7, 0x7f,
- 0x6f, 0xa7, 0x62, 0xc1, 0x15, 0x0c, 0x3e, 0x08, 0xdd, 0x58, 0xfc, 0x65, 0xbd, 0x13, 0xda, 0x40,
- 0xc6, 0x3a, 0x67, 0x4b, 0xef, 0x91, 0xfd, 0x71, 0x78, 0xed, 0x8b, 0x7f, 0xf3, 0x18, 0xaf, 0xb7,
- 0xd2, 0x27, 0x47, 0x64, 0xf8, 0x49, 0xe8, 0x5e, 0x28, 0x96, 0x7b, 0x0d, 0x37, 0x17, 0x6d, 0x46,
- 0x99, 0x6a, 0x44, 0x9e, 0x6e, 0x82, 0x50, 0xbf, 0x24, 0x93, 0xec, 0x92, 0x1c, 0x34, 0xb0, 0x43,
- 0xae, 0xb4, 0x4c, 0x22, 0xe0, 0x7a, 0xac, 0x43, 0xc1, 0x9d, 0xb9, 0xb7, 0x8d, 0x4f, 0x26, 0x00,
- 0x6e, 0x07, 0xe5, 0xf7, 0xfe, 0x5d, 0xeb, 0xde, 0xc7, 0xc0, 0xaf, 0x30, 0x46, 0xbe, 0x80, 0x5d,
- 0xe6, 0x31, 0xcc, 0x6a, 0xf6, 0xe8, 0x4e, 0x1a, 0xb9, 0xc5, 0xf1, 0x4f, 0x00, 0x00, 0x00, 0xff,
- 0xff, 0x19, 0x28, 0xa4, 0x50, 0x3f, 0x03, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// MetricsServiceClient is the client API for MetricsService service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type MetricsServiceClient interface {
- // For performance reasons, it is recommended to keep this RPC
- // alive for the entire life of the application.
- Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error)
-}
-
-type metricsServiceClient struct {
- cc *grpc.ClientConn
-}
-
-func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient {
- return &metricsServiceClient{cc}
-}
-
-func (c *metricsServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) {
- stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/opencensus.proto.agent.metrics.v1.MetricsService/Export", opts...)
- if err != nil {
- return nil, err
- }
- x := &metricsServiceExportClient{stream}
- return x, nil
-}
-
-type MetricsService_ExportClient interface {
- Send(*ExportMetricsServiceRequest) error
- Recv() (*ExportMetricsServiceResponse, error)
- grpc.ClientStream
-}
-
-type metricsServiceExportClient struct {
- grpc.ClientStream
-}
-
-func (x *metricsServiceExportClient) Send(m *ExportMetricsServiceRequest) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *metricsServiceExportClient) Recv() (*ExportMetricsServiceResponse, error) {
- m := new(ExportMetricsServiceResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// MetricsServiceServer is the server API for MetricsService service.
-type MetricsServiceServer interface {
- // For performance reasons, it is recommended to keep this RPC
- // alive for the entire life of the application.
- Export(MetricsService_ExportServer) error
-}
-
-// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations.
-type UnimplementedMetricsServiceServer struct {
-}
-
-func (*UnimplementedMetricsServiceServer) Export(srv MetricsService_ExportServer) error {
- return status.Errorf(codes.Unimplemented, "method Export not implemented")
-}
-
-func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) {
- s.RegisterService(&_MetricsService_serviceDesc, srv)
-}
-
-func _MetricsService_Export_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(MetricsServiceServer).Export(&metricsServiceExportServer{stream})
-}
-
-type MetricsService_ExportServer interface {
- Send(*ExportMetricsServiceResponse) error
- Recv() (*ExportMetricsServiceRequest, error)
- grpc.ServerStream
-}
-
-type metricsServiceExportServer struct {
- grpc.ServerStream
-}
-
-func (x *metricsServiceExportServer) Send(m *ExportMetricsServiceResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *metricsServiceExportServer) Recv() (*ExportMetricsServiceRequest, error) {
- m := new(ExportMetricsServiceRequest)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-var _MetricsService_serviceDesc = grpc.ServiceDesc{
- ServiceName: "opencensus.proto.agent.metrics.v1.MetricsService",
- HandlerType: (*MetricsServiceServer)(nil),
- Methods: []grpc.MethodDesc{},
- Streams: []grpc.StreamDesc{
- {
- StreamName: "Export",
- Handler: _MetricsService_Export_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- },
- Metadata: "opencensus/proto/agent/metrics/v1/metrics_service.proto",
-}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.gw.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.gw.go
deleted file mode 100644
index 158c16089..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.gw.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: opencensus/proto/agent/metrics/v1/metrics_service.proto
-
-/*
-Package v1 is a reverse proxy.
-
-It translates gRPC into RESTful JSON APIs.
-*/
-package v1
-
-import (
- "context"
- "io"
- "net/http"
-
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/runtime"
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/status"
-)
-
-var _ codes.Code
-var _ io.Reader
-var _ status.Status
-var _ = runtime.String
-var _ = utilities.NewDoubleArray
-
-func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client MetricsServiceClient, req *http.Request, pathParams map[string]string) (MetricsService_ExportClient, runtime.ServerMetadata, error) {
- var metadata runtime.ServerMetadata
- stream, err := client.Export(ctx)
- if err != nil {
- grpclog.Infof("Failed to start streaming: %v", err)
- return nil, metadata, err
- }
- dec := marshaler.NewDecoder(req.Body)
- handleSend := func() error {
- var protoReq ExportMetricsServiceRequest
- err := dec.Decode(&protoReq)
- if err == io.EOF {
- return err
- }
- if err != nil {
- grpclog.Infof("Failed to decode request: %v", err)
- return err
- }
- if err := stream.Send(&protoReq); err != nil {
- grpclog.Infof("Failed to send request: %v", err)
- return err
- }
- return nil
- }
- if err := handleSend(); err != nil {
- if cerr := stream.CloseSend(); cerr != nil {
- grpclog.Infof("Failed to terminate client stream: %v", cerr)
- }
- if err == io.EOF {
- return stream, metadata, nil
- }
- return nil, metadata, err
- }
- go func() {
- for {
- if err := handleSend(); err != nil {
- break
- }
- }
- if err := stream.CloseSend(); err != nil {
- grpclog.Infof("Failed to terminate client stream: %v", err)
- }
- }()
- header, err := stream.Header()
- if err != nil {
- grpclog.Infof("Failed to get header from client: %v", err)
- return nil, metadata, err
- }
- metadata.HeaderMD = header
- return stream, metadata, nil
-}
-
-// RegisterMetricsServiceHandlerFromEndpoint is same as RegisterMetricsServiceHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterMetricsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterMetricsServiceHandler(ctx, mux, conn)
-}
-
-// RegisterMetricsServiceHandler registers the http handlers for service MetricsService to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterMetricsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterMetricsServiceHandlerClient(ctx, mux, NewMetricsServiceClient(conn))
-}
-
-// RegisterMetricsServiceHandlerClient registers the http handlers for service MetricsService
-// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MetricsServiceClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MetricsServiceClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "MetricsServiceClient" to call the correct interceptors.
-func RegisterMetricsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client MetricsServiceClient) error {
-
- mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_MetricsService_Export_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_MetricsService_Export_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_MetricsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, ""))
-)
-
-var (
- forward_MetricsService_Export_0 = runtime.ForwardResponseStream
-)
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go
deleted file mode 100644
index a0a3504dd..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go
+++ /dev/null
@@ -1,457 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: opencensus/proto/agent/trace/v1/trace_service.proto
-
-package v1
-
-import (
- context "context"
- fmt "fmt"
- v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
- v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
- v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
- proto "github.com/golang/protobuf/proto"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type CurrentLibraryConfig struct {
- // This is required only in the first message on the stream or if the
- // previous sent CurrentLibraryConfig message has a different Node (e.g.
- // when the same RPC is used to configure multiple Applications).
- Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
- // Current configuration.
- Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CurrentLibraryConfig) Reset() { *m = CurrentLibraryConfig{} }
-func (m *CurrentLibraryConfig) String() string { return proto.CompactTextString(m) }
-func (*CurrentLibraryConfig) ProtoMessage() {}
-func (*CurrentLibraryConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_7027f99caf7ac6a5, []int{0}
-}
-
-func (m *CurrentLibraryConfig) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CurrentLibraryConfig.Unmarshal(m, b)
-}
-func (m *CurrentLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CurrentLibraryConfig.Marshal(b, m, deterministic)
-}
-func (m *CurrentLibraryConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CurrentLibraryConfig.Merge(m, src)
-}
-func (m *CurrentLibraryConfig) XXX_Size() int {
- return xxx_messageInfo_CurrentLibraryConfig.Size(m)
-}
-func (m *CurrentLibraryConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_CurrentLibraryConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CurrentLibraryConfig proto.InternalMessageInfo
-
-func (m *CurrentLibraryConfig) GetNode() *v1.Node {
- if m != nil {
- return m.Node
- }
- return nil
-}
-
-func (m *CurrentLibraryConfig) GetConfig() *v11.TraceConfig {
- if m != nil {
- return m.Config
- }
- return nil
-}
-
-type UpdatedLibraryConfig struct {
- // This field is ignored when the RPC is used to configure only one Application.
- // This is required only in the first message on the stream or if the
- // previous sent UpdatedLibraryConfig message has a different Node.
- Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
- // Requested updated configuration.
- Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UpdatedLibraryConfig) Reset() { *m = UpdatedLibraryConfig{} }
-func (m *UpdatedLibraryConfig) String() string { return proto.CompactTextString(m) }
-func (*UpdatedLibraryConfig) ProtoMessage() {}
-func (*UpdatedLibraryConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_7027f99caf7ac6a5, []int{1}
-}
-
-func (m *UpdatedLibraryConfig) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_UpdatedLibraryConfig.Unmarshal(m, b)
-}
-func (m *UpdatedLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_UpdatedLibraryConfig.Marshal(b, m, deterministic)
-}
-func (m *UpdatedLibraryConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UpdatedLibraryConfig.Merge(m, src)
-}
-func (m *UpdatedLibraryConfig) XXX_Size() int {
- return xxx_messageInfo_UpdatedLibraryConfig.Size(m)
-}
-func (m *UpdatedLibraryConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_UpdatedLibraryConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UpdatedLibraryConfig proto.InternalMessageInfo
-
-func (m *UpdatedLibraryConfig) GetNode() *v1.Node {
- if m != nil {
- return m.Node
- }
- return nil
-}
-
-func (m *UpdatedLibraryConfig) GetConfig() *v11.TraceConfig {
- if m != nil {
- return m.Config
- }
- return nil
-}
-
-type ExportTraceServiceRequest struct {
- // This is required only in the first message on the stream or if the
- // previous sent ExportTraceServiceRequest message has a different Node (e.g.
- // when the same RPC is used to send Spans from multiple Applications).
- Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
- // A list of Spans that belong to the last received Node.
- Spans []*v11.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
- // The resource for the spans in this message that do not have an explicit
- // resource set.
- // If unset, the most recently set resource in the RPC stream applies. It is
- // valid to never be set within a stream, e.g. when no resource info is known.
- Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} }
-func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) }
-func (*ExportTraceServiceRequest) ProtoMessage() {}
-func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_7027f99caf7ac6a5, []int{2}
-}
-
-func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ExportTraceServiceRequest.Unmarshal(m, b)
-}
-func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic)
-}
-func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src)
-}
-func (m *ExportTraceServiceRequest) XXX_Size() int {
- return xxx_messageInfo_ExportTraceServiceRequest.Size(m)
-}
-func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo
-
-func (m *ExportTraceServiceRequest) GetNode() *v1.Node {
- if m != nil {
- return m.Node
- }
- return nil
-}
-
-func (m *ExportTraceServiceRequest) GetSpans() []*v11.Span {
- if m != nil {
- return m.Spans
- }
- return nil
-}
-
-func (m *ExportTraceServiceRequest) GetResource() *v12.Resource {
- if m != nil {
- return m.Resource
- }
- return nil
-}
-
-type ExportTraceServiceResponse struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} }
-func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ExportTraceServiceResponse) ProtoMessage() {}
-func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_7027f99caf7ac6a5, []int{3}
-}
-
-func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ExportTraceServiceResponse.Unmarshal(m, b)
-}
-func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic)
-}
-func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src)
-}
-func (m *ExportTraceServiceResponse) XXX_Size() int {
- return xxx_messageInfo_ExportTraceServiceResponse.Size(m)
-}
-func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*CurrentLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.CurrentLibraryConfig")
- proto.RegisterType((*UpdatedLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.UpdatedLibraryConfig")
- proto.RegisterType((*ExportTraceServiceRequest)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceRequest")
- proto.RegisterType((*ExportTraceServiceResponse)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceResponse")
-}
-
-func init() {
- proto.RegisterFile("opencensus/proto/agent/trace/v1/trace_service.proto", fileDescriptor_7027f99caf7ac6a5)
-}
-
-var fileDescriptor_7027f99caf7ac6a5 = []byte{
- // 442 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x94, 0xcf, 0xaa, 0xd4, 0x30,
- 0x14, 0xc6, 0x4d, 0xaf, 0x16, 0xc9, 0x75, 0x63, 0x71, 0x51, 0x8b, 0x30, 0x97, 0x82, 0x32, 0xa0,
- 0x4d, 0xed, 0x5c, 0xee, 0xe6, 0x0a, 0x82, 0x33, 0x08, 0x2e, 0x44, 0x2f, 0x1d, 0x75, 0xe1, 0x66,
- 0xe8, 0xb4, 0xc7, 0xda, 0xc5, 0x24, 0x31, 0x49, 0x8b, 0x82, 0x7b, 0xf7, 0x2e, 0x7c, 0x03, 0x5f,
- 0xc8, 0xc7, 0xf0, 0x29, 0xa4, 0x39, 0x9d, 0x3f, 0x3a, 0x53, 0x0b, 0xba, 0xb9, 0xbb, 0x43, 0xf3,
- 0xfd, 0xbe, 0xf3, 0x25, 0x39, 0x29, 0x3d, 0x15, 0x12, 0x78, 0x0e, 0x5c, 0xd7, 0x3a, 0x96, 0x4a,
- 0x18, 0x11, 0x67, 0x25, 0x70, 0x13, 0x1b, 0x95, 0xe5, 0x10, 0x37, 0x09, 0x16, 0x0b, 0x0d, 0xaa,
- 0xa9, 0x72, 0x60, 0x56, 0xe2, 0x8d, 0xb6, 0x10, 0x7e, 0x61, 0x16, 0x62, 0x56, 0xcb, 0x9a, 0x24,
- 0x88, 0x7a, 0x5c, 0x73, 0xb1, 0x5a, 0x09, 0xde, 0xda, 0x62, 0x85, 0x74, 0x70, 0x7f, 0x4f, 0xae,
- 0x40, 0x8b, 0x5a, 0x61, 0x82, 0x75, 0xdd, 0x89, 0xef, 0xee, 0x89, 0x7f, 0xcf, 0xda, 0xc9, 0x1e,
- 0x0c, 0xc8, 0x16, 0xb9, 0xe0, 0xef, 0xaa, 0x12, 0xd5, 0xe1, 0x57, 0x42, 0x6f, 0xcd, 0x6a, 0xa5,
- 0x80, 0x9b, 0xe7, 0xd5, 0x52, 0x65, 0xea, 0xd3, 0xcc, 0x2e, 0x7b, 0xe7, 0xf4, 0x2a, 0x17, 0x05,
- 0xf8, 0xe4, 0x84, 0x8c, 0x8f, 0x27, 0xf7, 0x58, 0xcf, 0xce, 0xbb, 0xed, 0x34, 0x09, 0x7b, 0x21,
- 0x0a, 0x48, 0x2d, 0xe3, 0x3d, 0xa6, 0x2e, 0x36, 0xf1, 0x9d, 0x3e, 0x7a, 0x7d, 0x62, 0xec, 0x55,
- 0x5b, 0x60, 0xcf, 0xb4, 0xa3, 0x6c, 0xa8, 0xd7, 0xb2, 0xc8, 0x0c, 0x14, 0x97, 0x27, 0xd4, 0x0f,
- 0x42, 0x6f, 0x3f, 0xfd, 0x28, 0x85, 0x32, 0x76, 0x75, 0x8e, 0x83, 0x91, 0xc2, 0x87, 0x1a, 0xb4,
- 0xf9, 0xaf, 0x64, 0x67, 0xf4, 0x9a, 0x96, 0x19, 0xd7, 0xbe, 0x73, 0x72, 0x34, 0x3e, 0x9e, 0x8c,
- 0xfe, 0x12, 0x6c, 0x2e, 0x33, 0x9e, 0xa2, 0xda, 0x9b, 0xd2, 0xeb, 0xeb, 0x09, 0xf1, 0x8f, 0xfa,
- 0xda, 0x6e, 0x66, 0xa8, 0x49, 0x58, 0xda, 0xd5, 0xe9, 0x86, 0x0b, 0xef, 0xd0, 0xe0, 0xd0, 0x9e,
- 0xb4, 0x14, 0x5c, 0xc3, 0xe4, 0x9b, 0x43, 0x6f, 0xec, 0x2e, 0x78, 0x9f, 0xa9, 0xdb, 0xdd, 0xc4,
- 0x19, 0x1b, 0x78, 0x0a, 0xec, 0xd0, 0x54, 0x05, 0xc3, 0xd8, 0xa1, 0x7b, 0x0f, 0xaf, 0x8c, 0xc9,
- 0x43, 0xe2, 0x7d, 0x21, 0xd4, 0xc5, 0xb4, 0xde, 0xf9, 0xa0, 0x4f, 0xef, 0x55, 0x05, 0x8f, 0xfe,
- 0x89, 0xc5, 0x23, 0xc1, 0x24, 0xd3, 0xef, 0x84, 0x86, 0x95, 0x18, 0xf2, 0x99, 0xde, 0xdc, 0xb5,
- 0xb8, 0x68, 0x15, 0x17, 0xe4, 0xed, 0xb3, 0xb2, 0x32, 0xef, 0xeb, 0x65, 0x3b, 0x0a, 0x31, 0xc2,
- 0x51, 0xc5, 0xb5, 0x51, 0xf5, 0x0a, 0xb8, 0xc9, 0x4c, 0x25, 0x78, 0xbc, 0xf5, 0x8d, 0xf0, 0x05,
- 0x97, 0xc0, 0xa3, 0xf2, 0xcf, 0x3f, 0xd4, 0x4f, 0x67, 0xf4, 0x52, 0x02, 0x9f, 0x61, 0x00, 0x6b,
- 0xcf, 0x9e, 0xd8, 0x00, 0xb6, 0x2d, 0x7b, 0x93, 0x2c, 0x5d, 0x8b, 0x9f, 0xfe, 0x0a, 0x00, 0x00,
- 0xff, 0xff, 0x65, 0x76, 0xd7, 0xb9, 0xed, 0x04, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// TraceServiceClient is the client API for TraceService service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type TraceServiceClient interface {
- // After initialization, this RPC must be kept alive for the entire life of
- // the application. The agent pushes configs down to applications via a
- // stream.
- Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error)
- // For performance reasons, it is recommended to keep this RPC
- // alive for the entire life of the application.
- Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error)
-}
-
-type traceServiceClient struct {
- cc *grpc.ClientConn
-}
-
-func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient {
- return &traceServiceClient{cc}
-}
-
-func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) {
- stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...)
- if err != nil {
- return nil, err
- }
- x := &traceServiceConfigClient{stream}
- return x, nil
-}
-
-type TraceService_ConfigClient interface {
- Send(*CurrentLibraryConfig) error
- Recv() (*UpdatedLibraryConfig, error)
- grpc.ClientStream
-}
-
-type traceServiceConfigClient struct {
- grpc.ClientStream
-}
-
-func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) {
- m := new(UpdatedLibraryConfig)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) {
- stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...)
- if err != nil {
- return nil, err
- }
- x := &traceServiceExportClient{stream}
- return x, nil
-}
-
-type TraceService_ExportClient interface {
- Send(*ExportTraceServiceRequest) error
- Recv() (*ExportTraceServiceResponse, error)
- grpc.ClientStream
-}
-
-type traceServiceExportClient struct {
- grpc.ClientStream
-}
-
-func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) {
- m := new(ExportTraceServiceResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// TraceServiceServer is the server API for TraceService service.
-type TraceServiceServer interface {
- // After initialization, this RPC must be kept alive for the entire life of
- // the application. The agent pushes configs down to applications via a
- // stream.
- Config(TraceService_ConfigServer) error
- // For performance reasons, it is recommended to keep this RPC
- // alive for the entire life of the application.
- Export(TraceService_ExportServer) error
-}
-
-// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations.
-type UnimplementedTraceServiceServer struct {
-}
-
-func (*UnimplementedTraceServiceServer) Config(srv TraceService_ConfigServer) error {
- return status.Errorf(codes.Unimplemented, "method Config not implemented")
-}
-func (*UnimplementedTraceServiceServer) Export(srv TraceService_ExportServer) error {
- return status.Errorf(codes.Unimplemented, "method Export not implemented")
-}
-
-func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) {
- s.RegisterService(&_TraceService_serviceDesc, srv)
-}
-
-func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream})
-}
-
-type TraceService_ConfigServer interface {
- Send(*UpdatedLibraryConfig) error
- Recv() (*CurrentLibraryConfig, error)
- grpc.ServerStream
-}
-
-type traceServiceConfigServer struct {
- grpc.ServerStream
-}
-
-func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) {
- m := new(CurrentLibraryConfig)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream})
-}
-
-type TraceService_ExportServer interface {
- Send(*ExportTraceServiceResponse) error
- Recv() (*ExportTraceServiceRequest, error)
- grpc.ServerStream
-}
-
-type traceServiceExportServer struct {
- grpc.ServerStream
-}
-
-func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) {
- m := new(ExportTraceServiceRequest)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-var _TraceService_serviceDesc = grpc.ServiceDesc{
- ServiceName: "opencensus.proto.agent.trace.v1.TraceService",
- HandlerType: (*TraceServiceServer)(nil),
- Methods: []grpc.MethodDesc{},
- Streams: []grpc.StreamDesc{
- {
- StreamName: "Config",
- Handler: _TraceService_Config_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- {
- StreamName: "Export",
- Handler: _TraceService_Export_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- },
- Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto",
-}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go
deleted file mode 100644
index 334331b0d..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: opencensus/proto/agent/trace/v1/trace_service.proto
-
-/*
-Package v1 is a reverse proxy.
-
-It translates gRPC into RESTful JSON APIs.
-*/
-package v1
-
-import (
- "context"
- "io"
- "net/http"
-
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/runtime"
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/status"
-)
-
-var _ codes.Code
-var _ io.Reader
-var _ status.Status
-var _ = runtime.String
-var _ = utilities.NewDoubleArray
-
-func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (TraceService_ExportClient, runtime.ServerMetadata, error) {
- var metadata runtime.ServerMetadata
- stream, err := client.Export(ctx)
- if err != nil {
- grpclog.Infof("Failed to start streaming: %v", err)
- return nil, metadata, err
- }
- dec := marshaler.NewDecoder(req.Body)
- handleSend := func() error {
- var protoReq ExportTraceServiceRequest
- err := dec.Decode(&protoReq)
- if err == io.EOF {
- return err
- }
- if err != nil {
- grpclog.Infof("Failed to decode request: %v", err)
- return err
- }
- if err := stream.Send(&protoReq); err != nil {
- grpclog.Infof("Failed to send request: %v", err)
- return err
- }
- return nil
- }
- if err := handleSend(); err != nil {
- if cerr := stream.CloseSend(); cerr != nil {
- grpclog.Infof("Failed to terminate client stream: %v", cerr)
- }
- if err == io.EOF {
- return stream, metadata, nil
- }
- return nil, metadata, err
- }
- go func() {
- for {
- if err := handleSend(); err != nil {
- break
- }
- }
- if err := stream.CloseSend(); err != nil {
- grpclog.Infof("Failed to terminate client stream: %v", err)
- }
- }()
- header, err := stream.Header()
- if err != nil {
- grpclog.Infof("Failed to get header from client: %v", err)
- return nil, metadata, err
- }
- metadata.HeaderMD = header
- return stream, metadata, nil
-}
-
-// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterTraceServiceHandler(ctx, mux, conn)
-}
-
-// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn))
-}
-
-// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService
-// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "TraceServiceClient" to call the correct interceptors.
-func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error {
-
- mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, ""))
-)
-
-var (
- forward_TraceService_Export_0 = runtime.ForwardResponseStream
-)
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go
deleted file mode 100644
index 466b23428..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go
+++ /dev/null
@@ -1,1127 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: opencensus/proto/metrics/v1/metrics.proto
-
-package v1
-
-import (
- fmt "fmt"
- v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
- proto "github.com/golang/protobuf/proto"
- timestamp "github.com/golang/protobuf/ptypes/timestamp"
- wrappers "github.com/golang/protobuf/ptypes/wrappers"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// The kind of metric. It describes how the data is reported.
-//
-// A gauge is an instantaneous measurement of a value.
-//
-// A cumulative measurement is a value accumulated over a time interval. In
-// a time series, cumulative measurements should have the same start time,
-// increasing values and increasing end times, until an event resets the
-// cumulative value to zero and sets a new start time for the following
-// points.
-type MetricDescriptor_Type int32
-
-const (
- // Do not use this default value.
- MetricDescriptor_UNSPECIFIED MetricDescriptor_Type = 0
- // Integer gauge. The value can go both up and down.
- MetricDescriptor_GAUGE_INT64 MetricDescriptor_Type = 1
- // Floating point gauge. The value can go both up and down.
- MetricDescriptor_GAUGE_DOUBLE MetricDescriptor_Type = 2
- // Distribution gauge measurement. The count and sum can go both up and
- // down. Recorded values are always >= 0.
- // Used in scenarios like a snapshot of time the current items in a queue
- // have spent there.
- MetricDescriptor_GAUGE_DISTRIBUTION MetricDescriptor_Type = 3
- // Integer cumulative measurement. The value cannot decrease, if resets
- // then the start_time should also be reset.
- MetricDescriptor_CUMULATIVE_INT64 MetricDescriptor_Type = 4
- // Floating point cumulative measurement. The value cannot decrease, if
- // resets then the start_time should also be reset. Recorded values are
- // always >= 0.
- MetricDescriptor_CUMULATIVE_DOUBLE MetricDescriptor_Type = 5
- // Distribution cumulative measurement. The count and sum cannot decrease,
- // if resets then the start_time should also be reset.
- MetricDescriptor_CUMULATIVE_DISTRIBUTION MetricDescriptor_Type = 6
- // Some frameworks implemented Histograms as a summary of observations
- // (usually things like request durations and response sizes). While it
- // also provides a total count of observations and a sum of all observed
- // values, it calculates configurable percentiles over a sliding time
- // window. This is not recommended, since it cannot be aggregated.
- MetricDescriptor_SUMMARY MetricDescriptor_Type = 7
-)
-
-var MetricDescriptor_Type_name = map[int32]string{
- 0: "UNSPECIFIED",
- 1: "GAUGE_INT64",
- 2: "GAUGE_DOUBLE",
- 3: "GAUGE_DISTRIBUTION",
- 4: "CUMULATIVE_INT64",
- 5: "CUMULATIVE_DOUBLE",
- 6: "CUMULATIVE_DISTRIBUTION",
- 7: "SUMMARY",
-}
-
-var MetricDescriptor_Type_value = map[string]int32{
- "UNSPECIFIED": 0,
- "GAUGE_INT64": 1,
- "GAUGE_DOUBLE": 2,
- "GAUGE_DISTRIBUTION": 3,
- "CUMULATIVE_INT64": 4,
- "CUMULATIVE_DOUBLE": 5,
- "CUMULATIVE_DISTRIBUTION": 6,
- "SUMMARY": 7,
-}
-
-func (x MetricDescriptor_Type) String() string {
- return proto.EnumName(MetricDescriptor_Type_name, int32(x))
-}
-
-func (MetricDescriptor_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{1, 0}
-}
-
-// Defines a Metric which has one or more timeseries.
-type Metric struct {
- // The descriptor of the Metric.
- // TODO(issue #152): consider only sending the name of descriptor for
- // optimization.
- MetricDescriptor *MetricDescriptor `protobuf:"bytes,1,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"`
- // One or more timeseries for a single metric, where each timeseries has
- // one or more points.
- Timeseries []*TimeSeries `protobuf:"bytes,2,rep,name=timeseries,proto3" json:"timeseries,omitempty"`
- // The resource for the metric. If unset, it may be set to a default value
- // provided for a sequence of messages in an RPC stream.
- Resource *v1.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Metric) Reset() { *m = Metric{} }
-func (m *Metric) String() string { return proto.CompactTextString(m) }
-func (*Metric) ProtoMessage() {}
-func (*Metric) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{0}
-}
-
-func (m *Metric) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Metric.Unmarshal(m, b)
-}
-func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
-}
-func (m *Metric) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metric.Merge(m, src)
-}
-func (m *Metric) XXX_Size() int {
- return xxx_messageInfo_Metric.Size(m)
-}
-func (m *Metric) XXX_DiscardUnknown() {
- xxx_messageInfo_Metric.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Metric proto.InternalMessageInfo
-
-func (m *Metric) GetMetricDescriptor() *MetricDescriptor {
- if m != nil {
- return m.MetricDescriptor
- }
- return nil
-}
-
-func (m *Metric) GetTimeseries() []*TimeSeries {
- if m != nil {
- return m.Timeseries
- }
- return nil
-}
-
-func (m *Metric) GetResource() *v1.Resource {
- if m != nil {
- return m.Resource
- }
- return nil
-}
-
-// Defines a metric type and its schema.
-type MetricDescriptor struct {
- // The metric type, including its DNS name prefix. It must be unique.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // A detailed description of the metric, which can be used in documentation.
- Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
- // The unit in which the metric value is reported. Follows the format
- // described by http://unitsofmeasure.org/ucum.html.
- Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"`
- Type MetricDescriptor_Type `protobuf:"varint,4,opt,name=type,proto3,enum=opencensus.proto.metrics.v1.MetricDescriptor_Type" json:"type,omitempty"`
- // The label keys associated with the metric descriptor.
- LabelKeys []*LabelKey `protobuf:"bytes,5,rep,name=label_keys,json=labelKeys,proto3" json:"label_keys,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} }
-func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) }
-func (*MetricDescriptor) ProtoMessage() {}
-func (*MetricDescriptor) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{1}
-}
-
-func (m *MetricDescriptor) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MetricDescriptor.Unmarshal(m, b)
-}
-func (m *MetricDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MetricDescriptor.Marshal(b, m, deterministic)
-}
-func (m *MetricDescriptor) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricDescriptor.Merge(m, src)
-}
-func (m *MetricDescriptor) XXX_Size() int {
- return xxx_messageInfo_MetricDescriptor.Size(m)
-}
-func (m *MetricDescriptor) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricDescriptor.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricDescriptor proto.InternalMessageInfo
-
-func (m *MetricDescriptor) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *MetricDescriptor) GetDescription() string {
- if m != nil {
- return m.Description
- }
- return ""
-}
-
-func (m *MetricDescriptor) GetUnit() string {
- if m != nil {
- return m.Unit
- }
- return ""
-}
-
-func (m *MetricDescriptor) GetType() MetricDescriptor_Type {
- if m != nil {
- return m.Type
- }
- return MetricDescriptor_UNSPECIFIED
-}
-
-func (m *MetricDescriptor) GetLabelKeys() []*LabelKey {
- if m != nil {
- return m.LabelKeys
- }
- return nil
-}
-
-// Defines a label key associated with a metric descriptor.
-type LabelKey struct {
- // The key for the label.
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // A human-readable description of what this label key represents.
- Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LabelKey) Reset() { *m = LabelKey{} }
-func (m *LabelKey) String() string { return proto.CompactTextString(m) }
-func (*LabelKey) ProtoMessage() {}
-func (*LabelKey) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{2}
-}
-
-func (m *LabelKey) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LabelKey.Unmarshal(m, b)
-}
-func (m *LabelKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LabelKey.Marshal(b, m, deterministic)
-}
-func (m *LabelKey) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelKey.Merge(m, src)
-}
-func (m *LabelKey) XXX_Size() int {
- return xxx_messageInfo_LabelKey.Size(m)
-}
-func (m *LabelKey) XXX_DiscardUnknown() {
- xxx_messageInfo_LabelKey.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LabelKey proto.InternalMessageInfo
-
-func (m *LabelKey) GetKey() string {
- if m != nil {
- return m.Key
- }
- return ""
-}
-
-func (m *LabelKey) GetDescription() string {
- if m != nil {
- return m.Description
- }
- return ""
-}
-
-// A collection of data points that describes the time-varying values
-// of a metric.
-type TimeSeries struct {
- // Must be present for cumulative metrics. The time when the cumulative value
- // was reset to zero. Exclusive. The cumulative value is over the time interval
- // (start_timestamp, timestamp]. If not specified, the backend can use the
- // previous recorded value.
- StartTimestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
- // The set of label values that uniquely identify this timeseries. Applies to
- // all points. The order of label values must match that of label keys in the
- // metric descriptor.
- LabelValues []*LabelValue `protobuf:"bytes,2,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"`
- // The data points of this timeseries. Point.value type MUST match the
- // MetricDescriptor.type.
- Points []*Point `protobuf:"bytes,3,rep,name=points,proto3" json:"points,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TimeSeries) Reset() { *m = TimeSeries{} }
-func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
-func (*TimeSeries) ProtoMessage() {}
-func (*TimeSeries) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{3}
-}
-
-func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TimeSeries.Unmarshal(m, b)
-}
-func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic)
-}
-func (m *TimeSeries) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TimeSeries.Merge(m, src)
-}
-func (m *TimeSeries) XXX_Size() int {
- return xxx_messageInfo_TimeSeries.Size(m)
-}
-func (m *TimeSeries) XXX_DiscardUnknown() {
- xxx_messageInfo_TimeSeries.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TimeSeries proto.InternalMessageInfo
-
-func (m *TimeSeries) GetStartTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.StartTimestamp
- }
- return nil
-}
-
-func (m *TimeSeries) GetLabelValues() []*LabelValue {
- if m != nil {
- return m.LabelValues
- }
- return nil
-}
-
-func (m *TimeSeries) GetPoints() []*Point {
- if m != nil {
- return m.Points
- }
- return nil
-}
-
-type LabelValue struct {
- // The value for the label.
- Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
- // If false the value field is ignored and considered not set.
- // This is used to differentiate a missing label from an empty string.
- HasValue bool `protobuf:"varint,2,opt,name=has_value,json=hasValue,proto3" json:"has_value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LabelValue) Reset() { *m = LabelValue{} }
-func (m *LabelValue) String() string { return proto.CompactTextString(m) }
-func (*LabelValue) ProtoMessage() {}
-func (*LabelValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{4}
-}
-
-func (m *LabelValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LabelValue.Unmarshal(m, b)
-}
-func (m *LabelValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LabelValue.Marshal(b, m, deterministic)
-}
-func (m *LabelValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelValue.Merge(m, src)
-}
-func (m *LabelValue) XXX_Size() int {
- return xxx_messageInfo_LabelValue.Size(m)
-}
-func (m *LabelValue) XXX_DiscardUnknown() {
- xxx_messageInfo_LabelValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LabelValue proto.InternalMessageInfo
-
-func (m *LabelValue) GetValue() string {
- if m != nil {
- return m.Value
- }
- return ""
-}
-
-func (m *LabelValue) GetHasValue() bool {
- if m != nil {
- return m.HasValue
- }
- return false
-}
-
-// A timestamped measurement.
-type Point struct {
- // The moment when this point was recorded. Inclusive.
- // If not specified, the timestamp will be decided by the backend.
- Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
- // The actual point value.
- //
- // Types that are valid to be assigned to Value:
- // *Point_Int64Value
- // *Point_DoubleValue
- // *Point_DistributionValue
- // *Point_SummaryValue
- Value isPoint_Value `protobuf_oneof:"value"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Point) Reset() { *m = Point{} }
-func (m *Point) String() string { return proto.CompactTextString(m) }
-func (*Point) ProtoMessage() {}
-func (*Point) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{5}
-}
-
-func (m *Point) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Point.Unmarshal(m, b)
-}
-func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Point.Marshal(b, m, deterministic)
-}
-func (m *Point) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Point.Merge(m, src)
-}
-func (m *Point) XXX_Size() int {
- return xxx_messageInfo_Point.Size(m)
-}
-func (m *Point) XXX_DiscardUnknown() {
- xxx_messageInfo_Point.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Point proto.InternalMessageInfo
-
-func (m *Point) GetTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.Timestamp
- }
- return nil
-}
-
-type isPoint_Value interface {
- isPoint_Value()
-}
-
-type Point_Int64Value struct {
- Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"`
-}
-
-type Point_DoubleValue struct {
- DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"`
-}
-
-type Point_DistributionValue struct {
- DistributionValue *DistributionValue `protobuf:"bytes,4,opt,name=distribution_value,json=distributionValue,proto3,oneof"`
-}
-
-type Point_SummaryValue struct {
- SummaryValue *SummaryValue `protobuf:"bytes,5,opt,name=summary_value,json=summaryValue,proto3,oneof"`
-}
-
-func (*Point_Int64Value) isPoint_Value() {}
-
-func (*Point_DoubleValue) isPoint_Value() {}
-
-func (*Point_DistributionValue) isPoint_Value() {}
-
-func (*Point_SummaryValue) isPoint_Value() {}
-
-func (m *Point) GetValue() isPoint_Value {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *Point) GetInt64Value() int64 {
- if x, ok := m.GetValue().(*Point_Int64Value); ok {
- return x.Int64Value
- }
- return 0
-}
-
-func (m *Point) GetDoubleValue() float64 {
- if x, ok := m.GetValue().(*Point_DoubleValue); ok {
- return x.DoubleValue
- }
- return 0
-}
-
-func (m *Point) GetDistributionValue() *DistributionValue {
- if x, ok := m.GetValue().(*Point_DistributionValue); ok {
- return x.DistributionValue
- }
- return nil
-}
-
-func (m *Point) GetSummaryValue() *SummaryValue {
- if x, ok := m.GetValue().(*Point_SummaryValue); ok {
- return x.SummaryValue
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Point) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Point_Int64Value)(nil),
- (*Point_DoubleValue)(nil),
- (*Point_DistributionValue)(nil),
- (*Point_SummaryValue)(nil),
- }
-}
-
-// Distribution contains summary statistics for a population of values. It
-// optionally contains a histogram representing the distribution of those
-// values across a set of buckets.
-type DistributionValue struct {
- // The number of values in the population. Must be non-negative. This value
- // must equal the sum of the values in bucket_counts if a histogram is
- // provided.
- Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
- // The sum of the values in the population. If count is zero then this field
- // must be zero.
- Sum float64 `protobuf:"fixed64,2,opt,name=sum,proto3" json:"sum,omitempty"`
- // The sum of squared deviations from the mean of the values in the
- // population. For values x_i this is:
- //
- // Sum[i=1..n]((x_i - mean)^2)
- //
- // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition
- // describes Welford's method for accumulating this sum in one pass.
- //
- // If count is zero then this field must be zero.
- SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"`
- // Don't change bucket boundaries within a TimeSeries if your backend doesn't
- // support this.
- // TODO(issue #152): consider not required to send bucket options for
- // optimization.
- BucketOptions *DistributionValue_BucketOptions `protobuf:"bytes,4,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"`
- // If the distribution does not have a histogram, then omit this field.
- // If there is a histogram, then the sum of the values in the Bucket counts
- // must equal the value in the count field of the distribution.
- Buckets []*DistributionValue_Bucket `protobuf:"bytes,5,rep,name=buckets,proto3" json:"buckets,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DistributionValue) Reset() { *m = DistributionValue{} }
-func (m *DistributionValue) String() string { return proto.CompactTextString(m) }
-func (*DistributionValue) ProtoMessage() {}
-func (*DistributionValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{6}
-}
-
-func (m *DistributionValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DistributionValue.Unmarshal(m, b)
-}
-func (m *DistributionValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DistributionValue.Marshal(b, m, deterministic)
-}
-func (m *DistributionValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DistributionValue.Merge(m, src)
-}
-func (m *DistributionValue) XXX_Size() int {
- return xxx_messageInfo_DistributionValue.Size(m)
-}
-func (m *DistributionValue) XXX_DiscardUnknown() {
- xxx_messageInfo_DistributionValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DistributionValue proto.InternalMessageInfo
-
-func (m *DistributionValue) GetCount() int64 {
- if m != nil {
- return m.Count
- }
- return 0
-}
-
-func (m *DistributionValue) GetSum() float64 {
- if m != nil {
- return m.Sum
- }
- return 0
-}
-
-func (m *DistributionValue) GetSumOfSquaredDeviation() float64 {
- if m != nil {
- return m.SumOfSquaredDeviation
- }
- return 0
-}
-
-func (m *DistributionValue) GetBucketOptions() *DistributionValue_BucketOptions {
- if m != nil {
- return m.BucketOptions
- }
- return nil
-}
-
-func (m *DistributionValue) GetBuckets() []*DistributionValue_Bucket {
- if m != nil {
- return m.Buckets
- }
- return nil
-}
-
-// A Distribution may optionally contain a histogram of the values in the
-// population. The bucket boundaries for that histogram are described by
-// BucketOptions.
-//
-// If bucket_options has no type, then there is no histogram associated with
-// the Distribution.
-type DistributionValue_BucketOptions struct {
- // Types that are valid to be assigned to Type:
- // *DistributionValue_BucketOptions_Explicit_
- Type isDistributionValue_BucketOptions_Type `protobuf_oneof:"type"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DistributionValue_BucketOptions) Reset() { *m = DistributionValue_BucketOptions{} }
-func (m *DistributionValue_BucketOptions) String() string { return proto.CompactTextString(m) }
-func (*DistributionValue_BucketOptions) ProtoMessage() {}
-func (*DistributionValue_BucketOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{6, 0}
-}
-
-func (m *DistributionValue_BucketOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DistributionValue_BucketOptions.Unmarshal(m, b)
-}
-func (m *DistributionValue_BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DistributionValue_BucketOptions.Marshal(b, m, deterministic)
-}
-func (m *DistributionValue_BucketOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DistributionValue_BucketOptions.Merge(m, src)
-}
-func (m *DistributionValue_BucketOptions) XXX_Size() int {
- return xxx_messageInfo_DistributionValue_BucketOptions.Size(m)
-}
-func (m *DistributionValue_BucketOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_DistributionValue_BucketOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DistributionValue_BucketOptions proto.InternalMessageInfo
-
-type isDistributionValue_BucketOptions_Type interface {
- isDistributionValue_BucketOptions_Type()
-}
-
-type DistributionValue_BucketOptions_Explicit_ struct {
- Explicit *DistributionValue_BucketOptions_Explicit `protobuf:"bytes,1,opt,name=explicit,proto3,oneof"`
-}
-
-func (*DistributionValue_BucketOptions_Explicit_) isDistributionValue_BucketOptions_Type() {}
-
-func (m *DistributionValue_BucketOptions) GetType() isDistributionValue_BucketOptions_Type {
- if m != nil {
- return m.Type
- }
- return nil
-}
-
-func (m *DistributionValue_BucketOptions) GetExplicit() *DistributionValue_BucketOptions_Explicit {
- if x, ok := m.GetType().(*DistributionValue_BucketOptions_Explicit_); ok {
- return x.Explicit
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*DistributionValue_BucketOptions) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*DistributionValue_BucketOptions_Explicit_)(nil),
- }
-}
-
-// Specifies a set of buckets with arbitrary upper-bounds.
-// This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket
-// index i are:
-//
-// [0, bucket_bounds[i]) for i == 0
-// [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-1
-// [bucket_bounds[i], +infinity) for i == N-1
-type DistributionValue_BucketOptions_Explicit struct {
- // The values must be strictly increasing and > 0.
- Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DistributionValue_BucketOptions_Explicit) Reset() {
- *m = DistributionValue_BucketOptions_Explicit{}
-}
-func (m *DistributionValue_BucketOptions_Explicit) String() string { return proto.CompactTextString(m) }
-func (*DistributionValue_BucketOptions_Explicit) ProtoMessage() {}
-func (*DistributionValue_BucketOptions_Explicit) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{6, 0, 0}
-}
-
-func (m *DistributionValue_BucketOptions_Explicit) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Unmarshal(m, b)
-}
-func (m *DistributionValue_BucketOptions_Explicit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Marshal(b, m, deterministic)
-}
-func (m *DistributionValue_BucketOptions_Explicit) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Merge(m, src)
-}
-func (m *DistributionValue_BucketOptions_Explicit) XXX_Size() int {
- return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Size(m)
-}
-func (m *DistributionValue_BucketOptions_Explicit) XXX_DiscardUnknown() {
- xxx_messageInfo_DistributionValue_BucketOptions_Explicit.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DistributionValue_BucketOptions_Explicit proto.InternalMessageInfo
-
-func (m *DistributionValue_BucketOptions_Explicit) GetBounds() []float64 {
- if m != nil {
- return m.Bounds
- }
- return nil
-}
-
-type DistributionValue_Bucket struct {
- // The number of values in each bucket of the histogram, as described in
- // bucket_bounds.
- Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
- // If the distribution does not have a histogram, then omit this field.
- Exemplar *DistributionValue_Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DistributionValue_Bucket) Reset() { *m = DistributionValue_Bucket{} }
-func (m *DistributionValue_Bucket) String() string { return proto.CompactTextString(m) }
-func (*DistributionValue_Bucket) ProtoMessage() {}
-func (*DistributionValue_Bucket) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{6, 1}
-}
-
-func (m *DistributionValue_Bucket) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DistributionValue_Bucket.Unmarshal(m, b)
-}
-func (m *DistributionValue_Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DistributionValue_Bucket.Marshal(b, m, deterministic)
-}
-func (m *DistributionValue_Bucket) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DistributionValue_Bucket.Merge(m, src)
-}
-func (m *DistributionValue_Bucket) XXX_Size() int {
- return xxx_messageInfo_DistributionValue_Bucket.Size(m)
-}
-func (m *DistributionValue_Bucket) XXX_DiscardUnknown() {
- xxx_messageInfo_DistributionValue_Bucket.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DistributionValue_Bucket proto.InternalMessageInfo
-
-func (m *DistributionValue_Bucket) GetCount() int64 {
- if m != nil {
- return m.Count
- }
- return 0
-}
-
-func (m *DistributionValue_Bucket) GetExemplar() *DistributionValue_Exemplar {
- if m != nil {
- return m.Exemplar
- }
- return nil
-}
-
-// Exemplars are example points that may be used to annotate aggregated
-// Distribution values. They are metadata that gives information about a
-// particular value added to a Distribution bucket.
-type DistributionValue_Exemplar struct {
- // Value of the exemplar point. It determines which bucket the exemplar
- // belongs to.
- Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
- // The observation (sampling) time of the above value.
- Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
- // Contextual information about the example value.
- Attachments map[string]string `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DistributionValue_Exemplar) Reset() { *m = DistributionValue_Exemplar{} }
-func (m *DistributionValue_Exemplar) String() string { return proto.CompactTextString(m) }
-func (*DistributionValue_Exemplar) ProtoMessage() {}
-func (*DistributionValue_Exemplar) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{6, 2}
-}
-
-func (m *DistributionValue_Exemplar) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DistributionValue_Exemplar.Unmarshal(m, b)
-}
-func (m *DistributionValue_Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DistributionValue_Exemplar.Marshal(b, m, deterministic)
-}
-func (m *DistributionValue_Exemplar) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DistributionValue_Exemplar.Merge(m, src)
-}
-func (m *DistributionValue_Exemplar) XXX_Size() int {
- return xxx_messageInfo_DistributionValue_Exemplar.Size(m)
-}
-func (m *DistributionValue_Exemplar) XXX_DiscardUnknown() {
- xxx_messageInfo_DistributionValue_Exemplar.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DistributionValue_Exemplar proto.InternalMessageInfo
-
-func (m *DistributionValue_Exemplar) GetValue() float64 {
- if m != nil {
- return m.Value
- }
- return 0
-}
-
-func (m *DistributionValue_Exemplar) GetTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.Timestamp
- }
- return nil
-}
-
-func (m *DistributionValue_Exemplar) GetAttachments() map[string]string {
- if m != nil {
- return m.Attachments
- }
- return nil
-}
-
-// The start_timestamp only applies to the count and sum in the SummaryValue.
-type SummaryValue struct {
- // The total number of recorded values since start_time. Optional since
- // some systems don't expose this.
- Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"`
- // The total sum of recorded values since start_time. Optional since some
- // systems don't expose this. If count is zero then this field must be zero.
- // This field must be unset if the sum is not available.
- Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"`
- // Values calculated over an arbitrary time window.
- Snapshot *SummaryValue_Snapshot `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SummaryValue) Reset() { *m = SummaryValue{} }
-func (m *SummaryValue) String() string { return proto.CompactTextString(m) }
-func (*SummaryValue) ProtoMessage() {}
-func (*SummaryValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{7}
-}
-
-func (m *SummaryValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SummaryValue.Unmarshal(m, b)
-}
-func (m *SummaryValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SummaryValue.Marshal(b, m, deterministic)
-}
-func (m *SummaryValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SummaryValue.Merge(m, src)
-}
-func (m *SummaryValue) XXX_Size() int {
- return xxx_messageInfo_SummaryValue.Size(m)
-}
-func (m *SummaryValue) XXX_DiscardUnknown() {
- xxx_messageInfo_SummaryValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SummaryValue proto.InternalMessageInfo
-
-func (m *SummaryValue) GetCount() *wrappers.Int64Value {
- if m != nil {
- return m.Count
- }
- return nil
-}
-
-func (m *SummaryValue) GetSum() *wrappers.DoubleValue {
- if m != nil {
- return m.Sum
- }
- return nil
-}
-
-func (m *SummaryValue) GetSnapshot() *SummaryValue_Snapshot {
- if m != nil {
- return m.Snapshot
- }
- return nil
-}
-
-// The values in this message can be reset at arbitrary unknown times, with
-// the requirement that all of them are reset at the same time.
-type SummaryValue_Snapshot struct {
- // The number of values in the snapshot. Optional since some systems don't
- // expose this.
- Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"`
- // The sum of values in the snapshot. Optional since some systems don't
- // expose this. If count is zero then this field must be zero or not set
- // (if not supported).
- Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"`
- // A list of values at different percentiles of the distribution calculated
- // from the current snapshot. The percentiles must be strictly increasing.
- PercentileValues []*SummaryValue_Snapshot_ValueAtPercentile `protobuf:"bytes,3,rep,name=percentile_values,json=percentileValues,proto3" json:"percentile_values,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SummaryValue_Snapshot) Reset() { *m = SummaryValue_Snapshot{} }
-func (m *SummaryValue_Snapshot) String() string { return proto.CompactTextString(m) }
-func (*SummaryValue_Snapshot) ProtoMessage() {}
-func (*SummaryValue_Snapshot) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{7, 0}
-}
-
-func (m *SummaryValue_Snapshot) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SummaryValue_Snapshot.Unmarshal(m, b)
-}
-func (m *SummaryValue_Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SummaryValue_Snapshot.Marshal(b, m, deterministic)
-}
-func (m *SummaryValue_Snapshot) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SummaryValue_Snapshot.Merge(m, src)
-}
-func (m *SummaryValue_Snapshot) XXX_Size() int {
- return xxx_messageInfo_SummaryValue_Snapshot.Size(m)
-}
-func (m *SummaryValue_Snapshot) XXX_DiscardUnknown() {
- xxx_messageInfo_SummaryValue_Snapshot.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SummaryValue_Snapshot proto.InternalMessageInfo
-
-func (m *SummaryValue_Snapshot) GetCount() *wrappers.Int64Value {
- if m != nil {
- return m.Count
- }
- return nil
-}
-
-func (m *SummaryValue_Snapshot) GetSum() *wrappers.DoubleValue {
- if m != nil {
- return m.Sum
- }
- return nil
-}
-
-func (m *SummaryValue_Snapshot) GetPercentileValues() []*SummaryValue_Snapshot_ValueAtPercentile {
- if m != nil {
- return m.PercentileValues
- }
- return nil
-}
-
-// Represents the value at a given percentile of a distribution.
-type SummaryValue_Snapshot_ValueAtPercentile struct {
- // The percentile of a distribution. Must be in the interval
- // (0.0, 100.0].
- Percentile float64 `protobuf:"fixed64,1,opt,name=percentile,proto3" json:"percentile,omitempty"`
- // The value at the given percentile of a distribution.
- Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SummaryValue_Snapshot_ValueAtPercentile) Reset() {
- *m = SummaryValue_Snapshot_ValueAtPercentile{}
-}
-func (m *SummaryValue_Snapshot_ValueAtPercentile) String() string { return proto.CompactTextString(m) }
-func (*SummaryValue_Snapshot_ValueAtPercentile) ProtoMessage() {}
-func (*SummaryValue_Snapshot_ValueAtPercentile) Descriptor() ([]byte, []int) {
- return fileDescriptor_0ee3deb72053811a, []int{7, 0, 0}
-}
-
-func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Unmarshal(m, b)
-}
-func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Marshal(b, m, deterministic)
-}
-func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Merge(m, src)
-}
-func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Size() int {
- return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Size(m)
-}
-func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_DiscardUnknown() {
- xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile proto.InternalMessageInfo
-
-func (m *SummaryValue_Snapshot_ValueAtPercentile) GetPercentile() float64 {
- if m != nil {
- return m.Percentile
- }
- return 0
-}
-
-func (m *SummaryValue_Snapshot_ValueAtPercentile) GetValue() float64 {
- if m != nil {
- return m.Value
- }
- return 0
-}
-
-func init() {
- proto.RegisterEnum("opencensus.proto.metrics.v1.MetricDescriptor_Type", MetricDescriptor_Type_name, MetricDescriptor_Type_value)
- proto.RegisterType((*Metric)(nil), "opencensus.proto.metrics.v1.Metric")
- proto.RegisterType((*MetricDescriptor)(nil), "opencensus.proto.metrics.v1.MetricDescriptor")
- proto.RegisterType((*LabelKey)(nil), "opencensus.proto.metrics.v1.LabelKey")
- proto.RegisterType((*TimeSeries)(nil), "opencensus.proto.metrics.v1.TimeSeries")
- proto.RegisterType((*LabelValue)(nil), "opencensus.proto.metrics.v1.LabelValue")
- proto.RegisterType((*Point)(nil), "opencensus.proto.metrics.v1.Point")
- proto.RegisterType((*DistributionValue)(nil), "opencensus.proto.metrics.v1.DistributionValue")
- proto.RegisterType((*DistributionValue_BucketOptions)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions")
- proto.RegisterType((*DistributionValue_BucketOptions_Explicit)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions.Explicit")
- proto.RegisterType((*DistributionValue_Bucket)(nil), "opencensus.proto.metrics.v1.DistributionValue.Bucket")
- proto.RegisterType((*DistributionValue_Exemplar)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar")
- proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar.AttachmentsEntry")
- proto.RegisterType((*SummaryValue)(nil), "opencensus.proto.metrics.v1.SummaryValue")
- proto.RegisterType((*SummaryValue_Snapshot)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot")
- proto.RegisterType((*SummaryValue_Snapshot_ValueAtPercentile)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot.ValueAtPercentile")
-}
-
-func init() {
- proto.RegisterFile("opencensus/proto/metrics/v1/metrics.proto", fileDescriptor_0ee3deb72053811a)
-}
-
-var fileDescriptor_0ee3deb72053811a = []byte{
- // 1118 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x6e, 0x1b, 0xc5,
- 0x17, 0xcf, 0xda, 0x8e, 0xe3, 0x9c, 0x75, 0xd2, 0xf5, 0xa8, 0xed, 0xdf, 0x72, 0xfe, 0x0a, 0x61,
- 0x11, 0x90, 0x0a, 0x65, 0xad, 0x98, 0xd2, 0x56, 0x15, 0x2a, 0x8a, 0x63, 0x37, 0x31, 0x24, 0xb1,
- 0x35, 0xb6, 0x23, 0xd1, 0x1b, 0x6b, 0xbd, 0x9e, 0x24, 0x4b, 0xbc, 0x1f, 0xdd, 0x99, 0x0d, 0xf8,
- 0x05, 0x78, 0x02, 0xc4, 0x35, 0xb7, 0x88, 0xe7, 0xe0, 0x8a, 0x27, 0xe0, 0x15, 0xb8, 0x41, 0xbc,
- 0x01, 0xda, 0x99, 0xd9, 0x8f, 0xc4, 0x60, 0xea, 0x22, 0x71, 0x77, 0xe6, 0xcc, 0x39, 0xbf, 0xfd,
- 0x9d, 0xcf, 0x1d, 0x78, 0xe4, 0xf9, 0xc4, 0xb5, 0x88, 0x4b, 0x43, 0x5a, 0xf7, 0x03, 0x8f, 0x79,
- 0x75, 0x87, 0xb0, 0xc0, 0xb6, 0x68, 0xfd, 0x66, 0x3f, 0x16, 0x0d, 0x7e, 0x81, 0xb6, 0x52, 0x53,
- 0xa1, 0x31, 0xe2, 0xfb, 0x9b, 0xfd, 0xda, 0x3b, 0x97, 0x9e, 0x77, 0x39, 0x25, 0x02, 0x63, 0x1c,
- 0x5e, 0xd4, 0x99, 0xed, 0x10, 0xca, 0x4c, 0xc7, 0x17, 0xb6, 0xb5, 0xed, 0xbb, 0x06, 0x5f, 0x07,
- 0xa6, 0xef, 0x93, 0x40, 0x62, 0xd5, 0x3e, 0x9a, 0x23, 0x12, 0x10, 0xea, 0x85, 0x81, 0x45, 0x22,
- 0x26, 0xb1, 0x2c, 0x8c, 0xf5, 0x3f, 0x14, 0x28, 0x9e, 0xf2, 0x8f, 0xa3, 0x57, 0x50, 0x11, 0x34,
- 0x46, 0x13, 0x42, 0xad, 0xc0, 0xf6, 0x99, 0x17, 0x54, 0x95, 0x1d, 0x65, 0x57, 0x6d, 0xec, 0x19,
- 0x0b, 0x18, 0x1b, 0xc2, 0xbf, 0x95, 0x38, 0x61, 0xcd, 0xb9, 0xa3, 0x41, 0x47, 0x00, 0x3c, 0x0c,
- 0x12, 0xd8, 0x84, 0x56, 0x73, 0x3b, 0xf9, 0x5d, 0xb5, 0xf1, 0xe1, 0x42, 0xd0, 0x81, 0xed, 0x90,
- 0x3e, 0x37, 0xc7, 0x19, 0x57, 0xd4, 0x84, 0x52, 0x1c, 0x41, 0x35, 0xcf, 0xb9, 0x7d, 0x30, 0x0f,
- 0x93, 0xc4, 0x78, 0xb3, 0x6f, 0x60, 0x29, 0xe3, 0xc4, 0x4f, 0xff, 0x3e, 0x0f, 0xda, 0x5d, 0xce,
- 0x08, 0x41, 0xc1, 0x35, 0x1d, 0xc2, 0x03, 0x5e, 0xc7, 0x5c, 0x46, 0x3b, 0xa0, 0xc6, 0xa9, 0xb0,
- 0x3d, 0xb7, 0x9a, 0xe3, 0x57, 0x59, 0x55, 0xe4, 0x15, 0xba, 0x36, 0xe3, 0x54, 0xd6, 0x31, 0x97,
- 0xd1, 0x4b, 0x28, 0xb0, 0x99, 0x4f, 0xaa, 0x85, 0x1d, 0x65, 0x77, 0xb3, 0xd1, 0x58, 0x2a, 0x75,
- 0xc6, 0x60, 0xe6, 0x13, 0xcc, 0xfd, 0x51, 0x0b, 0x60, 0x6a, 0x8e, 0xc9, 0x74, 0x74, 0x4d, 0x66,
- 0xb4, 0xba, 0xca, 0x73, 0xf6, 0xfe, 0x42, 0xb4, 0x93, 0xc8, 0xfc, 0x0b, 0x32, 0xc3, 0xeb, 0x53,
- 0x29, 0x51, 0xfd, 0x47, 0x05, 0x0a, 0x11, 0x28, 0xba, 0x07, 0xea, 0xf0, 0xac, 0xdf, 0x6b, 0x1f,
- 0x76, 0x5e, 0x76, 0xda, 0x2d, 0x6d, 0x25, 0x52, 0x1c, 0x1d, 0x0c, 0x8f, 0xda, 0xa3, 0xce, 0xd9,
- 0xe0, 0xc9, 0x63, 0x4d, 0x41, 0x1a, 0x94, 0x85, 0xa2, 0xd5, 0x1d, 0x36, 0x4f, 0xda, 0x5a, 0x0e,
- 0x3d, 0x04, 0x24, 0x35, 0x9d, 0xfe, 0x00, 0x77, 0x9a, 0xc3, 0x41, 0xa7, 0x7b, 0xa6, 0xe5, 0xd1,
- 0x7d, 0xd0, 0x0e, 0x87, 0xa7, 0xc3, 0x93, 0x83, 0x41, 0xe7, 0x3c, 0xf6, 0x2f, 0xa0, 0x07, 0x50,
- 0xc9, 0x68, 0x25, 0xc8, 0x2a, 0xda, 0x82, 0xff, 0x65, 0xd5, 0x59, 0xa4, 0x22, 0x52, 0x61, 0xad,
- 0x3f, 0x3c, 0x3d, 0x3d, 0xc0, 0x5f, 0x6a, 0x6b, 0xfa, 0x0b, 0x28, 0xc5, 0x21, 0x20, 0x0d, 0xf2,
- 0xd7, 0x64, 0x26, 0xcb, 0x11, 0x89, 0xff, 0x5c, 0x0d, 0xfd, 0x57, 0x05, 0x20, 0xed, 0x1b, 0x74,
- 0x08, 0xf7, 0x28, 0x33, 0x03, 0x36, 0x4a, 0x26, 0x48, 0xb6, 0x73, 0xcd, 0x10, 0x23, 0x64, 0xc4,
- 0x23, 0xc4, 0xbb, 0x8d, 0x5b, 0xe0, 0x4d, 0xee, 0x92, 0x9c, 0xd1, 0xe7, 0x50, 0x16, 0x55, 0xb8,
- 0x31, 0xa7, 0xe1, 0x1b, 0xf6, 0x2e, 0x0f, 0xe2, 0x3c, 0xb2, 0xc7, 0xea, 0x34, 0x91, 0x29, 0x7a,
- 0x0e, 0x45, 0xdf, 0xb3, 0x5d, 0x46, 0xab, 0x79, 0x8e, 0xa2, 0x2f, 0x44, 0xe9, 0x45, 0xa6, 0x58,
- 0x7a, 0xe8, 0x9f, 0x01, 0xa4, 0xb0, 0xe8, 0x3e, 0xac, 0x72, 0x3e, 0x32, 0x3f, 0xe2, 0x80, 0xb6,
- 0x60, 0xfd, 0xca, 0xa4, 0x82, 0x29, 0xcf, 0x4f, 0x09, 0x97, 0xae, 0x4c, 0xca, 0x5d, 0xf4, 0x9f,
- 0x73, 0xb0, 0xca, 0x21, 0xd1, 0x33, 0x58, 0x5f, 0x26, 0x23, 0xa9, 0x31, 0x7a, 0x17, 0x54, 0xdb,
- 0x65, 0x4f, 0x1e, 0x67, 0x3e, 0x91, 0x3f, 0x5e, 0xc1, 0xc0, 0x95, 0x82, 0xd9, 0x7b, 0x50, 0x9e,
- 0x78, 0xe1, 0x78, 0x4a, 0xa4, 0x4d, 0x34, 0x19, 0xca, 0xf1, 0x0a, 0x56, 0x85, 0x56, 0x18, 0x8d,
- 0x00, 0x4d, 0x6c, 0xca, 0x02, 0x7b, 0x1c, 0x46, 0x85, 0x93, 0xa6, 0x05, 0x4e, 0xc5, 0x58, 0x98,
- 0x94, 0x56, 0xc6, 0x8d, 0x63, 0x1d, 0xaf, 0xe0, 0xca, 0xe4, 0xae, 0x12, 0xf5, 0x60, 0x83, 0x86,
- 0x8e, 0x63, 0x06, 0x33, 0x89, 0xbd, 0xca, 0xb1, 0x1f, 0x2d, 0xc4, 0xee, 0x0b, 0x8f, 0x18, 0xb6,
- 0x4c, 0x33, 0xe7, 0xe6, 0x9a, 0xcc, 0xb8, 0xfe, 0x4b, 0x11, 0x2a, 0x73, 0x2c, 0xa2, 0x82, 0x58,
- 0x5e, 0xe8, 0x32, 0x9e, 0xcf, 0x3c, 0x16, 0x87, 0xa8, 0x89, 0x69, 0xe8, 0xf0, 0x3c, 0x29, 0x38,
- 0x12, 0xd1, 0x53, 0xa8, 0xd2, 0xd0, 0x19, 0x79, 0x17, 0x23, 0xfa, 0x3a, 0x34, 0x03, 0x32, 0x19,
- 0x4d, 0xc8, 0x8d, 0x6d, 0xf2, 0x8e, 0xe6, 0xa9, 0xc2, 0x0f, 0x68, 0xe8, 0x74, 0x2f, 0xfa, 0xe2,
- 0xb6, 0x15, 0x5f, 0x22, 0x0b, 0x36, 0xc7, 0xa1, 0x75, 0x4d, 0xd8, 0xc8, 0xe3, 0xcd, 0x4e, 0x65,
- 0xba, 0x3e, 0x5d, 0x2e, 0x5d, 0x46, 0x93, 0x83, 0x74, 0x05, 0x06, 0xde, 0x18, 0x67, 0x8f, 0xa8,
- 0x0b, 0x6b, 0x42, 0x11, 0xef, 0x9b, 0x4f, 0xde, 0x0a, 0x1d, 0xc7, 0x28, 0xb5, 0x1f, 0x14, 0xd8,
- 0xb8, 0xf5, 0x45, 0x64, 0x41, 0x89, 0x7c, 0xe3, 0x4f, 0x6d, 0xcb, 0x66, 0xb2, 0xf7, 0xda, 0xff,
- 0x26, 0x02, 0xa3, 0x2d, 0xc1, 0x8e, 0x57, 0x70, 0x02, 0x5c, 0xd3, 0xa1, 0x14, 0xeb, 0xd1, 0x43,
- 0x28, 0x8e, 0xbd, 0xd0, 0x9d, 0xd0, 0xaa, 0xb2, 0x93, 0xdf, 0x55, 0xb0, 0x3c, 0x35, 0x8b, 0x62,
- 0x4d, 0xd7, 0x28, 0x14, 0x05, 0xe2, 0xdf, 0xd4, 0xb0, 0x1f, 0x11, 0x26, 0x8e, 0x3f, 0x35, 0x03,
- 0x5e, 0x48, 0xb5, 0xf1, 0x74, 0x49, 0xc2, 0x6d, 0xe9, 0x8e, 0x13, 0xa0, 0xda, 0xb7, 0xb9, 0x88,
- 0xa1, 0x38, 0xdc, 0x1e, 0x66, 0x25, 0x1e, 0xe6, 0x5b, 0x53, 0x9a, 0x5b, 0x66, 0x4a, 0xbf, 0x02,
- 0xd5, 0x64, 0xcc, 0xb4, 0xae, 0x1c, 0x92, 0xee, 0x9a, 0xe3, 0xb7, 0x24, 0x6d, 0x1c, 0xa4, 0x50,
- 0x6d, 0x97, 0x05, 0x33, 0x9c, 0x05, 0xaf, 0xbd, 0x00, 0xed, 0xae, 0xc1, 0x5f, 0xac, 0xee, 0x24,
- 0xc2, 0x5c, 0x66, 0x5d, 0x3d, 0xcf, 0x3d, 0x53, 0xf4, 0xdf, 0xf3, 0x50, 0xce, 0xce, 0x1d, 0xda,
- 0xcf, 0x16, 0x41, 0x6d, 0x6c, 0xcd, 0x85, 0xdc, 0x49, 0x76, 0x4d, 0x5c, 0x21, 0x23, 0x9d, 0x32,
- 0xb5, 0xf1, 0xff, 0x39, 0x87, 0x56, 0xba, 0x78, 0xc4, 0x0c, 0x9e, 0x41, 0x89, 0xba, 0xa6, 0x4f,
- 0xaf, 0x3c, 0x26, 0xdf, 0x10, 0x8d, 0x37, 0xde, 0x0b, 0x46, 0x5f, 0x7a, 0xe2, 0x04, 0xa3, 0xf6,
- 0x53, 0x0e, 0x4a, 0xb1, 0xfa, 0xbf, 0xe0, 0xff, 0x1a, 0x2a, 0x3e, 0x09, 0x2c, 0xe2, 0x32, 0x3b,
- 0x5e, 0xb3, 0x71, 0x95, 0x5b, 0xcb, 0x07, 0x62, 0xf0, 0xe3, 0x01, 0xeb, 0x25, 0x90, 0x58, 0x4b,
- 0xe1, 0xc5, 0x9f, 0xab, 0xd6, 0x81, 0xca, 0x9c, 0x19, 0xda, 0x06, 0x48, 0x0d, 0x65, 0xf3, 0x66,
- 0x34, 0xb7, 0xab, 0x1e, 0xf7, 0x75, 0xf3, 0x3b, 0x05, 0xb6, 0x6d, 0x6f, 0x11, 0xcf, 0x66, 0x59,
- 0x3c, 0x8b, 0x68, 0x2f, 0xba, 0xe8, 0x29, 0xaf, 0x5a, 0x97, 0x36, 0xbb, 0x0a, 0xc7, 0x86, 0xe5,
- 0x39, 0x75, 0xe1, 0xb3, 0x67, 0xbb, 0x94, 0x05, 0x61, 0xd4, 0x74, 0x7c, 0x3d, 0xd6, 0x53, 0xb8,
- 0x3d, 0xf1, 0xe6, 0xbd, 0x24, 0xee, 0xde, 0x65, 0xf6, 0x0d, 0xfe, 0x5b, 0x6e, 0xab, 0xeb, 0x13,
- 0xf7, 0x50, 0x7c, 0x93, 0x43, 0xcb, 0xe7, 0x17, 0x35, 0xce, 0xf7, 0xc7, 0x45, 0xee, 0xf6, 0xf1,
- 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xd0, 0xb4, 0x8d, 0xc7, 0x0b, 0x00, 0x00,
-}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go
deleted file mode 100644
index 5dba6a2a0..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: opencensus/proto/resource/v1/resource.proto
-
-package v1
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// Resource information.
-type Resource struct {
- // Type identifier for the resource.
- Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
- // Set of labels that describe the resource.
- Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Resource) Reset() { *m = Resource{} }
-func (m *Resource) String() string { return proto.CompactTextString(m) }
-func (*Resource) ProtoMessage() {}
-func (*Resource) Descriptor() ([]byte, []int) {
- return fileDescriptor_584700775a2fc762, []int{0}
-}
-
-func (m *Resource) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Resource.Unmarshal(m, b)
-}
-func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Resource.Marshal(b, m, deterministic)
-}
-func (m *Resource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Resource.Merge(m, src)
-}
-func (m *Resource) XXX_Size() int {
- return xxx_messageInfo_Resource.Size(m)
-}
-func (m *Resource) XXX_DiscardUnknown() {
- xxx_messageInfo_Resource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Resource proto.InternalMessageInfo
-
-func (m *Resource) GetType() string {
- if m != nil {
- return m.Type
- }
- return ""
-}
-
-func (m *Resource) GetLabels() map[string]string {
- if m != nil {
- return m.Labels
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*Resource)(nil), "opencensus.proto.resource.v1.Resource")
- proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.resource.v1.Resource.LabelsEntry")
-}
-
-func init() {
- proto.RegisterFile("opencensus/proto/resource/v1/resource.proto", fileDescriptor_584700775a2fc762)
-}
-
-var fileDescriptor_584700775a2fc762 = []byte{
- // 251 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x2f, 0x48, 0xcd,
- 0x4b, 0x4e, 0xcd, 0x2b, 0x2e, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x2d,
- 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, 0x42, 0x32, 0x08,
- 0xc5, 0x10, 0x11, 0x3d, 0xb8, 0x82, 0x32, 0x43, 0xa5, 0xa5, 0x8c, 0x5c, 0x1c, 0x41, 0x50, 0xbe,
- 0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98,
- 0x2d, 0xe4, 0xc5, 0xc5, 0x96, 0x93, 0x98, 0x94, 0x9a, 0x53, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1,
- 0x6d, 0x64, 0xa4, 0x87, 0xcf, 0x3c, 0x3d, 0x98, 0x59, 0x7a, 0x3e, 0x60, 0x4d, 0xae, 0x79, 0x25,
- 0x45, 0x95, 0x41, 0x50, 0x13, 0xa4, 0x2c, 0xb9, 0xb8, 0x91, 0x84, 0x85, 0x04, 0xb8, 0x98, 0xb3,
- 0x53, 0x2b, 0xa1, 0xb6, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12,
- 0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1, 0x69, 0x06, 0x23, 0x97, 0x7c, 0x66, 0x3e,
- 0x5e, 0xbb, 0x9d, 0x78, 0x61, 0x96, 0x07, 0x80, 0xa4, 0x02, 0x18, 0xa3, 0x5c, 0xd3, 0x33, 0x4b,
- 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x21, 0xba, 0x74, 0x33, 0xf3, 0x8a, 0x4b, 0x8a,
- 0x4a, 0x73, 0x53, 0xf3, 0x4a, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0xf4, 0x11, 0x06, 0xea, 0x42, 0x42,
- 0x32, 0x3d, 0x35, 0x4f, 0x37, 0x1d, 0x25, 0x40, 0x5f, 0x31, 0xc9, 0xf8, 0x17, 0xa4, 0xe6, 0x39,
- 0x43, 0xac, 0x05, 0x9b, 0x8d, 0xf0, 0x66, 0x98, 0x61, 0x12, 0x1b, 0x58, 0xa3, 0x31, 0x20, 0x00,
- 0x00, 0xff, 0xff, 0xcf, 0x32, 0xff, 0x46, 0x96, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go
deleted file mode 100644
index 2f4ab19b5..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go
+++ /dev/null
@@ -1,1553 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: opencensus/proto/trace/v1/trace.proto
-
-package v1
-
-import (
- fmt "fmt"
- v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
- proto "github.com/golang/protobuf/proto"
- timestamp "github.com/golang/protobuf/ptypes/timestamp"
- wrappers "github.com/golang/protobuf/ptypes/wrappers"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// Type of span. Can be used to specify additional relationships between spans
-// in addition to a parent/child relationship.
-type Span_SpanKind int32
-
-const (
- // Unspecified.
- Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0
- // Indicates that the span covers server-side handling of an RPC or other
- // remote network request.
- Span_SERVER Span_SpanKind = 1
- // Indicates that the span covers the client-side wrapper around an RPC or
- // other remote request.
- Span_CLIENT Span_SpanKind = 2
-)
-
-var Span_SpanKind_name = map[int32]string{
- 0: "SPAN_KIND_UNSPECIFIED",
- 1: "SERVER",
- 2: "CLIENT",
-}
-
-var Span_SpanKind_value = map[string]int32{
- "SPAN_KIND_UNSPECIFIED": 0,
- "SERVER": 1,
- "CLIENT": 2,
-}
-
-func (x Span_SpanKind) String() string {
- return proto.EnumName(Span_SpanKind_name, int32(x))
-}
-
-func (Span_SpanKind) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 0}
-}
-
-// Indicates whether the message was sent or received.
-type Span_TimeEvent_MessageEvent_Type int32
-
-const (
- // Unknown event type.
- Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0
- // Indicates a sent message.
- Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1
- // Indicates a received message.
- Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2
-)
-
-var Span_TimeEvent_MessageEvent_Type_name = map[int32]string{
- 0: "TYPE_UNSPECIFIED",
- 1: "SENT",
- 2: "RECEIVED",
-}
-
-var Span_TimeEvent_MessageEvent_Type_value = map[string]int32{
- "TYPE_UNSPECIFIED": 0,
- "SENT": 1,
- "RECEIVED": 2,
-}
-
-func (x Span_TimeEvent_MessageEvent_Type) String() string {
- return proto.EnumName(Span_TimeEvent_MessageEvent_Type_name, int32(x))
-}
-
-func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1, 0}
-}
-
-// The relationship of the current span relative to the linked span: child,
-// parent, or unspecified.
-type Span_Link_Type int32
-
-const (
- // The relationship of the two spans is unknown, or known but other
- // than parent-child.
- Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0
- // The linked span is a child of the current span.
- Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1
- // The linked span is a parent of the current span.
- Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2
-)
-
-var Span_Link_Type_name = map[int32]string{
- 0: "TYPE_UNSPECIFIED",
- 1: "CHILD_LINKED_SPAN",
- 2: "PARENT_LINKED_SPAN",
-}
-
-var Span_Link_Type_value = map[string]int32{
- "TYPE_UNSPECIFIED": 0,
- "CHILD_LINKED_SPAN": 1,
- "PARENT_LINKED_SPAN": 2,
-}
-
-func (x Span_Link_Type) String() string {
- return proto.EnumName(Span_Link_Type_name, int32(x))
-}
-
-func (Span_Link_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 4, 0}
-}
-
-// A span represents a single operation within a trace. Spans can be
-// nested to form a trace tree. Spans may also be linked to other spans
-// from the same or different trace. And form graphs. Often, a trace
-// contains a root span that describes the end-to-end latency, and one
-// or more subspans for its sub-operations. A trace can also contain
-// multiple root spans, or none at all. Spans do not need to be
-// contiguous - there may be gaps or overlaps between spans in a trace.
-//
-// The next id is 17.
-// TODO(bdrutu): Add an example.
-type Span struct {
- // A unique identifier for a trace. All spans from the same trace share
- // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes
- // is considered invalid.
- //
- // This field is semantically required. Receiver should generate new
- // random trace_id if empty or invalid trace_id was received.
- //
- // This field is required.
- TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
- // A unique identifier for a span within a trace, assigned when the span
- // is created. The ID is an 8-byte array. An ID with all zeroes is considered
- // invalid.
- //
- // This field is semantically required. Receiver should generate new
- // random span_id if empty or invalid span_id was received.
- //
- // This field is required.
- SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
- // The Tracestate on the span.
- Tracestate *Span_Tracestate `protobuf:"bytes,15,opt,name=tracestate,proto3" json:"tracestate,omitempty"`
- // The `span_id` of this span's parent span. If this is a root span, then this
- // field must be empty. The ID is an 8-byte array.
- ParentSpanId []byte `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"`
- // A description of the span's operation.
- //
- // For example, the name can be a qualified method name or a file name
- // and a line number where the operation is called. A best practice is to use
- // the same display name at the same call point in an application.
- // This makes it easier to correlate spans in different traces.
- //
- // This field is semantically required to be set to non-empty string.
- // When null or empty string received - receiver may use string "name"
- // as a replacement. There might be smarted algorithms implemented by
- // receiver to fix the empty span name.
- //
- // This field is required.
- Name *TruncatableString `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
- // Distinguishes between spans generated in a particular context. For example,
- // two spans with the same name may be distinguished using `CLIENT` (caller)
- // and `SERVER` (callee) to identify queueing latency associated with the span.
- Kind Span_SpanKind `protobuf:"varint,14,opt,name=kind,proto3,enum=opencensus.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"`
- // The start time of the span. On the client side, this is the time kept by
- // the local machine where the span execution starts. On the server side, this
- // is the time when the server's application handler starts running.
- //
- // This field is semantically required. When not set on receive -
- // receiver should set it to the value of end_time field if it was
- // set. Or to the current time if neither was set. It is important to
- // keep end_time > start_time for consistency.
- //
- // This field is required.
- StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
- // The end time of the span. On the client side, this is the time kept by
- // the local machine where the span execution ends. On the server side, this
- // is the time when the server application handler stops running.
- //
- // This field is semantically required. When not set on receive -
- // receiver should set it to start_time value. It is important to
- // keep end_time > start_time for consistency.
- //
- // This field is required.
- EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
- // A set of attributes on the span.
- Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"`
- // A stack trace captured at the start of the span.
- StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"`
- // The included time events.
- TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"`
- // The included links.
- Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"`
- // An optional final status for this span. Semantically when Status
- // wasn't set it is means span ended without errors and assume
- // Status.Ok (code = 0).
- Status *Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"`
- // An optional resource that is associated with this span. If not set, this span
- // should be part of a batch that does include the resource information, unless resource
- // information is unknown.
- Resource *v1.Resource `protobuf:"bytes,16,opt,name=resource,proto3" json:"resource,omitempty"`
- // A highly recommended but not required flag that identifies when a
- // trace crosses a process boundary. True when the parent_span belongs
- // to the same process as the current span. This flag is most commonly
- // used to indicate the need to adjust time as clocks in different
- // processes may not be synchronized.
- SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"`
- // An optional number of child spans that were generated while this span
- // was active. If set, allows an implementation to detect missing child spans.
- ChildSpanCount *wrappers.UInt32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span) Reset() { *m = Span{} }
-func (m *Span) String() string { return proto.CompactTextString(m) }
-func (*Span) ProtoMessage() {}
-func (*Span) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0}
-}
-
-func (m *Span) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span.Unmarshal(m, b)
-}
-func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span.Marshal(b, m, deterministic)
-}
-func (m *Span) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span.Merge(m, src)
-}
-func (m *Span) XXX_Size() int {
- return xxx_messageInfo_Span.Size(m)
-}
-func (m *Span) XXX_DiscardUnknown() {
- xxx_messageInfo_Span.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span proto.InternalMessageInfo
-
-func (m *Span) GetTraceId() []byte {
- if m != nil {
- return m.TraceId
- }
- return nil
-}
-
-func (m *Span) GetSpanId() []byte {
- if m != nil {
- return m.SpanId
- }
- return nil
-}
-
-func (m *Span) GetTracestate() *Span_Tracestate {
- if m != nil {
- return m.Tracestate
- }
- return nil
-}
-
-func (m *Span) GetParentSpanId() []byte {
- if m != nil {
- return m.ParentSpanId
- }
- return nil
-}
-
-func (m *Span) GetName() *TruncatableString {
- if m != nil {
- return m.Name
- }
- return nil
-}
-
-func (m *Span) GetKind() Span_SpanKind {
- if m != nil {
- return m.Kind
- }
- return Span_SPAN_KIND_UNSPECIFIED
-}
-
-func (m *Span) GetStartTime() *timestamp.Timestamp {
- if m != nil {
- return m.StartTime
- }
- return nil
-}
-
-func (m *Span) GetEndTime() *timestamp.Timestamp {
- if m != nil {
- return m.EndTime
- }
- return nil
-}
-
-func (m *Span) GetAttributes() *Span_Attributes {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *Span) GetStackTrace() *StackTrace {
- if m != nil {
- return m.StackTrace
- }
- return nil
-}
-
-func (m *Span) GetTimeEvents() *Span_TimeEvents {
- if m != nil {
- return m.TimeEvents
- }
- return nil
-}
-
-func (m *Span) GetLinks() *Span_Links {
- if m != nil {
- return m.Links
- }
- return nil
-}
-
-func (m *Span) GetStatus() *Status {
- if m != nil {
- return m.Status
- }
- return nil
-}
-
-func (m *Span) GetResource() *v1.Resource {
- if m != nil {
- return m.Resource
- }
- return nil
-}
-
-func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue {
- if m != nil {
- return m.SameProcessAsParentSpan
- }
- return nil
-}
-
-func (m *Span) GetChildSpanCount() *wrappers.UInt32Value {
- if m != nil {
- return m.ChildSpanCount
- }
- return nil
-}
-
-// This field conveys information about request position in multiple distributed tracing graphs.
-// It is a list of Tracestate.Entry with a maximum of 32 members in the list.
-//
-// See the https://github.com/w3c/distributed-tracing for more details about this field.
-type Span_Tracestate struct {
- // A list of entries that represent the Tracestate.
- Entries []*Span_Tracestate_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_Tracestate) Reset() { *m = Span_Tracestate{} }
-func (m *Span_Tracestate) String() string { return proto.CompactTextString(m) }
-func (*Span_Tracestate) ProtoMessage() {}
-func (*Span_Tracestate) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 0}
-}
-
-func (m *Span_Tracestate) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_Tracestate.Unmarshal(m, b)
-}
-func (m *Span_Tracestate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_Tracestate.Marshal(b, m, deterministic)
-}
-func (m *Span_Tracestate) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_Tracestate.Merge(m, src)
-}
-func (m *Span_Tracestate) XXX_Size() int {
- return xxx_messageInfo_Span_Tracestate.Size(m)
-}
-func (m *Span_Tracestate) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_Tracestate.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_Tracestate proto.InternalMessageInfo
-
-func (m *Span_Tracestate) GetEntries() []*Span_Tracestate_Entry {
- if m != nil {
- return m.Entries
- }
- return nil
-}
-
-type Span_Tracestate_Entry struct {
- // The key must begin with a lowercase letter, and can only contain
- // lowercase letters 'a'-'z', digits '0'-'9', underscores '_', dashes
- // '-', asterisks '*', and forward slashes '/'.
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // The value is opaque string up to 256 characters printable ASCII
- // RFC0020 characters (i.e., the range 0x20 to 0x7E) except ',' and '='.
- // Note that this also excludes tabs, newlines, carriage returns, etc.
- Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_Tracestate_Entry) Reset() { *m = Span_Tracestate_Entry{} }
-func (m *Span_Tracestate_Entry) String() string { return proto.CompactTextString(m) }
-func (*Span_Tracestate_Entry) ProtoMessage() {}
-func (*Span_Tracestate_Entry) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 0, 0}
-}
-
-func (m *Span_Tracestate_Entry) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_Tracestate_Entry.Unmarshal(m, b)
-}
-func (m *Span_Tracestate_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_Tracestate_Entry.Marshal(b, m, deterministic)
-}
-func (m *Span_Tracestate_Entry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_Tracestate_Entry.Merge(m, src)
-}
-func (m *Span_Tracestate_Entry) XXX_Size() int {
- return xxx_messageInfo_Span_Tracestate_Entry.Size(m)
-}
-func (m *Span_Tracestate_Entry) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_Tracestate_Entry.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_Tracestate_Entry proto.InternalMessageInfo
-
-func (m *Span_Tracestate_Entry) GetKey() string {
- if m != nil {
- return m.Key
- }
- return ""
-}
-
-func (m *Span_Tracestate_Entry) GetValue() string {
- if m != nil {
- return m.Value
- }
- return ""
-}
-
-// A set of attributes, each with a key and a value.
-type Span_Attributes struct {
- // The set of attributes. The value can be a string, an integer, a double
- // or the Boolean values `true` or `false`. Note, global attributes like
- // server name can be set as tags using resource API. Examples of attributes:
- //
- // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
- // "/http/server_latency": 300
- // "abc.com/myattribute": true
- // "abc.com/score": 10.239
- AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- // The number of attributes that were discarded. Attributes can be discarded
- // because their keys are too long or because there are too many attributes.
- // If this value is 0, then no attributes were dropped.
- DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_Attributes) Reset() { *m = Span_Attributes{} }
-func (m *Span_Attributes) String() string { return proto.CompactTextString(m) }
-func (*Span_Attributes) ProtoMessage() {}
-func (*Span_Attributes) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 1}
-}
-
-func (m *Span_Attributes) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_Attributes.Unmarshal(m, b)
-}
-func (m *Span_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_Attributes.Marshal(b, m, deterministic)
-}
-func (m *Span_Attributes) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_Attributes.Merge(m, src)
-}
-func (m *Span_Attributes) XXX_Size() int {
- return xxx_messageInfo_Span_Attributes.Size(m)
-}
-func (m *Span_Attributes) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_Attributes.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_Attributes proto.InternalMessageInfo
-
-func (m *Span_Attributes) GetAttributeMap() map[string]*AttributeValue {
- if m != nil {
- return m.AttributeMap
- }
- return nil
-}
-
-func (m *Span_Attributes) GetDroppedAttributesCount() int32 {
- if m != nil {
- return m.DroppedAttributesCount
- }
- return 0
-}
-
-// A time-stamped annotation or message event in the Span.
-type Span_TimeEvent struct {
- // The time the event occurred.
- Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
- // A `TimeEvent` can contain either an `Annotation` object or a
- // `MessageEvent` object, but not both.
- //
- // Types that are valid to be assigned to Value:
- // *Span_TimeEvent_Annotation_
- // *Span_TimeEvent_MessageEvent_
- Value isSpan_TimeEvent_Value `protobuf_oneof:"value"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} }
-func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) }
-func (*Span_TimeEvent) ProtoMessage() {}
-func (*Span_TimeEvent) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 2}
-}
-
-func (m *Span_TimeEvent) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_TimeEvent.Unmarshal(m, b)
-}
-func (m *Span_TimeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_TimeEvent.Marshal(b, m, deterministic)
-}
-func (m *Span_TimeEvent) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_TimeEvent.Merge(m, src)
-}
-func (m *Span_TimeEvent) XXX_Size() int {
- return xxx_messageInfo_Span_TimeEvent.Size(m)
-}
-func (m *Span_TimeEvent) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_TimeEvent.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_TimeEvent proto.InternalMessageInfo
-
-func (m *Span_TimeEvent) GetTime() *timestamp.Timestamp {
- if m != nil {
- return m.Time
- }
- return nil
-}
-
-type isSpan_TimeEvent_Value interface {
- isSpan_TimeEvent_Value()
-}
-
-type Span_TimeEvent_Annotation_ struct {
- Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"`
-}
-
-type Span_TimeEvent_MessageEvent_ struct {
- MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"`
-}
-
-func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {}
-
-func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {}
-
-func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation {
- if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok {
- return x.Annotation
- }
- return nil
-}
-
-func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent {
- if x, ok := m.GetValue().(*Span_TimeEvent_MessageEvent_); ok {
- return x.MessageEvent
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Span_TimeEvent) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Span_TimeEvent_Annotation_)(nil),
- (*Span_TimeEvent_MessageEvent_)(nil),
- }
-}
-
-// A text annotation with a set of attributes.
-type Span_TimeEvent_Annotation struct {
- // A user-supplied message describing the event.
- Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
- // A set of attributes on the annotation.
- Attributes *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_TimeEvent_Annotation) Reset() { *m = Span_TimeEvent_Annotation{} }
-func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) }
-func (*Span_TimeEvent_Annotation) ProtoMessage() {}
-func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 0}
-}
-
-func (m *Span_TimeEvent_Annotation) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_TimeEvent_Annotation.Unmarshal(m, b)
-}
-func (m *Span_TimeEvent_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_TimeEvent_Annotation.Marshal(b, m, deterministic)
-}
-func (m *Span_TimeEvent_Annotation) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_TimeEvent_Annotation.Merge(m, src)
-}
-func (m *Span_TimeEvent_Annotation) XXX_Size() int {
- return xxx_messageInfo_Span_TimeEvent_Annotation.Size(m)
-}
-func (m *Span_TimeEvent_Annotation) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_TimeEvent_Annotation.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_TimeEvent_Annotation proto.InternalMessageInfo
-
-func (m *Span_TimeEvent_Annotation) GetDescription() *TruncatableString {
- if m != nil {
- return m.Description
- }
- return nil
-}
-
-func (m *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-// An event describing a message sent/received between Spans.
-type Span_TimeEvent_MessageEvent struct {
- // The type of MessageEvent. Indicates whether the message was sent or
- // received.
- Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"`
- // An identifier for the MessageEvent's message that can be used to match
- // SENT and RECEIVED MessageEvents. For example, this field could
- // represent a sequence ID for a streaming RPC. It is recommended to be
- // unique within a Span.
- Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
- // The number of uncompressed bytes sent or received.
- UncompressedSize uint64 `protobuf:"varint,3,opt,name=uncompressed_size,json=uncompressedSize,proto3" json:"uncompressed_size,omitempty"`
- // The number of compressed bytes sent or received. If zero, assumed to
- // be the same size as uncompressed.
- CompressedSize uint64 `protobuf:"varint,4,opt,name=compressed_size,json=compressedSize,proto3" json:"compressed_size,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_TimeEvent_MessageEvent) Reset() { *m = Span_TimeEvent_MessageEvent{} }
-func (m *Span_TimeEvent_MessageEvent) String() string { return proto.CompactTextString(m) }
-func (*Span_TimeEvent_MessageEvent) ProtoMessage() {}
-func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1}
-}
-
-func (m *Span_TimeEvent_MessageEvent) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_TimeEvent_MessageEvent.Unmarshal(m, b)
-}
-func (m *Span_TimeEvent_MessageEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_TimeEvent_MessageEvent.Marshal(b, m, deterministic)
-}
-func (m *Span_TimeEvent_MessageEvent) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_TimeEvent_MessageEvent.Merge(m, src)
-}
-func (m *Span_TimeEvent_MessageEvent) XXX_Size() int {
- return xxx_messageInfo_Span_TimeEvent_MessageEvent.Size(m)
-}
-func (m *Span_TimeEvent_MessageEvent) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_TimeEvent_MessageEvent.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_TimeEvent_MessageEvent proto.InternalMessageInfo
-
-func (m *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type {
- if m != nil {
- return m.Type
- }
- return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED
-}
-
-func (m *Span_TimeEvent_MessageEvent) GetId() uint64 {
- if m != nil {
- return m.Id
- }
- return 0
-}
-
-func (m *Span_TimeEvent_MessageEvent) GetUncompressedSize() uint64 {
- if m != nil {
- return m.UncompressedSize
- }
- return 0
-}
-
-func (m *Span_TimeEvent_MessageEvent) GetCompressedSize() uint64 {
- if m != nil {
- return m.CompressedSize
- }
- return 0
-}
-
-// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation
-// on the span, consisting of either user-supplied key-value pairs, or
-// details of a message sent/received between Spans.
-type Span_TimeEvents struct {
- // A collection of `TimeEvent`s.
- TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"`
- // The number of dropped annotations in all the included time events.
- // If the value is 0, then no annotations were dropped.
- DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"`
- // The number of dropped message events in all the included time events.
- // If the value is 0, then no message events were dropped.
- DroppedMessageEventsCount int32 `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_TimeEvents) Reset() { *m = Span_TimeEvents{} }
-func (m *Span_TimeEvents) String() string { return proto.CompactTextString(m) }
-func (*Span_TimeEvents) ProtoMessage() {}
-func (*Span_TimeEvents) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 3}
-}
-
-func (m *Span_TimeEvents) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_TimeEvents.Unmarshal(m, b)
-}
-func (m *Span_TimeEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_TimeEvents.Marshal(b, m, deterministic)
-}
-func (m *Span_TimeEvents) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_TimeEvents.Merge(m, src)
-}
-func (m *Span_TimeEvents) XXX_Size() int {
- return xxx_messageInfo_Span_TimeEvents.Size(m)
-}
-func (m *Span_TimeEvents) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_TimeEvents.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_TimeEvents proto.InternalMessageInfo
-
-func (m *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent {
- if m != nil {
- return m.TimeEvent
- }
- return nil
-}
-
-func (m *Span_TimeEvents) GetDroppedAnnotationsCount() int32 {
- if m != nil {
- return m.DroppedAnnotationsCount
- }
- return 0
-}
-
-func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 {
- if m != nil {
- return m.DroppedMessageEventsCount
- }
- return 0
-}
-
-// A pointer from the current span to another span in the same trace or in a
-// different trace. For example, this can be used in batching operations,
-// where a single batch handler processes multiple requests from different
-// traces or when the handler receives a request from a different project.
-type Span_Link struct {
- // A unique identifier of a trace that this linked span is part of. The ID is a
- // 16-byte array.
- TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
- // A unique identifier for the linked span. The ID is an 8-byte array.
- SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
- // The relationship of the current span relative to the linked span.
- Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_Link_Type" json:"type,omitempty"`
- // A set of attributes on the link.
- Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"`
- // The Tracestate associated with the link.
- Tracestate *Span_Tracestate `protobuf:"bytes,5,opt,name=tracestate,proto3" json:"tracestate,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_Link) Reset() { *m = Span_Link{} }
-func (m *Span_Link) String() string { return proto.CompactTextString(m) }
-func (*Span_Link) ProtoMessage() {}
-func (*Span_Link) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 4}
-}
-
-func (m *Span_Link) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_Link.Unmarshal(m, b)
-}
-func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic)
-}
-func (m *Span_Link) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_Link.Merge(m, src)
-}
-func (m *Span_Link) XXX_Size() int {
- return xxx_messageInfo_Span_Link.Size(m)
-}
-func (m *Span_Link) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_Link.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_Link proto.InternalMessageInfo
-
-func (m *Span_Link) GetTraceId() []byte {
- if m != nil {
- return m.TraceId
- }
- return nil
-}
-
-func (m *Span_Link) GetSpanId() []byte {
- if m != nil {
- return m.SpanId
- }
- return nil
-}
-
-func (m *Span_Link) GetType() Span_Link_Type {
- if m != nil {
- return m.Type
- }
- return Span_Link_TYPE_UNSPECIFIED
-}
-
-func (m *Span_Link) GetAttributes() *Span_Attributes {
- if m != nil {
- return m.Attributes
- }
- return nil
-}
-
-func (m *Span_Link) GetTracestate() *Span_Tracestate {
- if m != nil {
- return m.Tracestate
- }
- return nil
-}
-
-// A collection of links, which are references from this span to a span
-// in the same or different trace.
-type Span_Links struct {
- // A collection of links.
- Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"`
- // The number of dropped links after the maximum size was enforced. If
- // this value is 0, then no links were dropped.
- DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Span_Links) Reset() { *m = Span_Links{} }
-func (m *Span_Links) String() string { return proto.CompactTextString(m) }
-func (*Span_Links) ProtoMessage() {}
-func (*Span_Links) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{0, 5}
-}
-
-func (m *Span_Links) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Span_Links.Unmarshal(m, b)
-}
-func (m *Span_Links) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Span_Links.Marshal(b, m, deterministic)
-}
-func (m *Span_Links) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Span_Links.Merge(m, src)
-}
-func (m *Span_Links) XXX_Size() int {
- return xxx_messageInfo_Span_Links.Size(m)
-}
-func (m *Span_Links) XXX_DiscardUnknown() {
- xxx_messageInfo_Span_Links.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Span_Links proto.InternalMessageInfo
-
-func (m *Span_Links) GetLink() []*Span_Link {
- if m != nil {
- return m.Link
- }
- return nil
-}
-
-func (m *Span_Links) GetDroppedLinksCount() int32 {
- if m != nil {
- return m.DroppedLinksCount
- }
- return 0
-}
-
-// The `Status` type defines a logical error model that is suitable for different
-// programming environments, including REST APIs and RPC APIs. This proto's fields
-// are a subset of those of
-// [google.rpc.Status](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto),
-// which is used by [gRPC](https://github.com/grpc).
-type Status struct {
- // The status code. This is optional field. It is safe to assume 0 (OK)
- // when not set.
- Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
- // A developer-facing error message, which should be in English.
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Status) Reset() { *m = Status{} }
-func (m *Status) String() string { return proto.CompactTextString(m) }
-func (*Status) ProtoMessage() {}
-func (*Status) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{1}
-}
-
-func (m *Status) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Status.Unmarshal(m, b)
-}
-func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Status.Marshal(b, m, deterministic)
-}
-func (m *Status) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Status.Merge(m, src)
-}
-func (m *Status) XXX_Size() int {
- return xxx_messageInfo_Status.Size(m)
-}
-func (m *Status) XXX_DiscardUnknown() {
- xxx_messageInfo_Status.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Status proto.InternalMessageInfo
-
-func (m *Status) GetCode() int32 {
- if m != nil {
- return m.Code
- }
- return 0
-}
-
-func (m *Status) GetMessage() string {
- if m != nil {
- return m.Message
- }
- return ""
-}
-
-// The value of an Attribute.
-type AttributeValue struct {
- // The type of the value.
- //
- // Types that are valid to be assigned to Value:
- // *AttributeValue_StringValue
- // *AttributeValue_IntValue
- // *AttributeValue_BoolValue
- // *AttributeValue_DoubleValue
- Value isAttributeValue_Value `protobuf_oneof:"value"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AttributeValue) Reset() { *m = AttributeValue{} }
-func (m *AttributeValue) String() string { return proto.CompactTextString(m) }
-func (*AttributeValue) ProtoMessage() {}
-func (*AttributeValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{2}
-}
-
-func (m *AttributeValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_AttributeValue.Unmarshal(m, b)
-}
-func (m *AttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_AttributeValue.Marshal(b, m, deterministic)
-}
-func (m *AttributeValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AttributeValue.Merge(m, src)
-}
-func (m *AttributeValue) XXX_Size() int {
- return xxx_messageInfo_AttributeValue.Size(m)
-}
-func (m *AttributeValue) XXX_DiscardUnknown() {
- xxx_messageInfo_AttributeValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AttributeValue proto.InternalMessageInfo
-
-type isAttributeValue_Value interface {
- isAttributeValue_Value()
-}
-
-type AttributeValue_StringValue struct {
- StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"`
-}
-
-type AttributeValue_IntValue struct {
- IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"`
-}
-
-type AttributeValue_BoolValue struct {
- BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"`
-}
-
-type AttributeValue_DoubleValue struct {
- DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"`
-}
-
-func (*AttributeValue_StringValue) isAttributeValue_Value() {}
-
-func (*AttributeValue_IntValue) isAttributeValue_Value() {}
-
-func (*AttributeValue_BoolValue) isAttributeValue_Value() {}
-
-func (*AttributeValue_DoubleValue) isAttributeValue_Value() {}
-
-func (m *AttributeValue) GetValue() isAttributeValue_Value {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *AttributeValue) GetStringValue() *TruncatableString {
- if x, ok := m.GetValue().(*AttributeValue_StringValue); ok {
- return x.StringValue
- }
- return nil
-}
-
-func (m *AttributeValue) GetIntValue() int64 {
- if x, ok := m.GetValue().(*AttributeValue_IntValue); ok {
- return x.IntValue
- }
- return 0
-}
-
-func (m *AttributeValue) GetBoolValue() bool {
- if x, ok := m.GetValue().(*AttributeValue_BoolValue); ok {
- return x.BoolValue
- }
- return false
-}
-
-func (m *AttributeValue) GetDoubleValue() float64 {
- if x, ok := m.GetValue().(*AttributeValue_DoubleValue); ok {
- return x.DoubleValue
- }
- return 0
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*AttributeValue) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*AttributeValue_StringValue)(nil),
- (*AttributeValue_IntValue)(nil),
- (*AttributeValue_BoolValue)(nil),
- (*AttributeValue_DoubleValue)(nil),
- }
-}
-
-// The call stack which originated this span.
-type StackTrace struct {
- // Stack frames in this stack trace.
- StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"`
- // The hash ID is used to conserve network bandwidth for duplicate
- // stack traces within a single trace.
- //
- // Often multiple spans will have identical stack traces.
- // The first occurrence of a stack trace should contain both
- // `stack_frames` and a value in `stack_trace_hash_id`.
- //
- // Subsequent spans within the same request can refer
- // to that stack trace by setting only `stack_trace_hash_id`.
- //
- // TODO: describe how to deal with the case where stack_trace_hash_id is
- // zero because it was not set.
- StackTraceHashId uint64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StackTrace) Reset() { *m = StackTrace{} }
-func (m *StackTrace) String() string { return proto.CompactTextString(m) }
-func (*StackTrace) ProtoMessage() {}
-func (*StackTrace) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{3}
-}
-
-func (m *StackTrace) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StackTrace.Unmarshal(m, b)
-}
-func (m *StackTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StackTrace.Marshal(b, m, deterministic)
-}
-func (m *StackTrace) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StackTrace.Merge(m, src)
-}
-func (m *StackTrace) XXX_Size() int {
- return xxx_messageInfo_StackTrace.Size(m)
-}
-func (m *StackTrace) XXX_DiscardUnknown() {
- xxx_messageInfo_StackTrace.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StackTrace proto.InternalMessageInfo
-
-func (m *StackTrace) GetStackFrames() *StackTrace_StackFrames {
- if m != nil {
- return m.StackFrames
- }
- return nil
-}
-
-func (m *StackTrace) GetStackTraceHashId() uint64 {
- if m != nil {
- return m.StackTraceHashId
- }
- return 0
-}
-
-// A single stack frame in a stack trace.
-type StackTrace_StackFrame struct {
- // The fully-qualified name that uniquely identifies the function or
- // method that is active in this frame.
- FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"`
- // An un-mangled function name, if `function_name` is
- // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can
- // be fully qualified.
- OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"`
- // The name of the source file where the function call appears.
- FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"`
- // The line number in `file_name` where the function call appears.
- LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"`
- // The column number where the function call appears, if available.
- // This is important in JavaScript because of its anonymous functions.
- ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"`
- // The binary module from where the code was loaded.
- LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"`
- // The version of the deployed source code.
- SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} }
-func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) }
-func (*StackTrace_StackFrame) ProtoMessage() {}
-func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{3, 0}
-}
-
-func (m *StackTrace_StackFrame) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StackTrace_StackFrame.Unmarshal(m, b)
-}
-func (m *StackTrace_StackFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StackTrace_StackFrame.Marshal(b, m, deterministic)
-}
-func (m *StackTrace_StackFrame) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StackTrace_StackFrame.Merge(m, src)
-}
-func (m *StackTrace_StackFrame) XXX_Size() int {
- return xxx_messageInfo_StackTrace_StackFrame.Size(m)
-}
-func (m *StackTrace_StackFrame) XXX_DiscardUnknown() {
- xxx_messageInfo_StackTrace_StackFrame.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StackTrace_StackFrame proto.InternalMessageInfo
-
-func (m *StackTrace_StackFrame) GetFunctionName() *TruncatableString {
- if m != nil {
- return m.FunctionName
- }
- return nil
-}
-
-func (m *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString {
- if m != nil {
- return m.OriginalFunctionName
- }
- return nil
-}
-
-func (m *StackTrace_StackFrame) GetFileName() *TruncatableString {
- if m != nil {
- return m.FileName
- }
- return nil
-}
-
-func (m *StackTrace_StackFrame) GetLineNumber() int64 {
- if m != nil {
- return m.LineNumber
- }
- return 0
-}
-
-func (m *StackTrace_StackFrame) GetColumnNumber() int64 {
- if m != nil {
- return m.ColumnNumber
- }
- return 0
-}
-
-func (m *StackTrace_StackFrame) GetLoadModule() *Module {
- if m != nil {
- return m.LoadModule
- }
- return nil
-}
-
-func (m *StackTrace_StackFrame) GetSourceVersion() *TruncatableString {
- if m != nil {
- return m.SourceVersion
- }
- return nil
-}
-
-// A collection of stack frames, which can be truncated.
-type StackTrace_StackFrames struct {
- // Stack frames in this call stack.
- Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"`
- // The number of stack frames that were dropped because there
- // were too many stack frames.
- // If this value is 0, then no stack frames were dropped.
- DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StackTrace_StackFrames) Reset() { *m = StackTrace_StackFrames{} }
-func (m *StackTrace_StackFrames) String() string { return proto.CompactTextString(m) }
-func (*StackTrace_StackFrames) ProtoMessage() {}
-func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{3, 1}
-}
-
-func (m *StackTrace_StackFrames) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StackTrace_StackFrames.Unmarshal(m, b)
-}
-func (m *StackTrace_StackFrames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StackTrace_StackFrames.Marshal(b, m, deterministic)
-}
-func (m *StackTrace_StackFrames) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StackTrace_StackFrames.Merge(m, src)
-}
-func (m *StackTrace_StackFrames) XXX_Size() int {
- return xxx_messageInfo_StackTrace_StackFrames.Size(m)
-}
-func (m *StackTrace_StackFrames) XXX_DiscardUnknown() {
- xxx_messageInfo_StackTrace_StackFrames.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StackTrace_StackFrames proto.InternalMessageInfo
-
-func (m *StackTrace_StackFrames) GetFrame() []*StackTrace_StackFrame {
- if m != nil {
- return m.Frame
- }
- return nil
-}
-
-func (m *StackTrace_StackFrames) GetDroppedFramesCount() int32 {
- if m != nil {
- return m.DroppedFramesCount
- }
- return 0
-}
-
-// A description of a binary module.
-type Module struct {
- // TODO: document the meaning of this field.
- // For example: main binary, kernel modules, and dynamic libraries
- // such as libc.so, sharedlib.so.
- Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"`
- // A unique identifier for the module, usually a hash of its
- // contents.
- BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Module) Reset() { *m = Module{} }
-func (m *Module) String() string { return proto.CompactTextString(m) }
-func (*Module) ProtoMessage() {}
-func (*Module) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{4}
-}
-
-func (m *Module) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Module.Unmarshal(m, b)
-}
-func (m *Module) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Module.Marshal(b, m, deterministic)
-}
-func (m *Module) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Module.Merge(m, src)
-}
-func (m *Module) XXX_Size() int {
- return xxx_messageInfo_Module.Size(m)
-}
-func (m *Module) XXX_DiscardUnknown() {
- xxx_messageInfo_Module.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Module proto.InternalMessageInfo
-
-func (m *Module) GetModule() *TruncatableString {
- if m != nil {
- return m.Module
- }
- return nil
-}
-
-func (m *Module) GetBuildId() *TruncatableString {
- if m != nil {
- return m.BuildId
- }
- return nil
-}
-
-// A string that might be shortened to a specified length.
-type TruncatableString struct {
- // The shortened string. For example, if the original string was 500 bytes long and
- // the limit of the string was 128 bytes, then this value contains the first 128
- // bytes of the 500-byte string. Note that truncation always happens on a
- // character boundary, to ensure that a truncated string is still valid UTF-8.
- // Because it may contain multi-byte characters, the size of the truncated string
- // may be less than the truncation limit.
- Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
- // The number of bytes removed from the original string. If this
- // value is 0, then the string was not shortened.
- TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TruncatableString) Reset() { *m = TruncatableString{} }
-func (m *TruncatableString) String() string { return proto.CompactTextString(m) }
-func (*TruncatableString) ProtoMessage() {}
-func (*TruncatableString) Descriptor() ([]byte, []int) {
- return fileDescriptor_8ea38bbb821bf584, []int{5}
-}
-
-func (m *TruncatableString) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TruncatableString.Unmarshal(m, b)
-}
-func (m *TruncatableString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TruncatableString.Marshal(b, m, deterministic)
-}
-func (m *TruncatableString) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TruncatableString.Merge(m, src)
-}
-func (m *TruncatableString) XXX_Size() int {
- return xxx_messageInfo_TruncatableString.Size(m)
-}
-func (m *TruncatableString) XXX_DiscardUnknown() {
- xxx_messageInfo_TruncatableString.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TruncatableString proto.InternalMessageInfo
-
-func (m *TruncatableString) GetValue() string {
- if m != nil {
- return m.Value
- }
- return ""
-}
-
-func (m *TruncatableString) GetTruncatedByteCount() int32 {
- if m != nil {
- return m.TruncatedByteCount
- }
- return 0
-}
-
-func init() {
- proto.RegisterEnum("opencensus.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value)
- proto.RegisterEnum("opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type", Span_TimeEvent_MessageEvent_Type_name, Span_TimeEvent_MessageEvent_Type_value)
- proto.RegisterEnum("opencensus.proto.trace.v1.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value)
- proto.RegisterType((*Span)(nil), "opencensus.proto.trace.v1.Span")
- proto.RegisterType((*Span_Tracestate)(nil), "opencensus.proto.trace.v1.Span.Tracestate")
- proto.RegisterType((*Span_Tracestate_Entry)(nil), "opencensus.proto.trace.v1.Span.Tracestate.Entry")
- proto.RegisterType((*Span_Attributes)(nil), "opencensus.proto.trace.v1.Span.Attributes")
- proto.RegisterMapType((map[string]*AttributeValue)(nil), "opencensus.proto.trace.v1.Span.Attributes.AttributeMapEntry")
- proto.RegisterType((*Span_TimeEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent")
- proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.Annotation")
- proto.RegisterType((*Span_TimeEvent_MessageEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent")
- proto.RegisterType((*Span_TimeEvents)(nil), "opencensus.proto.trace.v1.Span.TimeEvents")
- proto.RegisterType((*Span_Link)(nil), "opencensus.proto.trace.v1.Span.Link")
- proto.RegisterType((*Span_Links)(nil), "opencensus.proto.trace.v1.Span.Links")
- proto.RegisterType((*Status)(nil), "opencensus.proto.trace.v1.Status")
- proto.RegisterType((*AttributeValue)(nil), "opencensus.proto.trace.v1.AttributeValue")
- proto.RegisterType((*StackTrace)(nil), "opencensus.proto.trace.v1.StackTrace")
- proto.RegisterType((*StackTrace_StackFrame)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrame")
- proto.RegisterType((*StackTrace_StackFrames)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrames")
- proto.RegisterType((*Module)(nil), "opencensus.proto.trace.v1.Module")
- proto.RegisterType((*TruncatableString)(nil), "opencensus.proto.trace.v1.TruncatableString")
-}
-
-func init() {
- proto.RegisterFile("opencensus/proto/trace/v1/trace.proto", fileDescriptor_8ea38bbb821bf584)
-}
-
-var fileDescriptor_8ea38bbb821bf584 = []byte{
- // 1581 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0xdb, 0x6e, 0x1b, 0x41,
- 0x19, 0xce, 0xfa, 0xec, 0xdf, 0x8e, 0xeb, 0x4c, 0xd3, 0x74, 0x63, 0x0a, 0x0d, 0x6e, 0x0b, 0x29,
- 0x25, 0x9b, 0x26, 0x2d, 0x55, 0x8f, 0x2a, 0x71, 0xe2, 0x60, 0x37, 0xa9, 0xeb, 0x8e, 0xdd, 0x88,
- 0x83, 0xd0, 0x6a, 0xed, 0x9d, 0x38, 0x4b, 0xec, 0xd9, 0x65, 0x77, 0x36, 0x28, 0x7d, 0x01, 0x84,
- 0xe0, 0x86, 0x0b, 0xc4, 0x0b, 0x70, 0xc1, 0xeb, 0x20, 0xee, 0x79, 0x00, 0x24, 0x9e, 0x80, 0x1b,
- 0x34, 0x33, 0x7b, 0x72, 0xd2, 0x26, 0xc6, 0xbd, 0xb1, 0xe6, 0xf0, 0x7f, 0xdf, 0x3f, 0xff, 0xcc,
- 0x7f, 0x5a, 0xc3, 0x03, 0xdb, 0x21, 0x74, 0x48, 0xa8, 0xe7, 0x7b, 0x9b, 0x8e, 0x6b, 0x33, 0x7b,
- 0x93, 0xb9, 0xc6, 0x90, 0x6c, 0x9e, 0x6d, 0xc9, 0x81, 0x26, 0x16, 0xd1, 0x6a, 0x2c, 0x26, 0x57,
- 0x34, 0xb9, 0x7b, 0xb6, 0x55, 0x7b, 0x74, 0x89, 0xc1, 0x25, 0x9e, 0xed, 0xbb, 0x92, 0x24, 0x1c,
- 0x4b, 0x54, 0xed, 0xee, 0xc8, 0xb6, 0x47, 0x63, 0x22, 0x05, 0x07, 0xfe, 0xf1, 0x26, 0xb3, 0x26,
- 0xc4, 0x63, 0xc6, 0xc4, 0x09, 0x04, 0xbe, 0x77, 0x51, 0xe0, 0x77, 0xae, 0xe1, 0x38, 0xc4, 0x0d,
- 0xd4, 0xd6, 0xff, 0xbc, 0x02, 0x99, 0x9e, 0x63, 0x50, 0xb4, 0x0a, 0x05, 0x71, 0x04, 0xdd, 0x32,
- 0x55, 0x65, 0x4d, 0x59, 0x2f, 0xe3, 0xbc, 0x98, 0xb7, 0x4d, 0x74, 0x1b, 0xf2, 0x9e, 0x63, 0x50,
- 0xbe, 0x93, 0x12, 0x3b, 0x39, 0x3e, 0x6d, 0x9b, 0xe8, 0x1d, 0x80, 0x90, 0xf1, 0x98, 0xc1, 0x88,
- 0x7a, 0x63, 0x4d, 0x59, 0x2f, 0x6d, 0xff, 0x48, 0xfb, 0xaa, 0x69, 0x1a, 0x57, 0xa4, 0xf5, 0x23,
- 0x04, 0x4e, 0xa0, 0xd1, 0x7d, 0xa8, 0x38, 0x86, 0x4b, 0x28, 0xd3, 0x43, 0x5d, 0x69, 0xa1, 0xab,
- 0x2c, 0x57, 0x7b, 0x52, 0xe3, 0x4f, 0x21, 0x43, 0x8d, 0x09, 0x51, 0x33, 0x42, 0xd7, 0x8f, 0xaf,
- 0xd0, 0xd5, 0x77, 0x7d, 0x3a, 0x34, 0x98, 0x31, 0x18, 0x93, 0x1e, 0x73, 0x2d, 0x3a, 0xc2, 0x02,
- 0x89, 0x5e, 0x43, 0xe6, 0xd4, 0xa2, 0xa6, 0x5a, 0x59, 0x53, 0xd6, 0x2b, 0xdb, 0xeb, 0xd7, 0x9d,
- 0x96, 0xff, 0x1c, 0x58, 0xd4, 0xc4, 0x02, 0x85, 0x5e, 0x00, 0x78, 0xcc, 0x70, 0x99, 0xce, 0xef,
- 0x59, 0xcd, 0x8a, 0x53, 0xd4, 0x34, 0x79, 0xc7, 0x5a, 0x78, 0xc7, 0x5a, 0x3f, 0x7c, 0x04, 0x5c,
- 0x14, 0xd2, 0x7c, 0x8e, 0x7e, 0x02, 0x05, 0x42, 0x4d, 0x09, 0xcc, 0x5d, 0x0b, 0xcc, 0x13, 0x6a,
- 0x0a, 0xd8, 0x3b, 0x00, 0x83, 0x31, 0xd7, 0x1a, 0xf8, 0x8c, 0x78, 0x6a, 0x7e, 0xb6, 0x3b, 0xde,
- 0x89, 0x10, 0x38, 0x81, 0x46, 0xfb, 0x50, 0xf2, 0x98, 0x31, 0x3c, 0xd5, 0x85, 0xb4, 0x5a, 0x10,
- 0x64, 0x0f, 0xae, 0x22, 0xe3, 0xd2, 0xe2, 0xc1, 0x30, 0x78, 0xd1, 0x18, 0x1d, 0x40, 0x89, 0x9b,
- 0xa1, 0x93, 0x33, 0x42, 0x99, 0xa7, 0x16, 0x67, 0x7c, 0x78, 0x6b, 0x42, 0x9a, 0x02, 0x81, 0x81,
- 0x45, 0x63, 0xf4, 0x0a, 0xb2, 0x63, 0x8b, 0x9e, 0x7a, 0x2a, 0x5c, 0x7f, 0x1c, 0x4e, 0x73, 0xc8,
- 0x85, 0xb1, 0xc4, 0xa0, 0x17, 0x90, 0xe3, 0xee, 0xe3, 0x7b, 0x6a, 0x49, 0xa0, 0xbf, 0x7f, 0xb5,
- 0x31, 0xcc, 0xf7, 0x70, 0x00, 0x40, 0x0d, 0x28, 0x84, 0xc1, 0xa4, 0x56, 0x05, 0xf8, 0x07, 0x97,
- 0xc1, 0x51, 0xb8, 0x9d, 0x6d, 0x69, 0x38, 0x18, 0xe3, 0x08, 0x87, 0x7e, 0x0e, 0xdf, 0xf1, 0x8c,
- 0x09, 0xd1, 0x1d, 0xd7, 0x1e, 0x12, 0xcf, 0xd3, 0x0d, 0x4f, 0x4f, 0x38, 0xb1, 0x5a, 0xfe, 0xca,
- 0x33, 0x37, 0x6c, 0x7b, 0x7c, 0x64, 0x8c, 0x7d, 0x82, 0x6f, 0x73, 0x78, 0x57, 0xa2, 0x77, 0xbc,
- 0x6e, 0xe4, 0xea, 0x68, 0x1f, 0xaa, 0xc3, 0x13, 0x6b, 0x6c, 0xca, 0x68, 0x18, 0xda, 0x3e, 0x65,
- 0xea, 0xa2, 0xa0, 0xbb, 0x73, 0x89, 0xee, 0x53, 0x9b, 0xb2, 0x27, 0xdb, 0x92, 0xb0, 0x22, 0x50,
- 0x9c, 0x62, 0x97, 0x63, 0x6a, 0x7f, 0x50, 0x00, 0xe2, 0x88, 0x43, 0xef, 0x20, 0x4f, 0x28, 0x73,
- 0x2d, 0xe2, 0xa9, 0xca, 0x5a, 0x7a, 0xbd, 0xb4, 0xfd, 0x78, 0xf6, 0x70, 0xd5, 0x9a, 0x94, 0xb9,
- 0xe7, 0x38, 0x24, 0xa8, 0x6d, 0x42, 0x56, 0xac, 0xa0, 0x2a, 0xa4, 0x4f, 0xc9, 0xb9, 0xc8, 0x1a,
- 0x45, 0xcc, 0x87, 0x68, 0x19, 0xb2, 0x67, 0xfc, 0x38, 0x22, 0x5f, 0x14, 0xb1, 0x9c, 0xd4, 0xfe,
- 0x92, 0x02, 0x88, 0x3d, 0x13, 0x19, 0xb0, 0x18, 0xf9, 0xa6, 0x3e, 0x31, 0x9c, 0xe0, 0x44, 0xaf,
- 0x67, 0x77, 0xee, 0x78, 0xf8, 0xde, 0x70, 0xe4, 0xe9, 0xca, 0x46, 0x62, 0x09, 0x3d, 0x07, 0xd5,
- 0x74, 0x6d, 0xc7, 0x21, 0xa6, 0x1e, 0x87, 0x41, 0x70, 0x9b, 0xfc, 0x68, 0x59, 0xbc, 0x12, 0xec,
- 0xc7, 0xa4, 0xf2, 0xde, 0x7e, 0x03, 0x4b, 0x97, 0xc8, 0xbf, 0x60, 0xe8, 0xdb, 0xa4, 0xa1, 0xa5,
- 0xed, 0x87, 0x57, 0x9c, 0x3d, 0xa2, 0x93, 0x0f, 0x25, 0x71, 0x2f, 0x53, 0xcf, 0x95, 0xda, 0xdf,
- 0xb2, 0x50, 0x8c, 0x82, 0x03, 0x69, 0x90, 0x11, 0x39, 0x42, 0xb9, 0x36, 0x47, 0x08, 0x39, 0x74,
- 0x04, 0x60, 0x50, 0x6a, 0x33, 0x83, 0x59, 0x36, 0x0d, 0xce, 0xf1, 0x74, 0xe6, 0x58, 0xd4, 0x76,
- 0x22, 0x6c, 0x6b, 0x01, 0x27, 0x98, 0xd0, 0xaf, 0x61, 0x71, 0x42, 0x3c, 0xcf, 0x18, 0x05, 0x71,
- 0x2e, 0xf2, 0x71, 0x69, 0xfb, 0xd9, 0xec, 0xd4, 0xef, 0x25, 0x5c, 0x4c, 0x5a, 0x0b, 0xb8, 0x3c,
- 0x49, 0xcc, 0x6b, 0x7f, 0x57, 0x00, 0x62, 0xdd, 0xa8, 0x03, 0x25, 0x93, 0x78, 0x43, 0xd7, 0x72,
- 0x84, 0x19, 0xca, 0x1c, 0xf9, 0x3d, 0x49, 0x70, 0x21, 0x6d, 0xa6, 0xbe, 0x25, 0x6d, 0xd6, 0xfe,
- 0xab, 0x40, 0x39, 0x69, 0x0b, 0xfa, 0x00, 0x19, 0x76, 0xee, 0xc8, 0x27, 0xaa, 0x6c, 0xbf, 0x9a,
- 0xef, 0x46, 0xb4, 0xfe, 0xb9, 0x43, 0xb0, 0x20, 0x42, 0x15, 0x48, 0x05, 0xc5, 0x35, 0x83, 0x53,
- 0x96, 0x89, 0x1e, 0xc1, 0x92, 0x4f, 0x87, 0xf6, 0xc4, 0x71, 0x89, 0xe7, 0x11, 0x53, 0xf7, 0xac,
- 0xcf, 0x44, 0xdc, 0x7f, 0x06, 0x57, 0x93, 0x1b, 0x3d, 0xeb, 0x33, 0x41, 0x3f, 0x84, 0x1b, 0x17,
- 0x45, 0x33, 0x42, 0xb4, 0x32, 0x2d, 0x58, 0x7f, 0x0a, 0x19, 0xae, 0x13, 0x2d, 0x43, 0xb5, 0xff,
- 0x8b, 0x6e, 0x53, 0xff, 0xd4, 0xe9, 0x75, 0x9b, 0xbb, 0xed, 0xfd, 0x76, 0x73, 0xaf, 0xba, 0x80,
- 0x0a, 0x90, 0xe9, 0x35, 0x3b, 0xfd, 0xaa, 0x82, 0xca, 0x50, 0xc0, 0xcd, 0xdd, 0x66, 0xfb, 0xa8,
- 0xb9, 0x57, 0x4d, 0x35, 0xf2, 0x81, 0x8b, 0xd7, 0xfe, 0xc9, 0x53, 0x49, 0x9c, 0xb7, 0x5b, 0x00,
- 0x71, 0x11, 0x08, 0x62, 0xf7, 0xe1, 0xcc, 0x57, 0x81, 0x8b, 0x51, 0x09, 0x40, 0x2f, 0x61, 0x35,
- 0x8a, 0xd2, 0xc8, 0x23, 0xa6, 0xc3, 0xf4, 0x76, 0x18, 0xa6, 0xf1, 0xbe, 0x88, 0x53, 0xf4, 0x16,
- 0xee, 0x84, 0xd8, 0x29, 0x6f, 0x0d, 0xe1, 0x69, 0x01, 0x0f, 0xf9, 0x93, 0xf7, 0x1f, 0x04, 0xfa,
- 0xbf, 0x52, 0x90, 0xe1, 0x25, 0x65, 0xae, 0x06, 0xe8, 0x4d, 0xe0, 0x08, 0x69, 0xe1, 0x08, 0x0f,
- 0x67, 0x29, 0x5d, 0xc9, 0x67, 0x9f, 0x76, 0xd2, 0xcc, 0x37, 0xd5, 0xf6, 0xe9, 0x5e, 0x2c, 0xfb,
- 0x2d, 0xbd, 0x58, 0xfd, 0xe0, 0x4a, 0x47, 0xb9, 0x05, 0x4b, 0xbb, 0xad, 0xf6, 0xe1, 0x9e, 0x7e,
- 0xd8, 0xee, 0x1c, 0x34, 0xf7, 0xf4, 0x5e, 0x77, 0xa7, 0x53, 0x55, 0xd0, 0x0a, 0xa0, 0xee, 0x0e,
- 0x6e, 0x76, 0xfa, 0x53, 0xeb, 0xa9, 0xda, 0x6f, 0x21, 0x2b, 0x4a, 0x36, 0x7a, 0x0e, 0x19, 0x5e,
- 0xb4, 0x03, 0x57, 0xb9, 0x3f, 0xcb, 0x65, 0x61, 0x81, 0x40, 0x1a, 0xdc, 0x0c, 0x1f, 0x59, 0x94,
- 0xfd, 0x29, 0xd7, 0x58, 0x0a, 0xb6, 0x84, 0x12, 0xf1, 0xa6, 0xf5, 0x37, 0x50, 0x08, 0xfb, 0x36,
- 0xb4, 0x0a, 0xb7, 0xf8, 0x41, 0xf4, 0x83, 0x76, 0x67, 0xef, 0x82, 0x21, 0x00, 0xb9, 0x5e, 0x13,
- 0x1f, 0x35, 0x71, 0x55, 0xe1, 0xe3, 0xdd, 0xc3, 0x36, 0xf7, 0xff, 0x54, 0xfd, 0x19, 0xe4, 0x64,
- 0xaf, 0x80, 0x10, 0x64, 0x86, 0xb6, 0x29, 0x03, 0x3d, 0x8b, 0xc5, 0x18, 0xa9, 0x90, 0x0f, 0x3c,
- 0x2d, 0xa8, 0x6e, 0xe1, 0xb4, 0xfe, 0x0f, 0x05, 0x2a, 0xd3, 0x59, 0x1e, 0x7d, 0x84, 0xb2, 0x27,
- 0xb2, 0x93, 0x2e, 0xcb, 0xc4, 0x1c, 0x79, 0xad, 0xb5, 0x80, 0x4b, 0x92, 0x43, 0x52, 0x7e, 0x17,
- 0x8a, 0x16, 0x65, 0x7a, 0x5c, 0x76, 0xd2, 0xad, 0x05, 0x5c, 0xb0, 0x28, 0x93, 0xdb, 0x77, 0x01,
- 0x06, 0xb6, 0x3d, 0x0e, 0xf6, 0xb9, 0x63, 0x16, 0x5a, 0x0b, 0xb8, 0x38, 0x08, 0x5b, 0x0e, 0x74,
- 0x0f, 0xca, 0xa6, 0xed, 0x0f, 0xc6, 0x24, 0x10, 0xe1, 0x6e, 0xa7, 0x70, 0x25, 0x72, 0x55, 0x08,
- 0x45, 0x41, 0x5f, 0xff, 0x63, 0x0e, 0x20, 0xee, 0x02, 0x51, 0x9f, 0xdb, 0xc3, 0x3b, 0xc8, 0x63,
- 0xd7, 0x98, 0x88, 0x26, 0x82, 0xdb, 0xb3, 0x35, 0x53, 0x0b, 0x29, 0x87, 0xfb, 0x02, 0x88, 0x65,
- 0x23, 0x2a, 0x27, 0x68, 0x03, 0x6e, 0x26, 0xfa, 0x52, 0xfd, 0xc4, 0xf0, 0x4e, 0xf4, 0x28, 0x1f,
- 0x56, 0xe3, 0xc6, 0xb3, 0x65, 0x78, 0x27, 0x6d, 0xb3, 0xf6, 0x9f, 0x74, 0x70, 0x26, 0x01, 0x47,
- 0x1f, 0x61, 0xf1, 0xd8, 0xa7, 0x43, 0x9e, 0x14, 0x74, 0xf1, 0x71, 0x30, 0x4f, 0xf1, 0x28, 0x87,
- 0x14, 0x1d, 0x4e, 0x39, 0x80, 0x15, 0xdb, 0xb5, 0x46, 0x16, 0x35, 0xc6, 0xfa, 0x34, 0x77, 0x6a,
- 0x0e, 0xee, 0xe5, 0x90, 0x6b, 0x3f, 0xa9, 0xa3, 0x0d, 0xc5, 0x63, 0x6b, 0x4c, 0x24, 0x6d, 0x7a,
- 0x0e, 0xda, 0x02, 0x87, 0x0b, 0xaa, 0xbb, 0x50, 0x1a, 0x5b, 0x94, 0xe8, 0xd4, 0x9f, 0x0c, 0x88,
- 0x2b, 0x5e, 0x34, 0x8d, 0x81, 0x2f, 0x75, 0xc4, 0x0a, 0xba, 0x07, 0x8b, 0x43, 0x7b, 0xec, 0x4f,
- 0x68, 0x28, 0x92, 0x15, 0x22, 0x65, 0xb9, 0x18, 0x08, 0x35, 0xa0, 0x34, 0xb6, 0x0d, 0x53, 0x9f,
- 0xd8, 0xa6, 0x3f, 0x0e, 0xbf, 0x51, 0xae, 0x6a, 0xa8, 0xdf, 0x0b, 0x41, 0x0c, 0x1c, 0x25, 0xc7,
- 0xa8, 0x07, 0x15, 0xd9, 0x1a, 0xeb, 0x67, 0xc4, 0xf5, 0x78, 0x25, 0xcf, 0xcf, 0x61, 0xd9, 0xa2,
- 0xe4, 0x38, 0x92, 0x14, 0xb5, 0xdf, 0x2b, 0x50, 0x4a, 0xf8, 0x0e, 0xda, 0x87, 0xac, 0x70, 0xbf,
- 0x59, 0x5a, 0xd8, 0x2f, 0x79, 0x1f, 0x96, 0x70, 0xf4, 0x18, 0x96, 0xc3, 0xb4, 0x22, 0xdd, 0x79,
- 0x2a, 0xaf, 0xa0, 0x60, 0x4f, 0x2a, 0x95, 0x89, 0xe5, 0xaf, 0x0a, 0xe4, 0x02, 0x4b, 0xf7, 0x20,
- 0x17, 0x5c, 0xd4, 0x3c, 0xee, 0x16, 0x60, 0xd1, 0xcf, 0xa0, 0x30, 0xf0, 0x79, 0x9b, 0x1f, 0xb8,
- 0xfb, 0xff, 0xcb, 0x93, 0x17, 0xe8, 0xb6, 0x59, 0xff, 0x15, 0x2c, 0x5d, 0xda, 0x8d, 0xdb, 0x70,
- 0x25, 0xd1, 0x86, 0x73, 0xb3, 0x99, 0x14, 0x25, 0xa6, 0x3e, 0x38, 0x67, 0x64, 0xda, 0xec, 0x68,
- 0xaf, 0x71, 0xce, 0x88, 0x30, 0xbb, 0xf1, 0x27, 0x05, 0xee, 0x58, 0xf6, 0xd7, 0x0f, 0xd6, 0x90,
- 0x9f, 0x18, 0x5d, 0xbe, 0xd8, 0x55, 0x7e, 0xd9, 0x18, 0x59, 0xec, 0xc4, 0x1f, 0x68, 0x43, 0x7b,
- 0xb2, 0x29, 0xe5, 0x37, 0x2c, 0xea, 0x31, 0xd7, 0x9f, 0x10, 0x2a, 0x8b, 0xf7, 0x66, 0x4c, 0xb5,
- 0x21, 0xff, 0xe3, 0x18, 0x11, 0xba, 0x31, 0x8a, 0xff, 0x2c, 0xf9, 0x77, 0x6a, 0xf5, 0x83, 0x43,
- 0xe8, 0xae, 0xd4, 0x26, 0x88, 0x65, 0xb1, 0xd2, 0x8e, 0xb6, 0x06, 0x39, 0x01, 0x79, 0xf2, 0xbf,
- 0x00, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x56, 0xb6, 0xfd, 0x6c, 0x11, 0x00, 0x00,
-}
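The `RegisterType`/`RegisterFile` calls above populate the golang/protobuf name registry that other code removed in this patch relies on (for example jsonpb's `defaultResolveAny`, further below). A minimal sketch — not part of the patch — of resolving a registered message by its fully-qualified name; the helper name `messageByName` is ours, and the lookup only succeeds when the generated package is linked into the binary:

```go
package main

import (
	"fmt"
	"reflect"

	proto "github.com/golang/protobuf/proto"
	// Blank-import the generated package so its init() registers the types.
	_ "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
)

// messageByName resolves a registered message by its full proto name,
// mirroring what jsonpb's defaultResolveAny does for Any type URLs.
func messageByName(name string) (proto.Message, error) {
	mt := proto.MessageType(name) // reflect.Type of the *T pointer, or nil
	if mt == nil {
		return nil, fmt.Errorf("unknown message type %q", name)
	}
	return reflect.New(mt.Elem()).Interface().(proto.Message), nil
}

func main() {
	m, err := messageByName("opencensus.proto.trace.v1.Status")
	fmt.Printf("%T %v\n", m, err)
}
```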
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go
deleted file mode 100644
index 025387784..000000000
--- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go
+++ /dev/null
@@ -1,359 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: opencensus/proto/trace/v1/trace_config.proto
-
-package v1
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// How spans should be sampled:
-// - Always off
-// - Always on
-// - Always follow the parent Span's decision (off if no parent).
-type ConstantSampler_ConstantDecision int32
-
-const (
- ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0
- ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1
- ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2
-)
-
-var ConstantSampler_ConstantDecision_name = map[int32]string{
- 0: "ALWAYS_OFF",
- 1: "ALWAYS_ON",
- 2: "ALWAYS_PARENT",
-}
-
-var ConstantSampler_ConstantDecision_value = map[string]int32{
- "ALWAYS_OFF": 0,
- "ALWAYS_ON": 1,
- "ALWAYS_PARENT": 2,
-}
-
-func (x ConstantSampler_ConstantDecision) String() string {
- return proto.EnumName(ConstantSampler_ConstantDecision_name, int32(x))
-}
-
-func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_5359209b41ff50c5, []int{2, 0}
-}
-
-// Global configuration of the trace service. All fields must be specified, or
-// the default (zero) values will be used for each type.
-type TraceConfig struct {
- // The global default sampler used to make decisions on span sampling.
- //
- // Types that are valid to be assigned to Sampler:
- // *TraceConfig_ProbabilitySampler
- // *TraceConfig_ConstantSampler
- // *TraceConfig_RateLimitingSampler
- Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"`
- // The global default max number of attributes per span.
- MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"`
- // The global default max number of annotation events per span.
- MaxNumberOfAnnotations int64 `protobuf:"varint,5,opt,name=max_number_of_annotations,json=maxNumberOfAnnotations,proto3" json:"max_number_of_annotations,omitempty"`
- // The global default max number of message events per span.
- MaxNumberOfMessageEvents int64 `protobuf:"varint,6,opt,name=max_number_of_message_events,json=maxNumberOfMessageEvents,proto3" json:"max_number_of_message_events,omitempty"`
- // The global default max number of link entries per span.
- MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TraceConfig) Reset() { *m = TraceConfig{} }
-func (m *TraceConfig) String() string { return proto.CompactTextString(m) }
-func (*TraceConfig) ProtoMessage() {}
-func (*TraceConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_5359209b41ff50c5, []int{0}
-}
-
-func (m *TraceConfig) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TraceConfig.Unmarshal(m, b)
-}
-func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic)
-}
-func (m *TraceConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TraceConfig.Merge(m, src)
-}
-func (m *TraceConfig) XXX_Size() int {
- return xxx_messageInfo_TraceConfig.Size(m)
-}
-func (m *TraceConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_TraceConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TraceConfig proto.InternalMessageInfo
-
-type isTraceConfig_Sampler interface {
- isTraceConfig_Sampler()
-}
-
-type TraceConfig_ProbabilitySampler struct {
- ProbabilitySampler *ProbabilitySampler `protobuf:"bytes,1,opt,name=probability_sampler,json=probabilitySampler,proto3,oneof"`
-}
-
-type TraceConfig_ConstantSampler struct {
- ConstantSampler *ConstantSampler `protobuf:"bytes,2,opt,name=constant_sampler,json=constantSampler,proto3,oneof"`
-}
-
-type TraceConfig_RateLimitingSampler struct {
- RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"`
-}
-
-func (*TraceConfig_ProbabilitySampler) isTraceConfig_Sampler() {}
-
-func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {}
-
-func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {}
-
-func (m *TraceConfig) GetSampler() isTraceConfig_Sampler {
- if m != nil {
- return m.Sampler
- }
- return nil
-}
-
-func (m *TraceConfig) GetProbabilitySampler() *ProbabilitySampler {
- if x, ok := m.GetSampler().(*TraceConfig_ProbabilitySampler); ok {
- return x.ProbabilitySampler
- }
- return nil
-}
-
-func (m *TraceConfig) GetConstantSampler() *ConstantSampler {
- if x, ok := m.GetSampler().(*TraceConfig_ConstantSampler); ok {
- return x.ConstantSampler
- }
- return nil
-}
-
-func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler {
- if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok {
- return x.RateLimitingSampler
- }
- return nil
-}
-
-func (m *TraceConfig) GetMaxNumberOfAttributes() int64 {
- if m != nil {
- return m.MaxNumberOfAttributes
- }
- return 0
-}
-
-func (m *TraceConfig) GetMaxNumberOfAnnotations() int64 {
- if m != nil {
- return m.MaxNumberOfAnnotations
- }
- return 0
-}
-
-func (m *TraceConfig) GetMaxNumberOfMessageEvents() int64 {
- if m != nil {
- return m.MaxNumberOfMessageEvents
- }
- return 0
-}
-
-func (m *TraceConfig) GetMaxNumberOfLinks() int64 {
- if m != nil {
- return m.MaxNumberOfLinks
- }
- return 0
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*TraceConfig) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*TraceConfig_ProbabilitySampler)(nil),
- (*TraceConfig_ConstantSampler)(nil),
- (*TraceConfig_RateLimitingSampler)(nil),
- }
-}
-
-// Sampler that tries to uniformly sample traces with a given probability.
-// The probability of sampling a trace is equal to that of the specified probability.
-type ProbabilitySampler struct {
- // The desired probability of sampling. Must be within [0.0, 1.0].
- SamplingProbability float64 `protobuf:"fixed64,1,opt,name=samplingProbability,proto3" json:"samplingProbability,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ProbabilitySampler) Reset() { *m = ProbabilitySampler{} }
-func (m *ProbabilitySampler) String() string { return proto.CompactTextString(m) }
-func (*ProbabilitySampler) ProtoMessage() {}
-func (*ProbabilitySampler) Descriptor() ([]byte, []int) {
- return fileDescriptor_5359209b41ff50c5, []int{1}
-}
-
-func (m *ProbabilitySampler) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ProbabilitySampler.Unmarshal(m, b)
-}
-func (m *ProbabilitySampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ProbabilitySampler.Marshal(b, m, deterministic)
-}
-func (m *ProbabilitySampler) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ProbabilitySampler.Merge(m, src)
-}
-func (m *ProbabilitySampler) XXX_Size() int {
- return xxx_messageInfo_ProbabilitySampler.Size(m)
-}
-func (m *ProbabilitySampler) XXX_DiscardUnknown() {
- xxx_messageInfo_ProbabilitySampler.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ProbabilitySampler proto.InternalMessageInfo
-
-func (m *ProbabilitySampler) GetSamplingProbability() float64 {
- if m != nil {
- return m.SamplingProbability
- }
- return 0
-}
-
-// Sampler that always makes a constant decision on span sampling.
-type ConstantSampler struct {
- Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opencensus.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ConstantSampler) Reset() { *m = ConstantSampler{} }
-func (m *ConstantSampler) String() string { return proto.CompactTextString(m) }
-func (*ConstantSampler) ProtoMessage() {}
-func (*ConstantSampler) Descriptor() ([]byte, []int) {
- return fileDescriptor_5359209b41ff50c5, []int{2}
-}
-
-func (m *ConstantSampler) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ConstantSampler.Unmarshal(m, b)
-}
-func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic)
-}
-func (m *ConstantSampler) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ConstantSampler.Merge(m, src)
-}
-func (m *ConstantSampler) XXX_Size() int {
- return xxx_messageInfo_ConstantSampler.Size(m)
-}
-func (m *ConstantSampler) XXX_DiscardUnknown() {
- xxx_messageInfo_ConstantSampler.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo
-
-func (m *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision {
- if m != nil {
- return m.Decision
- }
- return ConstantSampler_ALWAYS_OFF
-}
-
-// Sampler that tries to sample with a rate per time window.
-type RateLimitingSampler struct {
- // Rate per second.
- Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} }
-func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) }
-func (*RateLimitingSampler) ProtoMessage() {}
-func (*RateLimitingSampler) Descriptor() ([]byte, []int) {
- return fileDescriptor_5359209b41ff50c5, []int{3}
-}
-
-func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_RateLimitingSampler.Unmarshal(m, b)
-}
-func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic)
-}
-func (m *RateLimitingSampler) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RateLimitingSampler.Merge(m, src)
-}
-func (m *RateLimitingSampler) XXX_Size() int {
- return xxx_messageInfo_RateLimitingSampler.Size(m)
-}
-func (m *RateLimitingSampler) XXX_DiscardUnknown() {
- xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo
-
-func (m *RateLimitingSampler) GetQps() int64 {
- if m != nil {
- return m.Qps
- }
- return 0
-}
-
-func init() {
- proto.RegisterEnum("opencensus.proto.trace.v1.ConstantSampler_ConstantDecision", ConstantSampler_ConstantDecision_name, ConstantSampler_ConstantDecision_value)
- proto.RegisterType((*TraceConfig)(nil), "opencensus.proto.trace.v1.TraceConfig")
- proto.RegisterType((*ProbabilitySampler)(nil), "opencensus.proto.trace.v1.ProbabilitySampler")
- proto.RegisterType((*ConstantSampler)(nil), "opencensus.proto.trace.v1.ConstantSampler")
- proto.RegisterType((*RateLimitingSampler)(nil), "opencensus.proto.trace.v1.RateLimitingSampler")
-}
-
-func init() {
- proto.RegisterFile("opencensus/proto/trace/v1/trace_config.proto", fileDescriptor_5359209b41ff50c5)
-}
-
-var fileDescriptor_5359209b41ff50c5 = []byte{
- // 506 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xc1, 0x6e, 0xd3, 0x30,
- 0x18, 0xc7, 0x97, 0x76, 0x6c, 0xec, 0x9b, 0xb6, 0x05, 0x57, 0x43, 0xa9, 0xb4, 0xc3, 0x94, 0x0b,
- 0x13, 0x22, 0x09, 0x1d, 0x07, 0x84, 0x90, 0x90, 0xda, 0x6e, 0x15, 0x87, 0xd2, 0x56, 0xd9, 0x44,
- 0x05, 0x97, 0xe0, 0x64, 0x6e, 0xb0, 0x68, 0xec, 0x60, 0x3b, 0xd5, 0x78, 0x0d, 0xce, 0x3c, 0x04,
- 0xcf, 0xc5, 0x53, 0xa0, 0x3a, 0x21, 0x49, 0xdb, 0x6d, 0xe2, 0x96, 0xef, 0xfb, 0x7f, 0xbf, 0x9f,
- 0xad, 0xd8, 0x86, 0x17, 0x3c, 0x25, 0x2c, 0x22, 0x4c, 0x66, 0xd2, 0x4b, 0x05, 0x57, 0xdc, 0x53,
- 0x02, 0x47, 0xc4, 0x5b, 0x74, 0xf2, 0x8f, 0x20, 0xe2, 0x6c, 0x46, 0x63, 0x57, 0x67, 0xa8, 0x5d,
- 0x4d, 0xe7, 0x1d, 0x57, 0x0f, 0xb9, 0x8b, 0x8e, 0xfd, 0x6b, 0x1b, 0xf6, 0xaf, 0x97, 0x45, 0x5f,
- 0x03, 0xe8, 0x0b, 0xb4, 0x52, 0xc1, 0x43, 0x1c, 0xd2, 0x39, 0x55, 0x3f, 0x02, 0x89, 0x93, 0x74,
- 0x4e, 0x84, 0x65, 0x9c, 0x1a, 0x67, 0xfb, 0xe7, 0x8e, 0x7b, 0xaf, 0xc8, 0x9d, 0x54, 0xd4, 0x55,
- 0x0e, 0xbd, 0xdf, 0xf2, 0x51, 0xba, 0xd1, 0x45, 0x53, 0x30, 0x23, 0xce, 0xa4, 0xc2, 0x4c, 0x95,
- 0xfa, 0x86, 0xd6, 0x3f, 0x7f, 0x40, 0xdf, 0x2f, 0x90, 0xca, 0x7d, 0x14, 0xad, 0xb6, 0xd0, 0x0d,
- 0x1c, 0x0b, 0xac, 0x48, 0x30, 0xa7, 0x09, 0x55, 0x94, 0xc5, 0xa5, 0xbd, 0xa9, 0xed, 0xee, 0x03,
- 0x76, 0x1f, 0x2b, 0x32, 0x2c, 0xb0, 0x6a, 0x85, 0x96, 0xd8, 0x6c, 0xa3, 0xd7, 0x60, 0x25, 0xf8,
- 0x36, 0x60, 0x59, 0x12, 0x12, 0x11, 0xf0, 0x59, 0x80, 0x95, 0x12, 0x34, 0xcc, 0x14, 0x91, 0xd6,
- 0xf6, 0xa9, 0x71, 0xd6, 0xf4, 0x8f, 0x13, 0x7c, 0x3b, 0xd2, 0xf1, 0x78, 0xd6, 0x2d, 0x43, 0xf4,
- 0x06, 0xda, 0x6b, 0x20, 0x63, 0x5c, 0x61, 0x45, 0x39, 0x93, 0xd6, 0x23, 0x4d, 0x3e, 0xad, 0x93,
- 0x55, 0x8a, 0xde, 0xc1, 0xc9, 0x2a, 0x9a, 0x10, 0x29, 0x71, 0x4c, 0x02, 0xb2, 0x20, 0x4c, 0x49,
- 0x6b, 0x47, 0xd3, 0x56, 0x8d, 0xfe, 0x90, 0x0f, 0x5c, 0xea, 0x1c, 0x39, 0xd0, 0x5a, 0xe5, 0xe7,
- 0x94, 0x7d, 0x93, 0xd6, 0xae, 0xc6, 0xcc, 0x1a, 0x36, 0x5c, 0xf6, 0x7b, 0x7b, 0xb0, 0x5b, 0xfc,
- 0x3a, 0x7b, 0x00, 0x68, 0xf3, 0x60, 0xd1, 0x4b, 0x68, 0xe9, 0x01, 0xca, 0xe2, 0x5a, 0xaa, 0x2f,
- 0x89, 0xe1, 0xdf, 0x15, 0xd9, 0xbf, 0x0d, 0x38, 0x5a, 0x3b, 0x42, 0x34, 0x85, 0xc7, 0x37, 0x24,
- 0xa2, 0x92, 0x72, 0xa6, 0xd1, 0xc3, 0xf3, 0xb7, 0xff, 0x7f, 0x01, 0xca, 0xfa, 0xa2, 0x50, 0xf8,
- 0xa5, 0xcc, 0xbe, 0x00, 0x73, 0x3d, 0x45, 0x87, 0x00, 0xdd, 0xe1, 0xb4, 0xfb, 0xe9, 0x2a, 0x18,
- 0x0f, 0x06, 0xe6, 0x16, 0x3a, 0x80, 0xbd, 0x7f, 0xf5, 0xc8, 0x34, 0xd0, 0x13, 0x38, 0x28, 0xca,
- 0x49, 0xd7, 0xbf, 0x1c, 0x5d, 0x9b, 0x0d, 0xfb, 0x19, 0xb4, 0xee, 0xb8, 0x16, 0xc8, 0x84, 0xe6,
- 0xf7, 0x54, 0xea, 0x0d, 0x37, 0xfd, 0xe5, 0x67, 0xef, 0xa7, 0x01, 0x27, 0x94, 0xdf, 0xbf, 0xf5,
- 0x9e, 0x59, 0x7b, 0x60, 0x93, 0x65, 0x34, 0x31, 0x3e, 0xf7, 0x62, 0xaa, 0xbe, 0x66, 0xa1, 0x1b,
- 0xf1, 0xc4, 0xcb, 0x29, 0x87, 0x32, 0xa9, 0x44, 0x96, 0x10, 0x96, 0x1f, 0xbb, 0x57, 0x09, 0x9d,
- 0xfc, 0x89, 0xc7, 0x84, 0x39, 0x71, 0xf5, 0xd2, 0xff, 0x34, 0xda, 0xe3, 0x94, 0xb0, 0x7e, 0xbe,
- 0xa6, 0x16, 0xbb, 0x7a, 0x25, 0xf7, 0x63, 0x27, 0xdc, 0xd1, 0xc8, 0xab, 0xbf, 0x01, 0x00, 0x00,
- 0xff, 0xff, 0x50, 0x0c, 0xfe, 0x32, 0x29, 0x04, 0x00, 0x00,
-}
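For reference, a minimal sketch — not part of the patch, and assuming the import alias `tracev1` for the generated package being deleted — of how the `TraceConfig` oneof `Sampler` field and its getters are used: the oneof is set by assigning one of the generated wrapper structs, and each getter returns a non-nil value only when its wrapper is the one that was assigned.

```go
package main

import (
	"fmt"

	tracev1 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
)

func main() {
	cfg := &tracev1.TraceConfig{
		// Set the oneof by assigning the generated wrapper struct.
		Sampler: &tracev1.TraceConfig_ProbabilitySampler{
			ProbabilitySampler: &tracev1.ProbabilitySampler{SamplingProbability: 0.25},
		},
		MaxNumberOfAttributes: 32,
	}

	// Getters are nil-safe and return the value only for the wrapper that was set.
	if ps := cfg.GetProbabilitySampler(); ps != nil {
		fmt.Println("sampling probability:", ps.GetSamplingProbability())
	}
	if cfg.GetConstantSampler() == nil {
		fmt.Println("constant sampler not set")
	}
}
```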
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
deleted file mode 100644
index f0d66befb..000000000
--- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
+++ /dev/null
@@ -1,1290 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2015 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/*
-Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON.
-It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json.
-
-This package produces a different output than the standard "encoding/json" package,
-which does not operate correctly on protocol buffers.
-*/
-package jsonpb
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "math"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "github.com/golang/protobuf/proto"
-
- stpb "github.com/golang/protobuf/ptypes/struct"
-)
-
-const secondInNanos = int64(time.Second / time.Nanosecond)
-const maxSecondsInDuration = 315576000000
-
-// Marshaler is a configurable object for converting between
-// protocol buffer objects and a JSON representation for them.
-type Marshaler struct {
- // Whether to render enum values as integers, as opposed to string values.
- EnumsAsInts bool
-
- // Whether to render fields with zero values.
- EmitDefaults bool
-
- // A string to indent each level by. The presence of this field will
- // also cause a space to appear between the field separator and
- // value, and for newlines to be appear between fields and array
- // elements.
- Indent string
-
- // Whether to use the original (.proto) name for fields.
- OrigName bool
-
- // A custom URL resolver to use when marshaling Any messages to JSON.
- // If unset, the default resolution strategy is to extract the
- // fully-qualified type name from the type URL and pass that to
- // proto.MessageType(string).
- AnyResolver AnyResolver
-}
-
-// AnyResolver takes a type URL, present in an Any message, and resolves it into
-// an instance of the associated message.
-type AnyResolver interface {
- Resolve(typeUrl string) (proto.Message, error)
-}
-
-func defaultResolveAny(typeUrl string) (proto.Message, error) {
- // Only the part of typeUrl after the last slash is relevant.
- mname := typeUrl
- if slash := strings.LastIndex(mname, "/"); slash >= 0 {
- mname = mname[slash+1:]
- }
- mt := proto.MessageType(mname)
- if mt == nil {
- return nil, fmt.Errorf("unknown message type %q", mname)
- }
- return reflect.New(mt.Elem()).Interface().(proto.Message), nil
-}
-
-// JSONPBMarshaler is implemented by protobuf messages that customize the
-// way they are marshaled to JSON. Messages that implement this should
-// also implement JSONPBUnmarshaler so that the custom format can be
-// parsed.
-//
-// The JSON marshaling must follow the proto to JSON specification:
-// https://developers.google.com/protocol-buffers/docs/proto3#json
-type JSONPBMarshaler interface {
- MarshalJSONPB(*Marshaler) ([]byte, error)
-}
-
-// JSONPBUnmarshaler is implemented by protobuf messages that customize
-// the way they are unmarshaled from JSON. Messages that implement this
-// should also implement JSONPBMarshaler so that the custom format can be
-// produced.
-//
-// The JSON unmarshaling must follow the JSON to proto specification:
-// https://developers.google.com/protocol-buffers/docs/proto3#json
-type JSONPBUnmarshaler interface {
- UnmarshalJSONPB(*Unmarshaler, []byte) error
-}
-
-// Marshal marshals a protocol buffer into JSON.
-func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error {
- v := reflect.ValueOf(pb)
- if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
- return errors.New("Marshal called with nil")
- }
- // Check for unset required fields first.
- if err := checkRequiredFields(pb); err != nil {
- return err
- }
- writer := &errWriter{writer: out}
- return m.marshalObject(writer, pb, "", "")
-}
-
-// MarshalToString converts a protocol buffer object to JSON string.
-func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) {
- var buf bytes.Buffer
- if err := m.Marshal(&buf, pb); err != nil {
- return "", err
- }
- return buf.String(), nil
-}
-
-type int32Slice []int32
-
-var nonFinite = map[string]float64{
- `"NaN"`: math.NaN(),
- `"Infinity"`: math.Inf(1),
- `"-Infinity"`: math.Inf(-1),
-}
-
-// For sorting extensions ids to ensure stable output.
-func (s int32Slice) Len() int { return len(s) }
-func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
-func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-type wkt interface {
- XXX_WellKnownType() string
-}
-
-var (
- wktType = reflect.TypeOf((*wkt)(nil)).Elem()
- messageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
-)
-
-// marshalObject writes a struct to the Writer.
-func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error {
- if jsm, ok := v.(JSONPBMarshaler); ok {
- b, err := jsm.MarshalJSONPB(m)
- if err != nil {
- return err
- }
- if typeURL != "" {
- // we are marshaling this object to an Any type
- var js map[string]*json.RawMessage
- if err = json.Unmarshal(b, &js); err != nil {
- return fmt.Errorf("type %T produced invalid JSON: %v", v, err)
- }
- turl, err := json.Marshal(typeURL)
- if err != nil {
- return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
- }
- js["@type"] = (*json.RawMessage)(&turl)
- if m.Indent != "" {
- b, err = json.MarshalIndent(js, indent, m.Indent)
- } else {
- b, err = json.Marshal(js)
- }
- if err != nil {
- return err
- }
- }
-
- out.write(string(b))
- return out.err
- }
-
- s := reflect.ValueOf(v).Elem()
-
- // Handle well-known types.
- if wkt, ok := v.(wkt); ok {
- switch wkt.XXX_WellKnownType() {
- case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
- "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
- // "Wrappers use the same representation in JSON
- // as the wrapped primitive type, ..."
- sprop := proto.GetProperties(s.Type())
- return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent)
- case "Any":
- // Any is a bit more involved.
- return m.marshalAny(out, v, indent)
- case "Duration":
- s, ns := s.Field(0).Int(), s.Field(1).Int()
- if s < -maxSecondsInDuration || s > maxSecondsInDuration {
- return fmt.Errorf("seconds out of range %v", s)
- }
- if ns <= -secondInNanos || ns >= secondInNanos {
- return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
- }
- if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
- return errors.New("signs of seconds and nanos do not match")
- }
- // Generated output always contains 0, 3, 6, or 9 fractional digits,
- // depending on required precision, followed by the suffix "s".
- f := "%d.%09d"
- if ns < 0 {
- ns = -ns
- if s == 0 {
- f = "-%d.%09d"
- }
- }
- x := fmt.Sprintf(f, s, ns)
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, ".000")
- out.write(`"`)
- out.write(x)
- out.write(`s"`)
- return out.err
- case "Struct", "ListValue":
- // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice.
- // TODO: pass the correct Properties if needed.
- return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent)
- case "Timestamp":
- // "RFC 3339, where generated output will always be Z-normalized
- // and uses 0, 3, 6 or 9 fractional digits."
- s, ns := s.Field(0).Int(), s.Field(1).Int()
- if ns < 0 || ns >= secondInNanos {
- return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
- }
- t := time.Unix(s, ns).UTC()
- // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
- x := t.Format("2006-01-02T15:04:05.000000000")
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, ".000")
- out.write(`"`)
- out.write(x)
- out.write(`Z"`)
- return out.err
- case "Value":
- // Value has a single oneof.
- kind := s.Field(0)
- if kind.IsNil() {
- // "absence of any variant indicates an error"
- return errors.New("nil Value")
- }
- // oneof -> *T -> T -> T.F
- x := kind.Elem().Elem().Field(0)
- // TODO: pass the correct Properties if needed.
- return m.marshalValue(out, &proto.Properties{}, x, indent)
- }
- }
-
- out.write("{")
- if m.Indent != "" {
- out.write("\n")
- }
-
- firstField := true
-
- if typeURL != "" {
- if err := m.marshalTypeURL(out, indent, typeURL); err != nil {
- return err
- }
- firstField = false
- }
-
- for i := 0; i < s.NumField(); i++ {
- value := s.Field(i)
- valueField := s.Type().Field(i)
- if strings.HasPrefix(valueField.Name, "XXX_") {
- continue
- }
-
- // IsNil will panic on most value kinds.
- switch value.Kind() {
- case reflect.Chan, reflect.Func, reflect.Interface:
- if value.IsNil() {
- continue
- }
- }
-
- if !m.EmitDefaults {
- switch value.Kind() {
- case reflect.Bool:
- if !value.Bool() {
- continue
- }
- case reflect.Int32, reflect.Int64:
- if value.Int() == 0 {
- continue
- }
- case reflect.Uint32, reflect.Uint64:
- if value.Uint() == 0 {
- continue
- }
- case reflect.Float32, reflect.Float64:
- if value.Float() == 0 {
- continue
- }
- case reflect.String:
- if value.Len() == 0 {
- continue
- }
- case reflect.Map, reflect.Ptr, reflect.Slice:
- if value.IsNil() {
- continue
- }
- }
- }
-
- // Oneof fields need special handling.
- if valueField.Tag.Get("protobuf_oneof") != "" {
- // value is an interface containing &T{real_value}.
- sv := value.Elem().Elem() // interface -> *T -> T
- value = sv.Field(0)
- valueField = sv.Type().Field(0)
- }
- prop := jsonProperties(valueField, m.OrigName)
- if !firstField {
- m.writeSep(out)
- }
- if err := m.marshalField(out, prop, value, indent); err != nil {
- return err
- }
- firstField = false
- }
-
- // Handle proto2 extensions.
- if ep, ok := v.(proto.Message); ok {
- extensions := proto.RegisteredExtensions(v)
- // Sort extensions for stable output.
- ids := make([]int32, 0, len(extensions))
- for id, desc := range extensions {
- if !proto.HasExtension(ep, desc) {
- continue
- }
- ids = append(ids, id)
- }
- sort.Sort(int32Slice(ids))
- for _, id := range ids {
- desc := extensions[id]
- if desc == nil {
- // unknown extension
- continue
- }
- ext, extErr := proto.GetExtension(ep, desc)
- if extErr != nil {
- return extErr
- }
- value := reflect.ValueOf(ext)
- var prop proto.Properties
- prop.Parse(desc.Tag)
- prop.JSONName = fmt.Sprintf("[%s]", desc.Name)
- if !firstField {
- m.writeSep(out)
- }
- if err := m.marshalField(out, &prop, value, indent); err != nil {
- return err
- }
- firstField = false
- }
-
- }
-
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- }
- out.write("}")
- return out.err
-}
-
-func (m *Marshaler) writeSep(out *errWriter) {
- if m.Indent != "" {
- out.write(",\n")
- } else {
- out.write(",")
- }
-}
-
-func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error {
- // "If the Any contains a value that has a special JSON mapping,
- // it will be converted as follows: {"@type": xxx, "value": yyy}.
- // Otherwise, the value will be converted into a JSON object,
- // and the "@type" field will be inserted to indicate the actual data type."
- v := reflect.ValueOf(any).Elem()
- turl := v.Field(0).String()
- val := v.Field(1).Bytes()
-
- var msg proto.Message
- var err error
- if m.AnyResolver != nil {
- msg, err = m.AnyResolver.Resolve(turl)
- } else {
- msg, err = defaultResolveAny(turl)
- }
- if err != nil {
- return err
- }
-
- if err := proto.Unmarshal(val, msg); err != nil {
- return err
- }
-
- if _, ok := msg.(wkt); ok {
- out.write("{")
- if m.Indent != "" {
- out.write("\n")
- }
- if err := m.marshalTypeURL(out, indent, turl); err != nil {
- return err
- }
- m.writeSep(out)
- if m.Indent != "" {
- out.write(indent)
- out.write(m.Indent)
- out.write(`"value": `)
- } else {
- out.write(`"value":`)
- }
- if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil {
- return err
- }
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- }
- out.write("}")
- return out.err
- }
-
- return m.marshalObject(out, msg, indent, turl)
-}
-
-func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error {
- if m.Indent != "" {
- out.write(indent)
- out.write(m.Indent)
- }
- out.write(`"@type":`)
- if m.Indent != "" {
- out.write(" ")
- }
- b, err := json.Marshal(typeURL)
- if err != nil {
- return err
- }
- out.write(string(b))
- return out.err
-}
-
-// marshalField writes field description and value to the Writer.
-func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
- if m.Indent != "" {
- out.write(indent)
- out.write(m.Indent)
- }
- out.write(`"`)
- out.write(prop.JSONName)
- out.write(`":`)
- if m.Indent != "" {
- out.write(" ")
- }
- if err := m.marshalValue(out, prop, v, indent); err != nil {
- return err
- }
- return nil
-}
-
-// marshalValue writes the value to the Writer.
-func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
- var err error
- v = reflect.Indirect(v)
-
- // Handle nil pointer
- if v.Kind() == reflect.Invalid {
- out.write("null")
- return out.err
- }
-
- // Handle repeated elements.
- if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
- out.write("[")
- comma := ""
- for i := 0; i < v.Len(); i++ {
- sliceVal := v.Index(i)
- out.write(comma)
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- out.write(m.Indent)
- out.write(m.Indent)
- }
- if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil {
- return err
- }
- comma = ","
- }
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- out.write(m.Indent)
- }
- out.write("]")
- return out.err
- }
-
- // Handle well-known types.
- // Most are handled up in marshalObject (because 99% are messages).
- if v.Type().Implements(wktType) {
- wkt := v.Interface().(wkt)
- switch wkt.XXX_WellKnownType() {
- case "NullValue":
- out.write("null")
- return out.err
- }
- }
-
- // Handle enumerations.
- if !m.EnumsAsInts && prop.Enum != "" {
-		// Unknown enum values are stringified by the proto library as their
- // value. Such values should _not_ be quoted or they will be interpreted
- // as an enum string instead of their value.
- enumStr := v.Interface().(fmt.Stringer).String()
- var valStr string
- if v.Kind() == reflect.Ptr {
- valStr = strconv.Itoa(int(v.Elem().Int()))
- } else {
- valStr = strconv.Itoa(int(v.Int()))
- }
- isKnownEnum := enumStr != valStr
- if isKnownEnum {
- out.write(`"`)
- }
- out.write(enumStr)
- if isKnownEnum {
- out.write(`"`)
- }
- return out.err
- }
-
- // Handle nested messages.
- if v.Kind() == reflect.Struct {
- return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "")
- }
-
- // Handle maps.
- // Since Go randomizes map iteration, we sort keys for stable output.
- if v.Kind() == reflect.Map {
- out.write(`{`)
- keys := v.MapKeys()
- sort.Sort(mapKeys(keys))
- for i, k := range keys {
- if i > 0 {
- out.write(`,`)
- }
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- out.write(m.Indent)
- out.write(m.Indent)
- }
-
- // TODO handle map key prop properly
- b, err := json.Marshal(k.Interface())
- if err != nil {
- return err
- }
- s := string(b)
-
- // If the JSON is not a string value, encode it again to make it one.
- if !strings.HasPrefix(s, `"`) {
- b, err := json.Marshal(s)
- if err != nil {
- return err
- }
- s = string(b)
- }
-
- out.write(s)
- out.write(`:`)
- if m.Indent != "" {
- out.write(` `)
- }
-
- vprop := prop
- if prop != nil && prop.MapValProp != nil {
- vprop = prop.MapValProp
- }
- if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil {
- return err
- }
- }
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- out.write(m.Indent)
- }
- out.write(`}`)
- return out.err
- }
-
- // Handle non-finite floats, e.g. NaN, Infinity and -Infinity.
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
- f := v.Float()
- var sval string
- switch {
- case math.IsInf(f, 1):
- sval = `"Infinity"`
- case math.IsInf(f, -1):
- sval = `"-Infinity"`
- case math.IsNaN(f):
- sval = `"NaN"`
- }
- if sval != "" {
- out.write(sval)
- return out.err
- }
- }
-
- // Default handling defers to the encoding/json library.
- b, err := json.Marshal(v.Interface())
- if err != nil {
- return err
- }
- needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64)
- if needToQuote {
- out.write(`"`)
- }
- out.write(string(b))
- if needToQuote {
- out.write(`"`)
- }
- return out.err
-}
-
-// Unmarshaler is a configurable object for converting from a JSON
-// representation to a protocol buffer object.
-type Unmarshaler struct {
- // Whether to allow messages to contain unknown fields, as opposed to
- // failing to unmarshal.
- AllowUnknownFields bool
-
- // A custom URL resolver to use when unmarshaling Any messages from JSON.
- // If unset, the default resolution strategy is to extract the
- // fully-qualified type name from the type URL and pass that to
- // proto.MessageType(string).
- AnyResolver AnyResolver
-}
-
-// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
-// This function is lenient and will decode any options permutations of the
-// related Marshaler.
-func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
- inputValue := json.RawMessage{}
- if err := dec.Decode(&inputValue); err != nil {
- return err
- }
- if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil {
- return err
- }
- return checkRequiredFields(pb)
-}
-
-// Unmarshal unmarshals a JSON object stream into a protocol
-// buffer. This function is lenient and will decode any options
-// permutations of the related Marshaler.
-func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error {
- dec := json.NewDecoder(r)
- return u.UnmarshalNext(dec, pb)
-}
-
-// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
-// This function is lenient and will decode any options permutations of the
-// related Marshaler.
-func UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
- return new(Unmarshaler).UnmarshalNext(dec, pb)
-}
-
-// Unmarshal unmarshals a JSON object stream into a protocol
-// buffer. This function is lenient and will decode any options
-// permutations of the related Marshaler.
-func Unmarshal(r io.Reader, pb proto.Message) error {
- return new(Unmarshaler).Unmarshal(r, pb)
-}
-
-// UnmarshalString will populate the fields of a protocol buffer based
-// on a JSON string. This function is lenient and will decode any options
-// permutations of the related Marshaler.
-func UnmarshalString(str string, pb proto.Message) error {
- return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb)
-}
-
-// unmarshalValue converts/copies a value into the target.
-// prop may be nil.
-func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error {
- targetType := target.Type()
-
- // Allocate memory for pointer fields.
- if targetType.Kind() == reflect.Ptr {
- // If input value is "null" and target is a pointer type, then the field should be treated as not set
- // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue.
- _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler)
- if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler {
- return nil
- }
- target.Set(reflect.New(targetType.Elem()))
-
- return u.unmarshalValue(target.Elem(), inputValue, prop)
- }
-
- if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok {
- return jsu.UnmarshalJSONPB(u, []byte(inputValue))
- }
-
- // Handle well-known types that are not pointers.
- if w, ok := target.Addr().Interface().(wkt); ok {
- switch w.XXX_WellKnownType() {
- case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
- "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
- return u.unmarshalValue(target.Field(0), inputValue, prop)
- case "Any":
- // Use json.RawMessage pointer type instead of value to support pre-1.8 version.
- // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see
- // https://github.com/golang/go/issues/14493
- var jsonFields map[string]*json.RawMessage
- if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
- return err
- }
-
- val, ok := jsonFields["@type"]
- if !ok || val == nil {
- return errors.New("Any JSON doesn't have '@type'")
- }
-
- var turl string
- if err := json.Unmarshal([]byte(*val), &turl); err != nil {
- return fmt.Errorf("can't unmarshal Any's '@type': %q", *val)
- }
- target.Field(0).SetString(turl)
-
- var m proto.Message
- var err error
- if u.AnyResolver != nil {
- m, err = u.AnyResolver.Resolve(turl)
- } else {
- m, err = defaultResolveAny(turl)
- }
- if err != nil {
- return err
- }
-
- if _, ok := m.(wkt); ok {
- val, ok := jsonFields["value"]
- if !ok {
- return errors.New("Any JSON doesn't have 'value'")
- }
-
- if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil {
- return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
- }
- } else {
- delete(jsonFields, "@type")
- nestedProto, err := json.Marshal(jsonFields)
- if err != nil {
- return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
- }
-
- if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil {
- return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
- }
- }
-
- b, err := proto.Marshal(m)
- if err != nil {
- return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err)
- }
- target.Field(1).SetBytes(b)
-
- return nil
- case "Duration":
- unq, err := unquote(string(inputValue))
- if err != nil {
- return err
- }
-
- d, err := time.ParseDuration(unq)
- if err != nil {
- return fmt.Errorf("bad Duration: %v", err)
- }
-
- ns := d.Nanoseconds()
- s := ns / 1e9
- ns %= 1e9
- target.Field(0).SetInt(s)
- target.Field(1).SetInt(ns)
- return nil
- case "Timestamp":
- unq, err := unquote(string(inputValue))
- if err != nil {
- return err
- }
-
- t, err := time.Parse(time.RFC3339Nano, unq)
- if err != nil {
- return fmt.Errorf("bad Timestamp: %v", err)
- }
-
- target.Field(0).SetInt(t.Unix())
- target.Field(1).SetInt(int64(t.Nanosecond()))
- return nil
- case "Struct":
- var m map[string]json.RawMessage
- if err := json.Unmarshal(inputValue, &m); err != nil {
- return fmt.Errorf("bad StructValue: %v", err)
- }
-
- target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{}))
- for k, jv := range m {
- pv := &stpb.Value{}
- if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil {
- return fmt.Errorf("bad value in StructValue for key %q: %v", k, err)
- }
- target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv))
- }
- return nil
- case "ListValue":
- var s []json.RawMessage
- if err := json.Unmarshal(inputValue, &s); err != nil {
- return fmt.Errorf("bad ListValue: %v", err)
- }
-
- target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s))))
- for i, sv := range s {
- if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil {
- return err
- }
- }
- return nil
- case "Value":
- ivStr := string(inputValue)
- if ivStr == "null" {
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{}))
- } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil {
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v}))
- } else if v, err := unquote(ivStr); err == nil {
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v}))
- } else if v, err := strconv.ParseBool(ivStr); err == nil {
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v}))
- } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil {
- lv := &stpb.ListValue{}
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv}))
- return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop)
- } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil {
- sv := &stpb.Struct{}
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv}))
- return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop)
- } else {
- return fmt.Errorf("unrecognized type for Value %q", ivStr)
- }
- return nil
- }
- }
-
- // Handle enums, which have an underlying type of int32,
- // and may appear as strings.
- // The case of an enum appearing as a number is handled
- // at the bottom of this function.
- if inputValue[0] == '"' && prop != nil && prop.Enum != "" {
- vmap := proto.EnumValueMap(prop.Enum)
- // Don't need to do unquoting; valid enum names
- // are from a limited character set.
- s := inputValue[1 : len(inputValue)-1]
- n, ok := vmap[string(s)]
- if !ok {
- return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum)
- }
- if target.Kind() == reflect.Ptr { // proto2
- target.Set(reflect.New(targetType.Elem()))
- target = target.Elem()
- }
- if targetType.Kind() != reflect.Int32 {
- return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum)
- }
- target.SetInt(int64(n))
- return nil
- }
-
- // Handle nested messages.
- if targetType.Kind() == reflect.Struct {
- var jsonFields map[string]json.RawMessage
- if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
- return err
- }
-
- consumeField := func(prop *proto.Properties) (json.RawMessage, bool) {
- // Be liberal in what names we accept; both orig_name and camelName are okay.
- fieldNames := acceptedJSONFieldNames(prop)
-
- vOrig, okOrig := jsonFields[fieldNames.orig]
- vCamel, okCamel := jsonFields[fieldNames.camel]
- if !okOrig && !okCamel {
- return nil, false
- }
- // If, for some reason, both are present in the data, favour the camelName.
- var raw json.RawMessage
- if okOrig {
- raw = vOrig
- delete(jsonFields, fieldNames.orig)
- }
- if okCamel {
- raw = vCamel
- delete(jsonFields, fieldNames.camel)
- }
- return raw, true
- }
-
- sprops := proto.GetProperties(targetType)
- for i := 0; i < target.NumField(); i++ {
- ft := target.Type().Field(i)
- if strings.HasPrefix(ft.Name, "XXX_") {
- continue
- }
-
- valueForField, ok := consumeField(sprops.Prop[i])
- if !ok {
- continue
- }
-
- if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil {
- return err
- }
- }
- // Check for any oneof fields.
- if len(jsonFields) > 0 {
- for _, oop := range sprops.OneofTypes {
- raw, ok := consumeField(oop.Prop)
- if !ok {
- continue
- }
- nv := reflect.New(oop.Type.Elem())
- target.Field(oop.Field).Set(nv)
- if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil {
- return err
- }
- }
- }
- // Handle proto2 extensions.
- if len(jsonFields) > 0 {
- if ep, ok := target.Addr().Interface().(proto.Message); ok {
- for _, ext := range proto.RegisteredExtensions(ep) {
- name := fmt.Sprintf("[%s]", ext.Name)
- raw, ok := jsonFields[name]
- if !ok {
- continue
- }
- delete(jsonFields, name)
- nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem())
- if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil {
- return err
- }
- if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil {
- return err
- }
- }
- }
- }
- if !u.AllowUnknownFields && len(jsonFields) > 0 {
- // Pick any field to be the scapegoat.
- var f string
- for fname := range jsonFields {
- f = fname
- break
- }
- return fmt.Errorf("unknown field %q in %v", f, targetType)
- }
- return nil
- }
-
- // Handle arrays (which aren't encoded bytes)
- if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 {
- var slc []json.RawMessage
- if err := json.Unmarshal(inputValue, &slc); err != nil {
- return err
- }
- if slc != nil {
- l := len(slc)
- target.Set(reflect.MakeSlice(targetType, l, l))
- for i := 0; i < l; i++ {
- if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil {
- return err
- }
- }
- }
- return nil
- }
-
- // Handle maps (whose keys are always strings)
- if targetType.Kind() == reflect.Map {
- var mp map[string]json.RawMessage
- if err := json.Unmarshal(inputValue, &mp); err != nil {
- return err
- }
- if mp != nil {
- target.Set(reflect.MakeMap(targetType))
- for ks, raw := range mp {
- // Unmarshal map key. The core json library already decoded the key into a
- // string, so we handle that specially. Other types were quoted post-serialization.
- var k reflect.Value
- if targetType.Key().Kind() == reflect.String {
- k = reflect.ValueOf(ks)
- } else {
- k = reflect.New(targetType.Key()).Elem()
- var kprop *proto.Properties
- if prop != nil && prop.MapKeyProp != nil {
- kprop = prop.MapKeyProp
- }
- if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil {
- return err
- }
- }
-
- // Unmarshal map value.
- v := reflect.New(targetType.Elem()).Elem()
- var vprop *proto.Properties
- if prop != nil && prop.MapValProp != nil {
- vprop = prop.MapValProp
- }
- if err := u.unmarshalValue(v, raw, vprop); err != nil {
- return err
- }
- target.SetMapIndex(k, v)
- }
- }
- return nil
- }
-
- // Non-finite numbers can be encoded as strings.
- isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
- if isFloat {
- if num, ok := nonFinite[string(inputValue)]; ok {
- target.SetFloat(num)
- return nil
- }
- }
-
- // integers & floats can be encoded as strings. In this case we drop
- // the quotes and proceed as normal.
- isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 ||
- targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 ||
- targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
- if isNum && strings.HasPrefix(string(inputValue), `"`) {
- inputValue = inputValue[1 : len(inputValue)-1]
- }
-
- // Use the encoding/json for parsing other value types.
- return json.Unmarshal(inputValue, target.Addr().Interface())
-}
-
-func unquote(s string) (string, error) {
- var ret string
- err := json.Unmarshal([]byte(s), &ret)
- return ret, err
-}
-
-// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute.
-func jsonProperties(f reflect.StructField, origName bool) *proto.Properties {
- var prop proto.Properties
- prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)
- if origName || prop.JSONName == "" {
- prop.JSONName = prop.OrigName
- }
- return &prop
-}
-
-type fieldNames struct {
- orig, camel string
-}
-
-func acceptedJSONFieldNames(prop *proto.Properties) fieldNames {
- opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName}
- if prop.JSONName != "" {
- opts.camel = prop.JSONName
- }
- return opts
-}
-
-// Writer wrapper inspired by https://blog.golang.org/errors-are-values
-type errWriter struct {
- writer io.Writer
- err error
-}
-
-func (w *errWriter) write(str string) {
- if w.err != nil {
- return
- }
- _, w.err = w.writer.Write([]byte(str))
-}
-
-// Map fields may have key types of non-float scalars, strings and enums.
-// The easiest way to sort them in some deterministic order is to use fmt.
-// If this turns out to be inefficient we can always consider other options,
-// such as doing a Schwartzian transform.
-//
-// Numeric keys are sorted in numeric order per
-// https://developers.google.com/protocol-buffers/docs/proto#maps.
-type mapKeys []reflect.Value
-
-func (s mapKeys) Len() int { return len(s) }
-func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s mapKeys) Less(i, j int) bool {
- if k := s[i].Kind(); k == s[j].Kind() {
- switch k {
- case reflect.String:
- return s[i].String() < s[j].String()
- case reflect.Int32, reflect.Int64:
- return s[i].Int() < s[j].Int()
- case reflect.Uint32, reflect.Uint64:
- return s[i].Uint() < s[j].Uint()
- }
- }
- return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
-}
-
-// checkRequiredFields returns an error if any required field in the given proto message is not set.
-// This function is used by both Marshal and Unmarshal. While required fields only exist in a
-// proto2 message, a proto3 message can contain proto2 message(s).
-func checkRequiredFields(pb proto.Message) error {
- // Most well-known type messages do not contain required fields. The "Any" type may contain
- // a message that has required fields.
- //
-	// When an Any message is being marshaled, the code will invoke proto.Unmarshal on Any.Value
- // field in order to transform that into JSON, and that should have returned an error if a
- // required field is not set in the embedded message.
- //
- // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the
- // embedded message to store the serialized message in Any.Value field, and that should have
- // returned an error if a required field is not set.
- if _, ok := pb.(wkt); ok {
- return nil
- }
-
- v := reflect.ValueOf(pb)
- // Skip message if it is not a struct pointer.
- if v.Kind() != reflect.Ptr {
- return nil
- }
- v = v.Elem()
- if v.Kind() != reflect.Struct {
- return nil
- }
-
- for i := 0; i < v.NumField(); i++ {
- field := v.Field(i)
- sfield := v.Type().Field(i)
-
- if sfield.PkgPath != "" {
- // blank PkgPath means the field is exported; skip if not exported
- continue
- }
-
- if strings.HasPrefix(sfield.Name, "XXX_") {
- continue
- }
-
- // Oneof field is an interface implemented by wrapper structs containing the actual oneof
- // field, i.e. an interface containing &T{real_value}.
- if sfield.Tag.Get("protobuf_oneof") != "" {
- if field.Kind() != reflect.Interface {
- continue
- }
- v := field.Elem()
- if v.Kind() != reflect.Ptr || v.IsNil() {
- continue
- }
- v = v.Elem()
- if v.Kind() != reflect.Struct || v.NumField() < 1 {
- continue
- }
- field = v.Field(0)
- sfield = v.Type().Field(0)
- }
-
- protoTag := sfield.Tag.Get("protobuf")
- if protoTag == "" {
- continue
- }
- var prop proto.Properties
- prop.Init(sfield.Type, sfield.Name, protoTag, &sfield)
-
- switch field.Kind() {
- case reflect.Map:
- if field.IsNil() {
- continue
- }
- // Check each map value.
- keys := field.MapKeys()
- for _, k := range keys {
- v := field.MapIndex(k)
- if err := checkRequiredFieldsInValue(v); err != nil {
- return err
- }
- }
- case reflect.Slice:
- // Handle non-repeated type, e.g. bytes.
- if !prop.Repeated {
- if prop.Required && field.IsNil() {
- return fmt.Errorf("required field %q is not set", prop.Name)
- }
- continue
- }
-
- // Handle repeated type.
- if field.IsNil() {
- continue
- }
- // Check each slice item.
- for i := 0; i < field.Len(); i++ {
- v := field.Index(i)
- if err := checkRequiredFieldsInValue(v); err != nil {
- return err
- }
- }
- case reflect.Ptr:
- if field.IsNil() {
- if prop.Required {
- return fmt.Errorf("required field %q is not set", prop.Name)
- }
- continue
- }
- if err := checkRequiredFieldsInValue(field); err != nil {
- return err
- }
- }
- }
-
- // Handle proto2 extensions.
- for _, ext := range proto.RegisteredExtensions(pb) {
- if !proto.HasExtension(pb, ext) {
- continue
- }
- ep, err := proto.GetExtension(pb, ext)
- if err != nil {
- return err
- }
- err = checkRequiredFieldsInValue(reflect.ValueOf(ep))
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func checkRequiredFieldsInValue(v reflect.Value) error {
- if v.Type().Implements(messageType) {
- return checkRequiredFields(v.Interface().(proto.Message))
- }
- return nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
deleted file mode 100644
index d82d6176b..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
+++ /dev/null
@@ -1,338 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/struct.proto
-
-package structpb
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// `NullValue` is a singleton enumeration to represent the null value for the
-// `Value` type union.
-//
-// The JSON representation for `NullValue` is JSON `null`.
-type NullValue int32
-
-const (
- // Null value.
- NullValue_NULL_VALUE NullValue = 0
-)
-
-var NullValue_name = map[int32]string{
- 0: "NULL_VALUE",
-}
-
-var NullValue_value = map[string]int32{
- "NULL_VALUE": 0,
-}
-
-func (x NullValue) String() string {
- return proto.EnumName(NullValue_name, int32(x))
-}
-
-func (NullValue) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_df322afd6c9fb402, []int{0}
-}
-
-func (NullValue) XXX_WellKnownType() string { return "NullValue" }
-
-// `Struct` represents a structured data value, consisting of fields
-// which map to dynamically typed values. In some languages, `Struct`
-// might be supported by a native representation. For example, in
-// scripting languages like JS a struct is represented as an
-// object. The details of that representation are described together
-// with the proto support for the language.
-//
-// The JSON representation for `Struct` is JSON object.
-type Struct struct {
- // Unordered map of dynamically typed values.
- Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Struct) Reset() { *m = Struct{} }
-func (m *Struct) String() string { return proto.CompactTextString(m) }
-func (*Struct) ProtoMessage() {}
-func (*Struct) Descriptor() ([]byte, []int) {
- return fileDescriptor_df322afd6c9fb402, []int{0}
-}
-
-func (*Struct) XXX_WellKnownType() string { return "Struct" }
-
-func (m *Struct) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Struct.Unmarshal(m, b)
-}
-func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
-}
-func (m *Struct) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Struct.Merge(m, src)
-}
-func (m *Struct) XXX_Size() int {
- return xxx_messageInfo_Struct.Size(m)
-}
-func (m *Struct) XXX_DiscardUnknown() {
- xxx_messageInfo_Struct.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Struct proto.InternalMessageInfo
-
-func (m *Struct) GetFields() map[string]*Value {
- if m != nil {
- return m.Fields
- }
- return nil
-}
-
-// `Value` represents a dynamically typed value which can be either
-// null, a number, a string, a boolean, a recursive struct value, or a
-// list of values. A producer of value is expected to set one of these
-// variants; absence of any variant indicates an error.
-//
-// The JSON representation for `Value` is JSON value.
-type Value struct {
- // The kind of value.
- //
- // Types that are valid to be assigned to Kind:
- // *Value_NullValue
- // *Value_NumberValue
- // *Value_StringValue
- // *Value_BoolValue
- // *Value_StructValue
- // *Value_ListValue
- Kind isValue_Kind `protobuf_oneof:"kind"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Value) Reset() { *m = Value{} }
-func (m *Value) String() string { return proto.CompactTextString(m) }
-func (*Value) ProtoMessage() {}
-func (*Value) Descriptor() ([]byte, []int) {
- return fileDescriptor_df322afd6c9fb402, []int{1}
-}
-
-func (*Value) XXX_WellKnownType() string { return "Value" }
-
-func (m *Value) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Value.Unmarshal(m, b)
-}
-func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Value.Marshal(b, m, deterministic)
-}
-func (m *Value) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Value.Merge(m, src)
-}
-func (m *Value) XXX_Size() int {
- return xxx_messageInfo_Value.Size(m)
-}
-func (m *Value) XXX_DiscardUnknown() {
- xxx_messageInfo_Value.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Value proto.InternalMessageInfo
-
-type isValue_Kind interface {
- isValue_Kind()
-}
-
-type Value_NullValue struct {
- NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
-}
-
-type Value_NumberValue struct {
- NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
-}
-
-type Value_StringValue struct {
- StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
-}
-
-type Value_BoolValue struct {
- BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
-}
-
-type Value_StructValue struct {
- StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
-}
-
-type Value_ListValue struct {
- ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
-}
-
-func (*Value_NullValue) isValue_Kind() {}
-
-func (*Value_NumberValue) isValue_Kind() {}
-
-func (*Value_StringValue) isValue_Kind() {}
-
-func (*Value_BoolValue) isValue_Kind() {}
-
-func (*Value_StructValue) isValue_Kind() {}
-
-func (*Value_ListValue) isValue_Kind() {}
-
-func (m *Value) GetKind() isValue_Kind {
- if m != nil {
- return m.Kind
- }
- return nil
-}
-
-func (m *Value) GetNullValue() NullValue {
- if x, ok := m.GetKind().(*Value_NullValue); ok {
- return x.NullValue
- }
- return NullValue_NULL_VALUE
-}
-
-func (m *Value) GetNumberValue() float64 {
- if x, ok := m.GetKind().(*Value_NumberValue); ok {
- return x.NumberValue
- }
- return 0
-}
-
-func (m *Value) GetStringValue() string {
- if x, ok := m.GetKind().(*Value_StringValue); ok {
- return x.StringValue
- }
- return ""
-}
-
-func (m *Value) GetBoolValue() bool {
- if x, ok := m.GetKind().(*Value_BoolValue); ok {
- return x.BoolValue
- }
- return false
-}
-
-func (m *Value) GetStructValue() *Struct {
- if x, ok := m.GetKind().(*Value_StructValue); ok {
- return x.StructValue
- }
- return nil
-}
-
-func (m *Value) GetListValue() *ListValue {
- if x, ok := m.GetKind().(*Value_ListValue); ok {
- return x.ListValue
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Value) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Value_NullValue)(nil),
- (*Value_NumberValue)(nil),
- (*Value_StringValue)(nil),
- (*Value_BoolValue)(nil),
- (*Value_StructValue)(nil),
- (*Value_ListValue)(nil),
- }
-}
-
-// `ListValue` is a wrapper around a repeated field of values.
-//
-// The JSON representation for `ListValue` is JSON array.
-type ListValue struct {
- // Repeated field of dynamically typed values.
- Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ListValue) Reset() { *m = ListValue{} }
-func (m *ListValue) String() string { return proto.CompactTextString(m) }
-func (*ListValue) ProtoMessage() {}
-func (*ListValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_df322afd6c9fb402, []int{2}
-}
-
-func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
-
-func (m *ListValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ListValue.Unmarshal(m, b)
-}
-func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
-}
-func (m *ListValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ListValue.Merge(m, src)
-}
-func (m *ListValue) XXX_Size() int {
- return xxx_messageInfo_ListValue.Size(m)
-}
-func (m *ListValue) XXX_DiscardUnknown() {
- xxx_messageInfo_ListValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ListValue proto.InternalMessageInfo
-
-func (m *ListValue) GetValues() []*Value {
- if m != nil {
- return m.Values
- }
- return nil
-}
-
-func init() {
- proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
- proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
- proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry")
- proto.RegisterType((*Value)(nil), "google.protobuf.Value")
- proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
-}
-
-func init() {
- proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402)
-}
-
-var fileDescriptor_df322afd6c9fb402 = []byte{
- // 417 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
- 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
- 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
- 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
- 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
- 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
- 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
- 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
- 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
- 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
- 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
- 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
- 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
- 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
- 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
- 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
- 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
- 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
- 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
- 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
- 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
- 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
- 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
- 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
- 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
- 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
- 0x00,
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
deleted file mode 100644
index ed990e31d..000000000
--- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
+++ /dev/null
@@ -1,95 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "StructProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// `Struct` represents a structured data value, consisting of fields
-// which map to dynamically typed values. In some languages, `Struct`
-// might be supported by a native representation. For example, in
-// scripting languages like JS a struct is represented as an
-// object. The details of that representation are described together
-// with the proto support for the language.
-//
-// The JSON representation for `Struct` is JSON object.
-message Struct {
- // Unordered map of dynamically typed values.
- map<string, Value> fields = 1;
-}
-
-// `Value` represents a dynamically typed value which can be either
-// null, a number, a string, a boolean, a recursive struct value, or a
-// list of values. A producer of value is expected to set one of these
-// variants; absence of any variant indicates an error.
-//
-// The JSON representation for `Value` is JSON value.
-message Value {
- // The kind of value.
- oneof kind {
- // Represents a null value.
- NullValue null_value = 1;
- // Represents a double value.
- double number_value = 2;
- // Represents a string value.
- string string_value = 3;
- // Represents a boolean value.
- bool bool_value = 4;
- // Represents a structured value.
- Struct struct_value = 5;
- // Represents a repeated `Value`.
- ListValue list_value = 6;
- }
-}
-
-// `NullValue` is a singleton enumeration to represent the null value for the
-// `Value` type union.
-//
-// The JSON representation for `NullValue` is JSON `null`.
-enum NullValue {
- // Null value.
- NULL_VALUE = 0;
-}
-
-// `ListValue` is a wrapper around a repeated field of values.
-//
-// The JSON representation for `ListValue` is JSON array.
-message ListValue {
- // Repeated field of dynamically typed values.
- repeated Value values = 1;
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt
deleted file mode 100644
index 364516251..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2015, Gengo, Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
- * Neither the name of Gengo, Inc. nor the names of its
- contributors may be used to endorse or promote products derived from this
- software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel
deleted file mode 100644
index 76cafe6ec..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel
+++ /dev/null
@@ -1,22 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
-
-package(default_visibility = ["//visibility:public"])
-
-proto_library(
- name = "internal_proto",
- srcs = ["stream_chunk.proto"],
- deps = ["@com_google_protobuf//:any_proto"],
-)
-
-go_proto_library(
- name = "internal_go_proto",
- importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
- proto = ":internal_proto",
-)
-
-go_library(
- name = "go_default_library",
- embed = [":internal_go_proto"],
- importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
-)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go
deleted file mode 100644
index 8858f0690..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: internal/stream_chunk.proto
-
-package internal
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import any "github.com/golang/protobuf/ptypes/any"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-// StreamError is a response type which is returned when
-// a streaming RPC returns an error.
-type StreamError struct {
- GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"`
- HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"`
- Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
- HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"`
- Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StreamError) Reset() { *m = StreamError{} }
-func (m *StreamError) String() string { return proto.CompactTextString(m) }
-func (*StreamError) ProtoMessage() {}
-func (*StreamError) Descriptor() ([]byte, []int) {
- return fileDescriptor_stream_chunk_a2afb657504565d7, []int{0}
-}
-func (m *StreamError) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StreamError.Unmarshal(m, b)
-}
-func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StreamError.Marshal(b, m, deterministic)
-}
-func (dst *StreamError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StreamError.Merge(dst, src)
-}
-func (m *StreamError) XXX_Size() int {
- return xxx_messageInfo_StreamError.Size(m)
-}
-func (m *StreamError) XXX_DiscardUnknown() {
- xxx_messageInfo_StreamError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StreamError proto.InternalMessageInfo
-
-func (m *StreamError) GetGrpcCode() int32 {
- if m != nil {
- return m.GrpcCode
- }
- return 0
-}
-
-func (m *StreamError) GetHttpCode() int32 {
- if m != nil {
- return m.HttpCode
- }
- return 0
-}
-
-func (m *StreamError) GetMessage() string {
- if m != nil {
- return m.Message
- }
- return ""
-}
-
-func (m *StreamError) GetHttpStatus() string {
- if m != nil {
- return m.HttpStatus
- }
- return ""
-}
-
-func (m *StreamError) GetDetails() []*any.Any {
- if m != nil {
- return m.Details
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError")
-}
-
-func init() {
- proto.RegisterFile("internal/stream_chunk.proto", fileDescriptor_stream_chunk_a2afb657504565d7)
-}
-
-var fileDescriptor_stream_chunk_a2afb657504565d7 = []byte{
- // 223 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x90, 0x41, 0x4e, 0xc3, 0x30,
- 0x10, 0x45, 0x15, 0x4a, 0x69, 0x3b, 0xd9, 0x45, 0x5d, 0x18, 0xba, 0x20, 0x62, 0x95, 0x95, 0x23,
- 0xc1, 0x09, 0x00, 0x71, 0x81, 0x74, 0xc7, 0xa6, 0x9a, 0x26, 0x83, 0x13, 0x91, 0xd8, 0xd1, 0x78,
- 0x22, 0x94, 0x6b, 0x71, 0xc2, 0xca, 0x8e, 0xb2, 0xf4, 0x7b, 0x7f, 0xbe, 0xbe, 0x0c, 0xa7, 0xce,
- 0x0a, 0xb1, 0xc5, 0xbe, 0xf4, 0xc2, 0x84, 0xc3, 0xa5, 0x6e, 0x27, 0xfb, 0xab, 0x47, 0x76, 0xe2,
- 0xb2, 0xa3, 0xe1, 0xb1, 0xd6, 0x06, 0x85, 0xfe, 0x70, 0xd6, 0x3c, 0x59, 0xe9, 0x06, 0x7a, 0x7a,
- 0x34, 0xce, 0x99, 0x9e, 0xca, 0x98, 0xb9, 0x4e, 0x3f, 0x25, 0xda, 0x79, 0x39, 0x78, 0xf9, 0x4f,
- 0x20, 0x3d, 0xc7, 0x9e, 0x2f, 0x66, 0xc7, 0xd9, 0x09, 0x0e, 0xa1, 0xe2, 0x52, 0xbb, 0x86, 0x54,
- 0x92, 0x27, 0xc5, 0xb6, 0xda, 0x07, 0xf0, 0xe9, 0x1a, 0x0a, 0xb2, 0x15, 0x19, 0x17, 0x79, 0xb7,
- 0xc8, 0x00, 0xa2, 0x54, 0xb0, 0x1b, 0xc8, 0x7b, 0x34, 0xa4, 0x36, 0x79, 0x52, 0x1c, 0xaa, 0xf5,
- 0x99, 0x3d, 0x43, 0x1a, 0xcf, 0xbc, 0xa0, 0x4c, 0x5e, 0xdd, 0x47, 0x0b, 0x01, 0x9d, 0x23, 0xc9,
- 0x34, 0xec, 0x1a, 0x12, 0xec, 0x7a, 0xaf, 0xb6, 0xf9, 0xa6, 0x48, 0x5f, 0x8f, 0x7a, 0x59, 0xac,
- 0xd7, 0xc5, 0xfa, 0xdd, 0xce, 0xd5, 0x1a, 0xfa, 0x80, 0xef, 0xfd, 0xfa, 0x09, 0xd7, 0x87, 0x18,
- 0x79, 0xbb, 0x05, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x7d, 0xa5, 0x18, 0x17, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto
deleted file mode 100644
index 55f42ce63..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto
+++ /dev/null
@@ -1,15 +0,0 @@
-syntax = "proto3";
-package grpc.gateway.runtime;
-option go_package = "internal";
-
-import "google/protobuf/any.proto";
-
-// StreamError is a response type which is returned when
-// a streaming RPC returns an error.
-message StreamError {
- int32 grpc_code = 1;
- int32 http_code = 2;
- string message = 3;
- string http_status = 4;
- repeated google.protobuf.Any details = 5;
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel
deleted file mode 100644
index 20862228e..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel
+++ /dev/null
@@ -1,84 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-package(default_visibility = ["//visibility:public"])
-
-go_library(
- name = "go_default_library",
- srcs = [
- "context.go",
- "convert.go",
- "doc.go",
- "errors.go",
- "fieldmask.go",
- "handler.go",
- "marshal_httpbodyproto.go",
- "marshal_json.go",
- "marshal_jsonpb.go",
- "marshal_proto.go",
- "marshaler.go",
- "marshaler_registry.go",
- "mux.go",
- "pattern.go",
- "proto2_convert.go",
- "proto_errors.go",
- "query.go",
- ],
- importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime",
- deps = [
- "//internal:go_default_library",
- "//utilities:go_default_library",
- "@com_github_golang_protobuf//jsonpb:go_default_library_gen",
- "@com_github_golang_protobuf//proto:go_default_library",
- "@com_github_golang_protobuf//protoc-gen-go/generator:go_default_library_gen",
- "@go_googleapis//google/api:httpbody_go_proto",
- "@io_bazel_rules_go//proto/wkt:any_go_proto",
- "@io_bazel_rules_go//proto/wkt:duration_go_proto",
- "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
- "@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
- "@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
- "@org_golang_google_grpc//codes:go_default_library",
- "@org_golang_google_grpc//grpclog:go_default_library",
- "@org_golang_google_grpc//metadata:go_default_library",
- "@org_golang_google_grpc//status:go_default_library",
- ],
-)
-
-go_test(
- name = "go_default_test",
- size = "small",
- srcs = [
- "context_test.go",
- "errors_test.go",
- "fieldmask_test.go",
- "handler_test.go",
- "marshal_httpbodyproto_test.go",
- "marshal_json_test.go",
- "marshal_jsonpb_test.go",
- "marshal_proto_test.go",
- "marshaler_registry_test.go",
- "mux_test.go",
- "pattern_test.go",
- "query_test.go",
- ],
- embed = [":go_default_library"],
- deps = [
- "//examples/proto/examplepb:go_default_library",
- "//internal:go_default_library",
- "//utilities:go_default_library",
- "@com_github_golang_protobuf//jsonpb:go_default_library_gen",
- "@com_github_golang_protobuf//proto:go_default_library",
- "@com_github_golang_protobuf//ptypes:go_default_library_gen",
- "@go_googleapis//google/api:httpbody_go_proto",
- "@go_googleapis//google/rpc:errdetails_go_proto",
- "@io_bazel_rules_go//proto/wkt:duration_go_proto",
- "@io_bazel_rules_go//proto/wkt:empty_go_proto",
- "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
- "@io_bazel_rules_go//proto/wkt:struct_go_proto",
- "@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
- "@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
- "@org_golang_google_grpc//:go_default_library",
- "@org_golang_google_grpc//codes:go_default_library",
- "@org_golang_google_grpc//metadata:go_default_library",
- "@org_golang_google_grpc//status:go_default_library",
- ],
-)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
deleted file mode 100644
index 896057e1e..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package runtime
-
-import (
- "context"
- "encoding/base64"
- "fmt"
- "net"
- "net/http"
- "net/textproto"
- "strconv"
- "strings"
- "time"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
-)
-
-// MetadataHeaderPrefix is the http prefix that represents custom metadata
-// parameters to or from a gRPC call.
-const MetadataHeaderPrefix = "Grpc-Metadata-"
-
-// MetadataPrefix is prepended to permanent HTTP header keys (as specified
-// by the IANA) when added to the gRPC context.
-const MetadataPrefix = "grpcgateway-"
-
-// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
-// HTTP headers in a response handled by grpc-gateway
-const MetadataTrailerPrefix = "Grpc-Trailer-"
-
-const metadataGrpcTimeout = "Grpc-Timeout"
-const metadataHeaderBinarySuffix = "-Bin"
-
-const xForwardedFor = "X-Forwarded-For"
-const xForwardedHost = "X-Forwarded-Host"
-
-var (
- // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
- // header isn't present. If the value is 0 the sent `context` will not have a timeout.
- DefaultContextTimeout = 0 * time.Second
-)
-
-func decodeBinHeader(v string) ([]byte, error) {
- if len(v)%4 == 0 {
- // Input was padded, or padding was not necessary.
- return base64.StdEncoding.DecodeString(v)
- }
- return base64.RawStdEncoding.DecodeString(v)
-}
-
-/*
-AnnotateContext adds context information such as metadata from the request.
-
-At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
-except that the forwarded destination is not another HTTP service but rather
-a gRPC service.
-*/
-func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
- var pairs []string
- timeout := DefaultContextTimeout
- if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
- var err error
- timeout, err = timeoutDecode(tm)
- if err != nil {
- return nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
- }
- }
-
- for key, vals := range req.Header {
- for _, val := range vals {
- key = textproto.CanonicalMIMEHeaderKey(key)
- // For backwards-compatibility, pass through 'authorization' header with no prefix.
- if key == "Authorization" {
- pairs = append(pairs, "authorization", val)
- }
- if h, ok := mux.incomingHeaderMatcher(key); ok {
- // Handles "-bin" metadata in grpc, since grpc will do another base64
- // encode before sending to server, we need to decode it first.
- if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
- b, err := decodeBinHeader(val)
- if err != nil {
- return nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
- }
-
- val = string(b)
- }
- pairs = append(pairs, h, val)
- }
- }
- }
- if host := req.Header.Get(xForwardedHost); host != "" {
- pairs = append(pairs, strings.ToLower(xForwardedHost), host)
- } else if req.Host != "" {
- pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
- }
-
- if addr := req.RemoteAddr; addr != "" {
- if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
- if fwd := req.Header.Get(xForwardedFor); fwd == "" {
- pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
- } else {
- pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
- }
- } else {
- grpclog.Infof("invalid remote addr: %s", addr)
- }
- }
-
- if timeout != 0 {
- ctx, _ = context.WithTimeout(ctx, timeout)
- }
- if len(pairs) == 0 {
- return ctx, nil
- }
- md := metadata.Pairs(pairs...)
- for _, mda := range mux.metadataAnnotators {
- md = metadata.Join(md, mda(ctx, req))
- }
- return metadata.NewOutgoingContext(ctx, md), nil
-}
-
-// ServerMetadata consists of metadata sent from gRPC server.
-type ServerMetadata struct {
- HeaderMD metadata.MD
- TrailerMD metadata.MD
-}
-
-type serverMetadataKey struct{}
-
-// NewServerMetadataContext creates a new context with ServerMetadata
-func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
- return context.WithValue(ctx, serverMetadataKey{}, md)
-}
-
-// ServerMetadataFromContext returns the ServerMetadata in ctx
-func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
- md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
- return
-}
-
-func timeoutDecode(s string) (time.Duration, error) {
- size := len(s)
- if size < 2 {
- return 0, fmt.Errorf("timeout string is too short: %q", s)
- }
- d, ok := timeoutUnitToDuration(s[size-1])
- if !ok {
- return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
- }
- t, err := strconv.ParseInt(s[:size-1], 10, 64)
- if err != nil {
- return 0, err
- }
- return d * time.Duration(t), nil
-}
-
-func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
- switch u {
- case 'H':
- return time.Hour, true
- case 'M':
- return time.Minute, true
- case 'S':
- return time.Second, true
- case 'm':
- return time.Millisecond, true
- case 'u':
- return time.Microsecond, true
- case 'n':
- return time.Nanosecond, true
- default:
- }
- return
-}
-
-// isPermanentHTTPHeader checks whether hdr belongs to the list of
-// permanent request headers maintained by IANA.
-// http://www.iana.org/assignments/message-headers/message-headers.xml
-func isPermanentHTTPHeader(hdr string) bool {
- switch hdr {
- case
- "Accept",
- "Accept-Charset",
- "Accept-Language",
- "Accept-Ranges",
- "Authorization",
- "Cache-Control",
- "Content-Type",
- "Cookie",
- "Date",
- "Expect",
- "From",
- "Host",
- "If-Match",
- "If-Modified-Since",
- "If-None-Match",
- "If-Schedule-Tag-Match",
- "If-Unmodified-Since",
- "Max-Forwards",
- "Origin",
- "Pragma",
- "Referer",
- "User-Agent",
- "Via",
- "Warning":
- return true
- }
- return false
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
deleted file mode 100644
index a5b3bd6a7..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
+++ /dev/null
@@ -1,312 +0,0 @@
-package runtime
-
-import (
- "encoding/base64"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/golang/protobuf/jsonpb"
- "github.com/golang/protobuf/ptypes/duration"
- "github.com/golang/protobuf/ptypes/timestamp"
- "github.com/golang/protobuf/ptypes/wrappers"
-)
-
-// String just returns the given string.
-// It is just for compatibility with other types.
-func String(val string) (string, error) {
- return val, nil
-}
-
-// StringSlice converts 'val' where individual strings are separated by
-// 'sep' into a string slice.
-func StringSlice(val, sep string) ([]string, error) {
- return strings.Split(val, sep), nil
-}
-
-// Bool converts the given string representation of a boolean value into bool.
-func Bool(val string) (bool, error) {
- return strconv.ParseBool(val)
-}
-
-// BoolSlice converts 'val' where individual booleans are separated by
-// 'sep' into a bool slice.
-func BoolSlice(val, sep string) ([]bool, error) {
- s := strings.Split(val, sep)
- values := make([]bool, len(s))
- for i, v := range s {
- value, err := Bool(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Float64 converts the given string representation of a floating point number into float64.
-func Float64(val string) (float64, error) {
- return strconv.ParseFloat(val, 64)
-}
-
-// Float64Slice converts 'val' where individual floating point numbers are separated by
-// 'sep' into a float64 slice.
-func Float64Slice(val, sep string) ([]float64, error) {
- s := strings.Split(val, sep)
- values := make([]float64, len(s))
- for i, v := range s {
- value, err := Float64(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Float32 converts the given string representation of a floating point number into float32.
-func Float32(val string) (float32, error) {
- f, err := strconv.ParseFloat(val, 32)
- if err != nil {
- return 0, err
- }
- return float32(f), nil
-}
-
-// Float32Slice converts 'val' where individual floating point numbers are separated by
-// 'sep' into a float32 slice.
-func Float32Slice(val, sep string) ([]float32, error) {
- s := strings.Split(val, sep)
- values := make([]float32, len(s))
- for i, v := range s {
- value, err := Float32(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Int64 converts the given string representation of an integer into int64.
-func Int64(val string) (int64, error) {
- return strconv.ParseInt(val, 0, 64)
-}
-
-// Int64Slice converts 'val' where individual integers are separated by
-// 'sep' into an int64 slice.
-func Int64Slice(val, sep string) ([]int64, error) {
- s := strings.Split(val, sep)
- values := make([]int64, len(s))
- for i, v := range s {
- value, err := Int64(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Int32 converts the given string representation of an integer into int32.
-func Int32(val string) (int32, error) {
- i, err := strconv.ParseInt(val, 0, 32)
- if err != nil {
- return 0, err
- }
- return int32(i), nil
-}
-
-// Int32Slice converts 'val' where individual integers are separated by
-// 'sep' into an int32 slice.
-func Int32Slice(val, sep string) ([]int32, error) {
- s := strings.Split(val, sep)
- values := make([]int32, len(s))
- for i, v := range s {
- value, err := Int32(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Uint64 converts the given string representation of an integer into uint64.
-func Uint64(val string) (uint64, error) {
- return strconv.ParseUint(val, 0, 64)
-}
-
-// Uint64Slice converts 'val' where individual integers are separated by
-// 'sep' into a uint64 slice.
-func Uint64Slice(val, sep string) ([]uint64, error) {
- s := strings.Split(val, sep)
- values := make([]uint64, len(s))
- for i, v := range s {
- value, err := Uint64(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Uint32 converts the given string representation of an integer into uint32.
-func Uint32(val string) (uint32, error) {
- i, err := strconv.ParseUint(val, 0, 32)
- if err != nil {
- return 0, err
- }
- return uint32(i), nil
-}
-
-// Uint32Slice converts 'val' where individual integers are separated by
-// 'sep' into a uint32 slice.
-func Uint32Slice(val, sep string) ([]uint32, error) {
- s := strings.Split(val, sep)
- values := make([]uint32, len(s))
- for i, v := range s {
- value, err := Uint32(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Bytes converts the given string representation of a byte sequence into a slice of bytes
-// A byte sequence is encoded in URL-safe base64 without padding
-func Bytes(val string) ([]byte, error) {
- b, err := base64.StdEncoding.DecodeString(val)
- if err != nil {
- b, err = base64.URLEncoding.DecodeString(val)
- if err != nil {
- return nil, err
- }
- }
- return b, nil
-}
-
-// BytesSlice converts 'val' where individual byte sequences, encoded in URL-safe
-// base64 without padding, are separated by 'sep' into a slice of byte slices.
-func BytesSlice(val, sep string) ([][]byte, error) {
- s := strings.Split(val, sep)
- values := make([][]byte, len(s))
- for i, v := range s {
- value, err := Bytes(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
-func Timestamp(val string) (*timestamp.Timestamp, error) {
- var r *timestamp.Timestamp
- err := jsonpb.UnmarshalString(val, r)
- return r, err
-}
-
-// Duration converts the given string into a timestamp.Duration.
-func Duration(val string) (*duration.Duration, error) {
- var r *duration.Duration
- err := jsonpb.UnmarshalString(val, r)
- return r, err
-}
-
-// Enum converts the given string into an int32 that should be type cast into the
-// correct enum proto type.
-func Enum(val string, enumValMap map[string]int32) (int32, error) {
- e, ok := enumValMap[val]
- if ok {
- return e, nil
- }
-
- i, err := Int32(val)
- if err != nil {
- return 0, fmt.Errorf("%s is not valid", val)
- }
- for _, v := range enumValMap {
- if v == i {
- return i, nil
- }
- }
- return 0, fmt.Errorf("%s is not valid", val)
-}
-
-// EnumSlice converts 'val' where individual enums are separated by 'sep'
-// into an int32 slice. Each individual int32 should be type cast into the
-// correct enum proto type.
-func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
- s := strings.Split(val, sep)
- values := make([]int32, len(s))
- for i, v := range s {
- value, err := Enum(v, enumValMap)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-/*
- Support for google.protobuf.wrappers on top of primitive types
-*/
-
-// StringValue well-known type support as wrapper around string type
-func StringValue(val string) (*wrappers.StringValue, error) {
- return &wrappers.StringValue{Value: val}, nil
-}
-
-// FloatValue well-known type support as wrapper around float32 type
-func FloatValue(val string) (*wrappers.FloatValue, error) {
- parsedVal, err := Float32(val)
- return &wrappers.FloatValue{Value: parsedVal}, err
-}
-
-// DoubleValue well-known type support as wrapper around float64 type
-func DoubleValue(val string) (*wrappers.DoubleValue, error) {
- parsedVal, err := Float64(val)
- return &wrappers.DoubleValue{Value: parsedVal}, err
-}
-
-// BoolValue well-known type support as wrapper around bool type
-func BoolValue(val string) (*wrappers.BoolValue, error) {
- parsedVal, err := Bool(val)
- return &wrappers.BoolValue{Value: parsedVal}, err
-}
-
-// Int32Value well-known type support as wrapper around int32 type
-func Int32Value(val string) (*wrappers.Int32Value, error) {
- parsedVal, err := Int32(val)
- return &wrappers.Int32Value{Value: parsedVal}, err
-}
-
-// UInt32Value well-known type support as wrapper around uint32 type
-func UInt32Value(val string) (*wrappers.UInt32Value, error) {
- parsedVal, err := Uint32(val)
- return &wrappers.UInt32Value{Value: parsedVal}, err
-}
-
-// Int64Value well-known type support as wrapper around int64 type
-func Int64Value(val string) (*wrappers.Int64Value, error) {
- parsedVal, err := Int64(val)
- return &wrappers.Int64Value{Value: parsedVal}, err
-}
-
-// UInt64Value well-known type support as wrapper around uint64 type
-func UInt64Value(val string) (*wrappers.UInt64Value, error) {
- parsedVal, err := Uint64(val)
- return &wrappers.UInt64Value{Value: parsedVal}, err
-}
-
-// BytesValue well-known type support as wrapper around bytes[] type
-func BytesValue(val string) (*wrappers.BytesValue, error) {
- parsedVal, err := Bytes(val)
- return &wrappers.BytesValue{Value: parsedVal}, err
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
deleted file mode 100644
index b6e5ddf7a..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
+++ /dev/null
@@ -1,5 +0,0 @@
-/*
-Package runtime contains runtime helper functions used by
-servers which protoc-gen-grpc-gateway generates.
-*/
-package runtime
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
deleted file mode 100644
index 41d54ef91..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package runtime
-
-import (
- "context"
- "io"
- "net/http"
-
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes/any"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/status"
-)
-
-// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
-// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
-func HTTPStatusFromCode(code codes.Code) int {
- switch code {
- case codes.OK:
- return http.StatusOK
- case codes.Canceled:
- return http.StatusRequestTimeout
- case codes.Unknown:
- return http.StatusInternalServerError
- case codes.InvalidArgument:
- return http.StatusBadRequest
- case codes.DeadlineExceeded:
- return http.StatusGatewayTimeout
- case codes.NotFound:
- return http.StatusNotFound
- case codes.AlreadyExists:
- return http.StatusConflict
- case codes.PermissionDenied:
- return http.StatusForbidden
- case codes.Unauthenticated:
- return http.StatusUnauthorized
- case codes.ResourceExhausted:
- return http.StatusTooManyRequests
- case codes.FailedPrecondition:
- return http.StatusPreconditionFailed
- case codes.Aborted:
- return http.StatusConflict
- case codes.OutOfRange:
- return http.StatusBadRequest
- case codes.Unimplemented:
- return http.StatusNotImplemented
- case codes.Internal:
- return http.StatusInternalServerError
- case codes.Unavailable:
- return http.StatusServiceUnavailable
- case codes.DataLoss:
- return http.StatusInternalServerError
- }
-
- grpclog.Infof("Unknown gRPC error code: %v", code)
- return http.StatusInternalServerError
-}
-
-var (
- // HTTPError replies to the request with the error.
- // You can set a custom function to this variable to customize error format.
- HTTPError = DefaultHTTPError
- // OtherErrorHandler handles the following errors used by the gateway: StatusMethodNotAllowed, StatusNotFound, and StatusBadRequest
- OtherErrorHandler = DefaultOtherErrorHandler
-)
-
-type errorBody struct {
- Error string `protobuf:"bytes,1,name=error" json:"error"`
- // This is to make the error more compatible with users that expect errors to be Status objects:
- // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
- // It should be the exact same message as the Error field.
- Message string `protobuf:"bytes,1,name=message" json:"message"`
- Code int32 `protobuf:"varint,2,name=code" json:"code"`
- Details []*any.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"`
-}
-
-// Make this also conform to proto.Message for builtin JSONPb Marshaler
-func (e *errorBody) Reset() { *e = errorBody{} }
-func (e *errorBody) String() string { return proto.CompactTextString(e) }
-func (*errorBody) ProtoMessage() {}
-
-// DefaultHTTPError is the default implementation of HTTPError.
-// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
-// If otherwise, it replies with http.StatusInternalServerError.
-//
-// The response body returned by this function is a JSON object,
-// which contains a member whose key is "error" and whose value is err.Error().
-func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
- const fallback = `{"error": "failed to marshal error message"}`
-
- s, ok := status.FromError(err)
- if !ok {
- s = status.New(codes.Unknown, err.Error())
- }
-
- w.Header().Del("Trailer")
-
- contentType := marshaler.ContentType()
- // Check marshaler at runtime in order to keep backwards compatibility.
- // An interface param needs to be added to the ContentType() function on
- // the Marshal interface to be able to remove this check
- if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
- pb := s.Proto()
- contentType = httpBodyMarshaler.ContentTypeFromMessage(pb)
- }
- w.Header().Set("Content-Type", contentType)
-
- body := &errorBody{
- Error: s.Message(),
- Message: s.Message(),
- Code: int32(s.Code()),
- Details: s.Proto().GetDetails(),
- }
-
- buf, merr := marshaler.Marshal(body)
- if merr != nil {
- grpclog.Infof("Failed to marshal error message %q: %v", body, merr)
- w.WriteHeader(http.StatusInternalServerError)
- if _, err := io.WriteString(w, fallback); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
- }
- return
- }
-
- md, ok := ServerMetadataFromContext(ctx)
- if !ok {
- grpclog.Infof("Failed to extract ServerMetadata from context")
- }
-
- handleForwardResponseServerMetadata(w, mux, md)
- handleForwardResponseTrailerHeader(w, md)
- st := HTTPStatusFromCode(s.Code())
- w.WriteHeader(st)
- if _, err := w.Write(buf); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
- }
-
- handleForwardResponseTrailer(w, md)
-}
-
-// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
-// It simply writes a string representation of the given error into "w".
-func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
- http.Error(w, msg, code)
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go
deleted file mode 100644
index e1cf7a914..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package runtime
-
-import (
- "encoding/json"
- "io"
- "strings"
-
- "github.com/golang/protobuf/protoc-gen-go/generator"
- "google.golang.org/genproto/protobuf/field_mask"
-)
-
-// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body.
-func FieldMaskFromRequestBody(r io.Reader) (*field_mask.FieldMask, error) {
- fm := &field_mask.FieldMask{}
- var root interface{}
- if err := json.NewDecoder(r).Decode(&root); err != nil {
- if err == io.EOF {
- return fm, nil
- }
- return nil, err
- }
-
- queue := []fieldMaskPathItem{{node: root}}
- for len(queue) > 0 {
- // dequeue an item
- item := queue[0]
- queue = queue[1:]
-
- if m, ok := item.node.(map[string]interface{}); ok {
- // if the item is an object, then enqueue all of its children
- for k, v := range m {
- queue = append(queue, fieldMaskPathItem{path: append(item.path, generator.CamelCase(k)), node: v})
- }
- } else if len(item.path) > 0 {
- // otherwise, it's a leaf node so print its path
- fm.Paths = append(fm.Paths, strings.Join(item.path, "."))
- }
- }
-
- return fm, nil
-}
-
-// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
-type fieldMaskPathItem struct {
- // the list of prior fields leading up to node
- path []string
-
- // a generic decoded JSON object; the current item to inspect for further path extraction
- node interface{}
-}
-
-// CamelCaseFieldMask updates the given FieldMask by converting all of its paths to CamelCase, using the same heuristic
-// that's used for naming protobuf fields in Go.
-func CamelCaseFieldMask(mask *field_mask.FieldMask) {
- if mask == nil || mask.Paths == nil {
- return
- }
-
- var newPaths []string
- for _, path := range mask.Paths {
- lowerCasedParts := strings.Split(path, ".")
- var camelCasedParts []string
- for _, part := range lowerCasedParts {
- camelCasedParts = append(camelCasedParts, generator.CamelCase(part))
- }
- newPaths = append(newPaths, strings.Join(camelCasedParts, "."))
- }
-
- mask.Paths = newPaths
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
deleted file mode 100644
index 2af900650..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
+++ /dev/null
@@ -1,209 +0,0 @@
-package runtime
-
-import (
- "errors"
- "fmt"
- "io"
- "net/http"
- "net/textproto"
-
- "context"
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/internal"
- "google.golang.org/grpc/grpclog"
-)
-
-var errEmptyResponse = errors.New("empty response")
-
-// ForwardResponseStream forwards the stream from gRPC server to REST client.
-func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
- f, ok := w.(http.Flusher)
- if !ok {
- grpclog.Infof("Flush not supported in %T", w)
- http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
- return
- }
-
- md, ok := ServerMetadataFromContext(ctx)
- if !ok {
- grpclog.Infof("Failed to extract ServerMetadata from context")
- http.Error(w, "unexpected error", http.StatusInternalServerError)
- return
- }
- handleForwardResponseServerMetadata(w, mux, md)
-
- w.Header().Set("Transfer-Encoding", "chunked")
- w.Header().Set("Content-Type", marshaler.ContentType())
- if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
- HTTPError(ctx, mux, marshaler, w, req, err)
- return
- }
-
- var delimiter []byte
- if d, ok := marshaler.(Delimited); ok {
- delimiter = d.Delimiter()
- } else {
- delimiter = []byte("\n")
- }
-
- var wroteHeader bool
- for {
- resp, err := recv()
- if err == io.EOF {
- return
- }
- if err != nil {
- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
- return
- }
- if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
- return
- }
-
- buf, err := marshaler.Marshal(streamChunk(ctx, resp, mux.streamErrorHandler))
- if err != nil {
- grpclog.Infof("Failed to marshal response chunk: %v", err)
- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
- return
- }
- if _, err = w.Write(buf); err != nil {
- grpclog.Infof("Failed to send response chunk: %v", err)
- return
- }
- wroteHeader = true
- if _, err = w.Write(delimiter); err != nil {
- grpclog.Infof("Failed to send delimiter chunk: %v", err)
- return
- }
- f.Flush()
- }
-}
-
-func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
- for k, vs := range md.HeaderMD {
- if h, ok := mux.outgoingHeaderMatcher(k); ok {
- for _, v := range vs {
- w.Header().Add(h, v)
- }
- }
- }
-}
-
-func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
- for k := range md.TrailerMD {
- tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
- w.Header().Add("Trailer", tKey)
- }
-}
-
-func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
- for k, vs := range md.TrailerMD {
- tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
- for _, v := range vs {
- w.Header().Add(tKey, v)
- }
- }
-}
-
-// responseBody interface contains method for getting field for marshaling to the response body
-// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule`
-type responseBody interface {
- XXX_ResponseBody() interface{}
-}
-
-// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
-func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
- md, ok := ServerMetadataFromContext(ctx)
- if !ok {
- grpclog.Infof("Failed to extract ServerMetadata from context")
- }
-
- handleForwardResponseServerMetadata(w, mux, md)
- handleForwardResponseTrailerHeader(w, md)
-
- contentType := marshaler.ContentType()
- // Check marshaler on run time in order to keep backwards compatability
- // An interface param needs to be added to the ContentType() function on
- // the Marshal interface to be able to remove this check
- if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
- contentType = httpBodyMarshaler.ContentTypeFromMessage(resp)
- }
- w.Header().Set("Content-Type", contentType)
-
- if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
- HTTPError(ctx, mux, marshaler, w, req, err)
- return
- }
- var buf []byte
- var err error
- if rb, ok := resp.(responseBody); ok {
- buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
- } else {
- buf, err = marshaler.Marshal(resp)
- }
- if err != nil {
- grpclog.Infof("Marshal error: %v", err)
- HTTPError(ctx, mux, marshaler, w, req, err)
- return
- }
-
- if _, err = w.Write(buf); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
- }
-
- handleForwardResponseTrailer(w, md)
-}
-
-func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
- if len(opts) == 0 {
- return nil
- }
- for _, opt := range opts {
- if err := opt(ctx, w, resp); err != nil {
- grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
- return err
- }
- }
- return nil
-}
-
-func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
- serr := streamError(ctx, mux.streamErrorHandler, err)
- if !wroteHeader {
- w.WriteHeader(int(serr.HttpCode))
- }
- buf, merr := marshaler.Marshal(errorChunk(serr))
- if merr != nil {
- grpclog.Infof("Failed to marshal an error: %v", merr)
- return
- }
- if _, werr := w.Write(buf); werr != nil {
- grpclog.Infof("Failed to notify error to client: %v", werr)
- return
- }
-}
-
-// streamChunk returns a chunk in a response stream for the given result. The
-// given errHandler is used to render an error chunk if result is nil.
-func streamChunk(ctx context.Context, result proto.Message, errHandler StreamErrorHandlerFunc) map[string]proto.Message {
- if result == nil {
- return errorChunk(streamError(ctx, errHandler, errEmptyResponse))
- }
- return map[string]proto.Message{"result": result}
-}
-
-// streamError returns the payload for the final message in a response stream
-// that represents the given err.
-func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError {
- serr := errHandler(ctx, err)
- if serr != nil {
- return serr
- }
- // TODO: log about misbehaving stream error handler?
- return DefaultHTTPStreamErrorHandler(ctx, err)
-}
-
-func errorChunk(err *StreamError) map[string]proto.Message {
- return map[string]proto.Message{"error": (*internal.StreamError)(err)}
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
deleted file mode 100644
index f55285b5d..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package runtime
-
-import (
- "google.golang.org/genproto/googleapis/api/httpbody"
-)
-
-// SetHTTPBodyMarshaler overwrite the default marshaler with the HTTPBodyMarshaler
-func SetHTTPBodyMarshaler(serveMux *ServeMux) {
- serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{
- Marshaler: &JSONPb{OrigName: true},
- }
-}
-
-// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
-// google.api.HttpBody message as the full response body if it is
-// the actual message used as the response. If not, then this will
-// simply fallback to the Marshaler specified as its default Marshaler.
-type HTTPBodyMarshaler struct {
- Marshaler
-}
-
-// ContentType implementation to keep backwards compatability with marshal interface
-func (h *HTTPBodyMarshaler) ContentType() string {
- return h.ContentTypeFromMessage(nil)
-}
-
-// ContentTypeFromMessage in case v is a google.api.HttpBody message it returns
-// its specified content type otherwise fall back to the default Marshaler.
-func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string {
- if httpBody, ok := v.(*httpbody.HttpBody); ok {
- return httpBody.GetContentType()
- }
- return h.Marshaler.ContentType()
-}
-
-// Marshal marshals "v" by returning the body bytes if v is a
-// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
-func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
- if httpBody, ok := v.(*httpbody.HttpBody); ok {
- return httpBody.Data, nil
- }
- return h.Marshaler.Marshal(v)
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
deleted file mode 100644
index f9d3a585a..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package runtime
-
-import (
- "encoding/json"
- "io"
-)
-
-// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
-// with the standard "encoding/json" package of Golang.
-// Although it is generally faster for simple proto messages than JSONPb,
-// it does not support advanced features of protobuf, e.g. map, oneof, ....
-//
-// The NewEncoder and NewDecoder types return *json.Encoder and
-// *json.Decoder respectively.
-type JSONBuiltin struct{}
-
-// ContentType always Returns "application/json".
-func (*JSONBuiltin) ContentType() string {
- return "application/json"
-}
-
-// Marshal marshals "v" into JSON
-func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
- return json.Marshal(v)
-}
-
-// Unmarshal unmarshals JSON data into "v".
-func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
- return json.Unmarshal(data, v)
-}
-
-// NewDecoder returns a Decoder which reads JSON stream from "r".
-func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
- return json.NewDecoder(r)
-}
-
-// NewEncoder returns an Encoder which writes JSON stream into "w".
-func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
- return json.NewEncoder(w)
-}
-
-// Delimiter for newline encoded JSON streams.
-func (j *JSONBuiltin) Delimiter() []byte {
- return []byte("\n")
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
deleted file mode 100644
index 2fbb26287..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
+++ /dev/null
@@ -1,250 +0,0 @@
-package runtime
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "reflect"
-
- "github.com/golang/protobuf/jsonpb"
- "github.com/golang/protobuf/proto"
-)
-
-// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
-// with the "github.com/golang/protobuf/jsonpb".
-// It supports fully functionality of protobuf unlike JSONBuiltin.
-//
-// The NewDecoder method returns a DecoderWrapper, so the underlying
-// *json.Decoder methods can be used.
-type JSONPb jsonpb.Marshaler
-
-// ContentType always returns "application/json".
-func (*JSONPb) ContentType() string {
- return "application/json"
-}
-
-// Marshal marshals "v" into JSON.
-func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
- if _, ok := v.(proto.Message); !ok {
- return j.marshalNonProtoField(v)
- }
-
- var buf bytes.Buffer
- if err := j.marshalTo(&buf, v); err != nil {
- return nil, err
- }
- return buf.Bytes(), nil
-}
-
-func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
- p, ok := v.(proto.Message)
- if !ok {
- buf, err := j.marshalNonProtoField(v)
- if err != nil {
- return err
- }
- _, err = w.Write(buf)
- return err
- }
- return (*jsonpb.Marshaler)(j).Marshal(w, p)
-}
-
-var (
- // protoMessageType is stored to prevent constant lookup of the same type at runtime.
- protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
-)
-
-// marshalNonProto marshals a non-message field of a protobuf message.
-// This function does not correctly marshals arbitrary data structure into JSON,
-// but it is only capable of marshaling non-message field values of protobuf,
-// i.e. primitive types, enums; pointers to primitives or enums; maps from
-// integer/string types to primitives/enums/pointers to messages.
-func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
- if v == nil {
- return []byte("null"), nil
- }
- rv := reflect.ValueOf(v)
- for rv.Kind() == reflect.Ptr {
- if rv.IsNil() {
- return []byte("null"), nil
- }
- rv = rv.Elem()
- }
-
- if rv.Kind() == reflect.Slice {
- if rv.IsNil() {
- if j.EmitDefaults {
- return []byte("[]"), nil
- }
- return []byte("null"), nil
- }
-
- if rv.Type().Elem().Implements(protoMessageType) {
- var buf bytes.Buffer
- err := buf.WriteByte('[')
- if err != nil {
- return nil, err
- }
- for i := 0; i < rv.Len(); i++ {
- if i != 0 {
- err = buf.WriteByte(',')
- if err != nil {
- return nil, err
- }
- }
- if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
- return nil, err
- }
- }
- err = buf.WriteByte(']')
- if err != nil {
- return nil, err
- }
-
- return buf.Bytes(), nil
- }
- }
-
- if rv.Kind() == reflect.Map {
- m := make(map[string]*json.RawMessage)
- for _, k := range rv.MapKeys() {
- buf, err := j.Marshal(rv.MapIndex(k).Interface())
- if err != nil {
- return nil, err
- }
- m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
- }
- if j.Indent != "" {
- return json.MarshalIndent(m, "", j.Indent)
- }
- return json.Marshal(m)
- }
- if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
- return json.Marshal(enum.String())
- }
- return json.Marshal(rv.Interface())
-}
-
-// Unmarshal unmarshals JSON "data" into "v"
-func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
- return unmarshalJSONPb(data, v)
-}
-
-// NewDecoder returns a Decoder which reads JSON stream from "r".
-func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
- d := json.NewDecoder(r)
- return DecoderWrapper{Decoder: d}
-}
-
-// DecoderWrapper is a wrapper around a *json.Decoder that adds
-// support for protos to the Decode method.
-type DecoderWrapper struct {
- *json.Decoder
-}
-
-// Decode wraps the embedded decoder's Decode method to support
-// protos using a jsonpb.Unmarshaler.
-func (d DecoderWrapper) Decode(v interface{}) error {
- return decodeJSONPb(d.Decoder, v)
-}
-
-// NewEncoder returns an Encoder which writes JSON stream into "w".
-func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
- return EncoderFunc(func(v interface{}) error {
- if err := j.marshalTo(w, v); err != nil {
- return err
- }
- // mimic json.Encoder by adding a newline (makes output
- // easier to read when it contains multiple encoded items)
- _, err := w.Write(j.Delimiter())
- return err
- })
-}
-
-func unmarshalJSONPb(data []byte, v interface{}) error {
- d := json.NewDecoder(bytes.NewReader(data))
- return decodeJSONPb(d, v)
-}
-
-func decodeJSONPb(d *json.Decoder, v interface{}) error {
- p, ok := v.(proto.Message)
- if !ok {
- return decodeNonProtoField(d, v)
- }
- unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true}
- return unmarshaler.UnmarshalNext(d, p)
-}
-
-func decodeNonProtoField(d *json.Decoder, v interface{}) error {
- rv := reflect.ValueOf(v)
- if rv.Kind() != reflect.Ptr {
- return fmt.Errorf("%T is not a pointer", v)
- }
- for rv.Kind() == reflect.Ptr {
- if rv.IsNil() {
- rv.Set(reflect.New(rv.Type().Elem()))
- }
- if rv.Type().ConvertibleTo(typeProtoMessage) {
- unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true}
- return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message))
- }
- rv = rv.Elem()
- }
- if rv.Kind() == reflect.Map {
- if rv.IsNil() {
- rv.Set(reflect.MakeMap(rv.Type()))
- }
- conv, ok := convFromType[rv.Type().Key().Kind()]
- if !ok {
- return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
- }
-
- m := make(map[string]*json.RawMessage)
- if err := d.Decode(&m); err != nil {
- return err
- }
- for k, v := range m {
- result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
- if err := result[1].Interface(); err != nil {
- return err.(error)
- }
- bk := result[0]
- bv := reflect.New(rv.Type().Elem())
- if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil {
- return err
- }
- rv.SetMapIndex(bk, bv.Elem())
- }
- return nil
- }
- if _, ok := rv.Interface().(protoEnum); ok {
- var repr interface{}
- if err := d.Decode(&repr); err != nil {
- return err
- }
- switch repr.(type) {
- case string:
- // TODO(yugui) Should use proto.StructProperties?
- return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
- case float64:
- rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type()))
- return nil
- default:
- return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
- }
- }
- return d.Decode(v)
-}
-
-type protoEnum interface {
- fmt.Stringer
- EnumDescriptor() ([]byte, []int)
-}
-
-var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
-
-// Delimiter for newline encoded JSON streams.
-func (j *JSONPb) Delimiter() []byte {
- return []byte("\n")
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
deleted file mode 100644
index f65d1a267..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package runtime
-
-import (
- "io"
-
- "errors"
- "github.com/golang/protobuf/proto"
- "io/ioutil"
-)
-
-// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes
-type ProtoMarshaller struct{}
-
-// ContentType always returns "application/octet-stream".
-func (*ProtoMarshaller) ContentType() string {
- return "application/octet-stream"
-}
-
-// Marshal marshals "value" into Proto
-func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
- message, ok := value.(proto.Message)
- if !ok {
- return nil, errors.New("unable to marshal non proto field")
- }
- return proto.Marshal(message)
-}
-
-// Unmarshal unmarshals proto "data" into "value"
-func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
- message, ok := value.(proto.Message)
- if !ok {
- return errors.New("unable to unmarshal non proto field")
- }
- return proto.Unmarshal(data, message)
-}
-
-// NewDecoder returns a Decoder which reads proto stream from "reader".
-func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
- return DecoderFunc(func(value interface{}) error {
- buffer, err := ioutil.ReadAll(reader)
- if err != nil {
- return err
- }
- return marshaller.Unmarshal(buffer, value)
- })
-}
-
-// NewEncoder returns an Encoder which writes proto stream into "writer".
-func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
- return EncoderFunc(func(value interface{}) error {
- buffer, err := marshaller.Marshal(value)
- if err != nil {
- return err
- }
- _, err = writer.Write(buffer)
- if err != nil {
- return err
- }
-
- return nil
- })
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
deleted file mode 100644
index 98fe6e88a..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package runtime
-
-import (
- "io"
-)
-
-// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
-type Marshaler interface {
- // Marshal marshals "v" into byte sequence.
- Marshal(v interface{}) ([]byte, error)
- // Unmarshal unmarshals "data" into "v".
- // "v" must be a pointer value.
- Unmarshal(data []byte, v interface{}) error
- // NewDecoder returns a Decoder which reads byte sequence from "r".
- NewDecoder(r io.Reader) Decoder
- // NewEncoder returns an Encoder which writes bytes sequence into "w".
- NewEncoder(w io.Writer) Encoder
- // ContentType returns the Content-Type which this marshaler is responsible for.
- ContentType() string
-}
-
-// Decoder decodes a byte sequence
-type Decoder interface {
- Decode(v interface{}) error
-}
-
-// Encoder encodes gRPC payloads / fields into byte sequence.
-type Encoder interface {
- Encode(v interface{}) error
-}
-
-// DecoderFunc adapts an decoder function into Decoder.
-type DecoderFunc func(v interface{}) error
-
-// Decode delegates invocations to the underlying function itself.
-func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
-
-// EncoderFunc adapts an encoder function into Encoder
-type EncoderFunc func(v interface{}) error
-
-// Encode delegates invocations to the underlying function itself.
-func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
-
-// Delimited defines the streaming delimiter.
-type Delimited interface {
- // Delimiter returns the record seperator for the stream.
- Delimiter() []byte
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
deleted file mode 100644
index 5cc53ae4f..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package runtime
-
-import (
- "errors"
- "net/http"
-)
-
-// MIMEWildcard is the fallback MIME type used for requests which do not match
-// a registered MIME type.
-const MIMEWildcard = "*"
-
-var (
- acceptHeader = http.CanonicalHeaderKey("Accept")
- contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
-
- defaultMarshaler = &JSONPb{OrigName: true}
-)
-
-// MarshalerForRequest returns the inbound/outbound marshalers for this request.
-// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
-// If it isn't set (or the request Content-Type is empty), checks for "*".
-// If there are multiple Content-Type headers set, choose the first one that it can
-// exactly match in the registry.
-// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
-func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
- for _, acceptVal := range r.Header[acceptHeader] {
- if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
- outbound = m
- break
- }
- }
-
- for _, contentTypeVal := range r.Header[contentTypeHeader] {
- if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok {
- inbound = m
- break
- }
- }
-
- if inbound == nil {
- inbound = mux.marshalers.mimeMap[MIMEWildcard]
- }
- if outbound == nil {
- outbound = inbound
- }
-
- return inbound, outbound
-}
-
-// marshalerRegistry is a mapping from MIME types to Marshalers.
-type marshalerRegistry struct {
- mimeMap map[string]Marshaler
-}
-
-// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
-// MIME type).
-func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
- if len(mime) == 0 {
- return errors.New("empty MIME type")
- }
-
- m.mimeMap[mime] = marshaler
-
- return nil
-}
-
-// makeMarshalerMIMERegistry returns a new registry of marshalers.
-// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
-//
-// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
-// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
-// with a "application/json" Content-Type.
-// "*" can be used to match any Content-Type.
-// This can be attached to a ServerMux with the marshaler option.
-func makeMarshalerMIMERegistry() marshalerRegistry {
- return marshalerRegistry{
- mimeMap: map[string]Marshaler{
- MIMEWildcard: defaultMarshaler,
- },
- }
-}
-
-// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
-// Marshalers to a MIME type in mux.
-func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
- return func(mux *ServeMux) {
- if err := mux.marshalers.add(mime, marshaler); err != nil {
- panic(err)
- }
- }
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
deleted file mode 100644
index 093373a20..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
+++ /dev/null
@@ -1,289 +0,0 @@
-package runtime
-
-import (
- "context"
- "fmt"
- "net/http"
- "net/textproto"
- "strings"
-
- "github.com/golang/protobuf/proto"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
-)
-
-// A HandlerFunc handles a specific pair of path pattern and HTTP method.
-type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
-
-// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when
-// a request is received with a URI path that does not match any registered
-// service method.
-//
-// Since gRPC servers return an "Unimplemented" code for requests with an
-// unrecognized URI path, this error also has a gRPC "Unimplemented" code.
-var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
-
-// ServeMux is a request multiplexer for grpc-gateway.
-// It matches http requests to patterns and invokes the corresponding handler.
-type ServeMux struct {
- // handlers maps HTTP method to a list of handlers.
- handlers map[string][]handler
- forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
- marshalers marshalerRegistry
- incomingHeaderMatcher HeaderMatcherFunc
- outgoingHeaderMatcher HeaderMatcherFunc
- metadataAnnotators []func(context.Context, *http.Request) metadata.MD
- streamErrorHandler StreamErrorHandlerFunc
- protoErrorHandler ProtoErrorHandlerFunc
- disablePathLengthFallback bool
-}
-
-// ServeMuxOption is an option that can be given to a ServeMux on construction.
-type ServeMuxOption func(*ServeMux)
-
-// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
-//
-// forwardResponseOption is an option that will be called on the relevant context.Context,
-// http.ResponseWriter, and proto.Message before every forwarded response.
-//
-// The message may be nil in the case where just a header is being sent.
-func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
- return func(serveMux *ServeMux) {
- serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
- }
-}
-
-// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
-type HeaderMatcherFunc func(string) (string, bool)
-
-// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
-// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with
-// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'.
-func DefaultHeaderMatcher(key string) (string, bool) {
- key = textproto.CanonicalMIMEHeaderKey(key)
- if isPermanentHTTPHeader(key) {
- return MetadataPrefix + key, true
- } else if strings.HasPrefix(key, MetadataHeaderPrefix) {
- return key[len(MetadataHeaderPrefix):], true
- }
- return "", false
-}
-
-// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
-//
-// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
-// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
-func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
- return func(mux *ServeMux) {
- mux.incomingHeaderMatcher = fn
- }
-}
-
-// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
-//
-// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
-// passed to http response returned from gateway. To transform the header before passing to response,
-// matcher should return modified header.
-func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
- return func(mux *ServeMux) {
- mux.outgoingHeaderMatcher = fn
- }
-}
-
-// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
-//
-// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
-// is reading token from cookie and adding it in gRPC context.
-func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
- return func(serveMux *ServeMux) {
- serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
- }
-}
-
-// WithProtoErrorHandler returns a ServeMuxOption for passing metadata to a gRPC context.
-//
-// This can be used to handle an error as general proto message defined by gRPC.
-// The response including body and status is not backward compatible with the default error handler.
-// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization.
-func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption {
- return func(serveMux *ServeMux) {
- serveMux.protoErrorHandler = fn
- }
-}
-
-// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback.
-func WithDisablePathLengthFallback() ServeMuxOption {
- return func(serveMux *ServeMux) {
- serveMux.disablePathLengthFallback = true
- }
-}
-
-// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
-// error handler, which allows for customizing the error trailer for server-streaming
-// calls.
-//
-// For stream errors that occur before any response has been written, the mux's
-// ProtoErrorHandler will be invoked. However, once data has been written, the errors must
-// be handled differently: they must be included in the response body. The response body's
-// final message will include the error details returned by the stream error handler.
-func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
- return func(serveMux *ServeMux) {
- serveMux.streamErrorHandler = fn
- }
-}
-
-// NewServeMux returns a new ServeMux whose internal mapping is empty.
-func NewServeMux(opts ...ServeMuxOption) *ServeMux {
- serveMux := &ServeMux{
- handlers: make(map[string][]handler),
- forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
- marshalers: makeMarshalerMIMERegistry(),
- streamErrorHandler: DefaultHTTPStreamErrorHandler,
- }
-
- for _, opt := range opts {
- opt(serveMux)
- }
-
- if serveMux.protoErrorHandler != nil {
- HTTPError = serveMux.protoErrorHandler
- // OtherErrorHandler is no longer used when protoErrorHandler is set.
- // Overwritten by a special error handler to return Unknown.
- OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) {
- ctx := context.Background()
- _, outboundMarshaler := MarshalerForRequest(serveMux, r)
- sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler")
- serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr)
- }
- }
-
- if serveMux.incomingHeaderMatcher == nil {
- serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
- }
-
- if serveMux.outgoingHeaderMatcher == nil {
- serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
- return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
- }
- }
-
- return serveMux
-}
-
-// Handle associates "h" to the pair of HTTP method and path pattern.
-func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
- s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h})
-}
-
-// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
-func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- ctx := r.Context()
-
- path := r.URL.Path
- if !strings.HasPrefix(path, "/") {
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest))
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
- } else {
- OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
- }
- return
- }
-
- components := strings.Split(path[1:], "/")
- l := len(components)
- var verb string
- if idx := strings.LastIndex(components[l-1], ":"); idx == 0 {
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
- } else {
- OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
- }
- return
- } else if idx > 0 {
- c := components[l-1]
- components[l-1], verb = c[:idx], c[idx+1:]
- }
-
- if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
- r.Method = strings.ToUpper(override)
- if err := r.ParseForm(); err != nil {
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- sterr := status.Error(codes.InvalidArgument, err.Error())
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
- } else {
- OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
- }
- return
- }
- }
- for _, h := range s.handlers[r.Method] {
- pathParams, err := h.pat.Match(components, verb)
- if err != nil {
- continue
- }
- h.h(w, r, pathParams)
- return
- }
-
- // lookup other methods to handle fallback from GET to POST and
- // to determine if it is MethodNotAllowed or NotFound.
- for m, handlers := range s.handlers {
- if m == r.Method {
- continue
- }
- for _, h := range handlers {
- pathParams, err := h.pat.Match(components, verb)
- if err != nil {
- continue
- }
- // X-HTTP-Method-Override is optional. Always allow fallback to POST.
- if s.isPathLengthFallback(r) {
- if err := r.ParseForm(); err != nil {
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- sterr := status.Error(codes.InvalidArgument, err.Error())
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
- } else {
- OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
- }
- return
- }
- h.h(w, r, pathParams)
- return
- }
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
- } else {
- OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
- }
- return
- }
- }
-
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
- } else {
- OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
- }
-}
-
-// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
-func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
- return s.forwardResponseOptions
-}
-
-func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
- return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
-}
-
-type handler struct {
- pat Pattern
- h HandlerFunc
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go
deleted file mode 100644
index f16a84ad3..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go
+++ /dev/null
@@ -1,227 +0,0 @@
-package runtime
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
- "google.golang.org/grpc/grpclog"
-)
-
-var (
- // ErrNotMatch indicates that the given HTTP request path does not match to the pattern.
- ErrNotMatch = errors.New("not match to the path pattern")
- // ErrInvalidPattern indicates that the given definition of Pattern is not valid.
- ErrInvalidPattern = errors.New("invalid pattern")
-)
-
-type op struct {
- code utilities.OpCode
- operand int
-}
-
-// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto.
-type Pattern struct {
- // ops is a list of operations
- ops []op
- // pool is a constant pool indexed by the operands or vars.
- pool []string
- // vars is a list of variables names to be bound by this pattern
- vars []string
- // stacksize is the max depth of the stack
- stacksize int
- // tailLen is the length of the fixed-size segments after a deep wildcard
- tailLen int
- // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
- verb string
-}
-
-// NewPattern returns a new Pattern from the given definition values.
-// "ops" is a sequence of op codes. "pool" is a constant pool.
-// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
-// "version" must be 1 for now.
-// It returns an error if the given definition is invalid.
-func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
- if version != 1 {
- grpclog.Infof("unsupported version: %d", version)
- return Pattern{}, ErrInvalidPattern
- }
-
- l := len(ops)
- if l%2 != 0 {
- grpclog.Infof("odd number of ops codes: %d", l)
- return Pattern{}, ErrInvalidPattern
- }
-
- var (
- typedOps []op
- stack, maxstack int
- tailLen int
- pushMSeen bool
- vars []string
- )
- for i := 0; i < l; i += 2 {
- op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
- switch op.code {
- case utilities.OpNop:
- continue
- case utilities.OpPush:
- if pushMSeen {
- tailLen++
- }
- stack++
- case utilities.OpPushM:
- if pushMSeen {
- grpclog.Infof("pushM appears twice")
- return Pattern{}, ErrInvalidPattern
- }
- pushMSeen = true
- stack++
- case utilities.OpLitPush:
- if op.operand < 0 || len(pool) <= op.operand {
- grpclog.Infof("negative literal index: %d", op.operand)
- return Pattern{}, ErrInvalidPattern
- }
- if pushMSeen {
- tailLen++
- }
- stack++
- case utilities.OpConcatN:
- if op.operand <= 0 {
- grpclog.Infof("negative concat size: %d", op.operand)
- return Pattern{}, ErrInvalidPattern
- }
- stack -= op.operand
- if stack < 0 {
- grpclog.Print("stack underflow")
- return Pattern{}, ErrInvalidPattern
- }
- stack++
- case utilities.OpCapture:
- if op.operand < 0 || len(pool) <= op.operand {
- grpclog.Infof("variable name index out of bound: %d", op.operand)
- return Pattern{}, ErrInvalidPattern
- }
- v := pool[op.operand]
- op.operand = len(vars)
- vars = append(vars, v)
- stack--
- if stack < 0 {
- grpclog.Infof("stack underflow")
- return Pattern{}, ErrInvalidPattern
- }
- default:
- grpclog.Infof("invalid opcode: %d", op.code)
- return Pattern{}, ErrInvalidPattern
- }
-
- if maxstack < stack {
- maxstack = stack
- }
- typedOps = append(typedOps, op)
- }
- return Pattern{
- ops: typedOps,
- pool: pool,
- vars: vars,
- stacksize: maxstack,
- tailLen: tailLen,
- verb: verb,
- }, nil
-}
-
-// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
-func MustPattern(p Pattern, err error) Pattern {
- if err != nil {
- grpclog.Fatalf("Pattern initialization failed: %v", err)
- }
- return p
-}
-
-// Match examines components if it matches to the Pattern.
-// If it matches, the function returns a mapping from field paths to their captured values.
-// If otherwise, the function returns an error.
-func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
- if p.verb != verb {
- return nil, ErrNotMatch
- }
-
- var pos int
- stack := make([]string, 0, p.stacksize)
- captured := make([]string, len(p.vars))
- l := len(components)
- for _, op := range p.ops {
- switch op.code {
- case utilities.OpNop:
- continue
- case utilities.OpPush, utilities.OpLitPush:
- if pos >= l {
- return nil, ErrNotMatch
- }
- c := components[pos]
- if op.code == utilities.OpLitPush {
- if lit := p.pool[op.operand]; c != lit {
- return nil, ErrNotMatch
- }
- }
- stack = append(stack, c)
- pos++
- case utilities.OpPushM:
- end := len(components)
- if end < pos+p.tailLen {
- return nil, ErrNotMatch
- }
- end -= p.tailLen
- stack = append(stack, strings.Join(components[pos:end], "/"))
- pos = end
- case utilities.OpConcatN:
- n := op.operand
- l := len(stack) - n
- stack = append(stack[:l], strings.Join(stack[l:], "/"))
- case utilities.OpCapture:
- n := len(stack) - 1
- captured[op.operand] = stack[n]
- stack = stack[:n]
- }
- }
- if pos < l {
- return nil, ErrNotMatch
- }
- bindings := make(map[string]string)
- for i, val := range captured {
- bindings[p.vars[i]] = val
- }
- return bindings, nil
-}
-
-// Verb returns the verb part of the Pattern.
-func (p Pattern) Verb() string { return p.verb }
-
-func (p Pattern) String() string {
- var stack []string
- for _, op := range p.ops {
- switch op.code {
- case utilities.OpNop:
- continue
- case utilities.OpPush:
- stack = append(stack, "*")
- case utilities.OpLitPush:
- stack = append(stack, p.pool[op.operand])
- case utilities.OpPushM:
- stack = append(stack, "**")
- case utilities.OpConcatN:
- n := op.operand
- l := len(stack) - n
- stack = append(stack[:l], strings.Join(stack[l:], "/"))
- case utilities.OpCapture:
- n := len(stack) - 1
- stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
- }
- }
- segs := strings.Join(stack, "/")
- if p.verb != "" {
- return fmt.Sprintf("/%s:%s", segs, p.verb)
- }
- return "/" + segs
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go
deleted file mode 100644
index a3151e2a5..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package runtime
-
-import (
- "github.com/golang/protobuf/proto"
-)
-
-// StringP returns a pointer to a string whose pointee is same as the given string value.
-func StringP(val string) (*string, error) {
- return proto.String(val), nil
-}
-
-// BoolP parses the given string representation of a boolean value,
-// and returns a pointer to a bool whose value is same as the parsed value.
-func BoolP(val string) (*bool, error) {
- b, err := Bool(val)
- if err != nil {
- return nil, err
- }
- return proto.Bool(b), nil
-}
-
-// Float64P parses the given string representation of a floating point number,
-// and returns a pointer to a float64 whose value is same as the parsed number.
-func Float64P(val string) (*float64, error) {
- f, err := Float64(val)
- if err != nil {
- return nil, err
- }
- return proto.Float64(f), nil
-}
-
-// Float32P parses the given string representation of a floating point number,
-// and returns a pointer to a float32 whose value is same as the parsed number.
-func Float32P(val string) (*float32, error) {
- f, err := Float32(val)
- if err != nil {
- return nil, err
- }
- return proto.Float32(f), nil
-}
-
-// Int64P parses the given string representation of an integer
-// and returns a pointer to a int64 whose value is same as the parsed integer.
-func Int64P(val string) (*int64, error) {
- i, err := Int64(val)
- if err != nil {
- return nil, err
- }
- return proto.Int64(i), nil
-}
-
-// Int32P parses the given string representation of an integer
-// and returns a pointer to a int32 whose value is same as the parsed integer.
-func Int32P(val string) (*int32, error) {
- i, err := Int32(val)
- if err != nil {
- return nil, err
- }
- return proto.Int32(i), err
-}
-
-// Uint64P parses the given string representation of an integer
-// and returns a pointer to a uint64 whose value is same as the parsed integer.
-func Uint64P(val string) (*uint64, error) {
- i, err := Uint64(val)
- if err != nil {
- return nil, err
- }
- return proto.Uint64(i), err
-}
-
-// Uint32P parses the given string representation of an integer
-// and returns a pointer to a uint32 whose value is same as the parsed integer.
-func Uint32P(val string) (*uint32, error) {
- i, err := Uint32(val)
- if err != nil {
- return nil, err
- }
- return proto.Uint32(i), err
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
deleted file mode 100644
index ca76324ef..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package runtime
-
-import (
- "context"
- "io"
- "net/http"
-
- "github.com/golang/protobuf/ptypes/any"
- "github.com/grpc-ecosystem/grpc-gateway/internal"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/status"
-)
-
-// StreamErrorHandlerFunc accepts an error as a gRPC error generated via status package and translates it into a
-// a proto struct used to represent error at the end of a stream.
-type StreamErrorHandlerFunc func(context.Context, error) *StreamError
-
-// StreamError is the payload for the final message in a server stream in the event that the server returns an
-// error after a response message has already been sent.
-type StreamError internal.StreamError
-
-// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request.
-type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
-
-var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler
-
-// DefaultHTTPProtoErrorHandler is an implementation of HTTPError.
-// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
-// If otherwise, it replies with http.StatusInternalServerError.
-//
-// The response body returned by this function is a Status message marshaled by a Marshaler.
-//
-// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead.
-func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
- // return Internal when Marshal failed
- const fallback = `{"code": 13, "message": "failed to marshal error message"}`
-
- s, ok := status.FromError(err)
- if !ok {
- s = status.New(codes.Unknown, err.Error())
- }
-
- w.Header().Del("Trailer")
-
- contentType := marshaler.ContentType()
- // Check marshaler on run time in order to keep backwards compatability
- // An interface param needs to be added to the ContentType() function on
- // the Marshal interface to be able to remove this check
- if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
- pb := s.Proto()
- contentType = httpBodyMarshaler.ContentTypeFromMessage(pb)
- }
- w.Header().Set("Content-Type", contentType)
-
- buf, merr := marshaler.Marshal(s.Proto())
- if merr != nil {
- grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr)
- w.WriteHeader(http.StatusInternalServerError)
- if _, err := io.WriteString(w, fallback); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
- }
- return
- }
-
- md, ok := ServerMetadataFromContext(ctx)
- if !ok {
- grpclog.Infof("Failed to extract ServerMetadata from context")
- }
-
- handleForwardResponseServerMetadata(w, mux, md)
- handleForwardResponseTrailerHeader(w, md)
- st := HTTPStatusFromCode(s.Code())
- w.WriteHeader(st)
- if _, err := w.Write(buf); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
- }
-
- handleForwardResponseTrailer(w, md)
-}
-
-// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via
-// default logic.
-//
-// It extracts the gRPC status from err if possible. The fields of the status are
-// used to populate the returned StreamError, and the HTTP status code is derived
-// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a
-// gRPC status, an "Unknown" gRPC code is used and "Internal Server Error" HTTP code.
-func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError {
- grpcCode := codes.Unknown
- grpcMessage := err.Error()
- var grpcDetails []*any.Any
- if s, ok := status.FromError(err); ok {
- grpcCode = s.Code()
- grpcMessage = s.Message()
- grpcDetails = s.Proto().GetDetails()
- }
- httpCode := HTTPStatusFromCode(grpcCode)
- return &StreamError{
- GrpcCode: int32(grpcCode),
- HttpCode: int32(httpCode),
- Message: grpcMessage,
- HttpStatus: http.StatusText(httpCode),
- Details: grpcDetails,
- }
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
deleted file mode 100644
index bb9359f17..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
+++ /dev/null
@@ -1,392 +0,0 @@
-package runtime
-
-import (
- "encoding/base64"
- "fmt"
- "net/url"
- "reflect"
- "regexp"
- "strconv"
- "strings"
- "time"
-
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
- "google.golang.org/grpc/grpclog"
-)
-
-// PopulateQueryParameters populates "values" into "msg".
-// A value is ignored if its key starts with one of the elements in "filter".
-func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
- for key, values := range values {
- re, err := regexp.Compile("^(.*)\\[(.*)\\]$")
- if err != nil {
- return err
- }
- match := re.FindStringSubmatch(key)
- if len(match) == 3 {
- key = match[1]
- values = append([]string{match[2]}, values...)
- }
- fieldPath := strings.Split(key, ".")
- if filter.HasCommonPrefix(fieldPath) {
- continue
- }
- if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil {
- return err
- }
- }
- return nil
-}
-
-// PopulateFieldFromPath sets a value in a nested Protobuf structure.
-// It instantiates missing protobuf fields as it goes.
-func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
- fieldPath := strings.Split(fieldPathString, ".")
- return populateFieldValueFromPath(msg, fieldPath, []string{value})
-}
-
-func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
- m := reflect.ValueOf(msg)
- if m.Kind() != reflect.Ptr {
- return fmt.Errorf("unexpected type %T: %v", msg, msg)
- }
- var props *proto.Properties
- m = m.Elem()
- for i, fieldName := range fieldPath {
- isLast := i == len(fieldPath)-1
- if !isLast && m.Kind() != reflect.Struct {
- return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, "."))
- }
- var f reflect.Value
- var err error
- f, props, err = fieldByProtoName(m, fieldName)
- if err != nil {
- return err
- } else if !f.IsValid() {
- grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
- return nil
- }
-
- switch f.Kind() {
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
- if !isLast {
- return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
- }
- m = f
- case reflect.Slice:
- if !isLast {
- return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
- }
- // Handle []byte
- if f.Type().Elem().Kind() == reflect.Uint8 {
- m = f
- break
- }
- return populateRepeatedField(f, values, props)
- case reflect.Ptr:
- if f.IsNil() {
- m = reflect.New(f.Type().Elem())
- f.Set(m.Convert(f.Type()))
- }
- m = f.Elem()
- continue
- case reflect.Struct:
- m = f
- continue
- case reflect.Map:
- if !isLast {
- return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
- }
- return populateMapField(f, values, props)
- default:
- return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
- }
- }
- switch len(values) {
- case 0:
- return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, "."))
- case 1:
- default:
- grpclog.Infof("too many field values: %s", strings.Join(fieldPath, "."))
- }
- return populateField(m, values[0], props)
-}
-
-// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
-// "m" must be a struct value. It returns zero reflect.Value if no such field found.
-func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
- props := proto.GetProperties(m.Type())
-
- // look up field name in oneof map
- if op, ok := props.OneofTypes[name]; ok {
- v := reflect.New(op.Type.Elem())
- field := m.Field(op.Field)
- if !field.IsNil() {
- return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
- }
- field.Set(v)
- return v.Elem().Field(0), op.Prop, nil
- }
-
- for _, p := range props.Prop {
- if p.OrigName == name {
- return m.FieldByName(p.Name), p, nil
- }
- if p.JSONName == name {
- return m.FieldByName(p.Name), p, nil
- }
- }
- return reflect.Value{}, nil, nil
-}
-
-func populateMapField(f reflect.Value, values []string, props *proto.Properties) error {
- if len(values) != 2 {
- return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name)
- }
-
- key, value := values[0], values[1]
- keyType := f.Type().Key()
- valueType := f.Type().Elem()
- if f.IsNil() {
- f.Set(reflect.MakeMap(f.Type()))
- }
-
- keyConv, ok := convFromType[keyType.Kind()]
- if !ok {
- return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name)
- }
- valueConv, ok := convFromType[valueType.Kind()]
- if !ok {
- return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name)
- }
-
- keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)})
- if err := keyV[1].Interface(); err != nil {
- return err.(error)
- }
- valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)})
- if err := valueV[1].Interface(); err != nil {
- return err.(error)
- }
-
- f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType))
-
- return nil
-}
-
-func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {
- elemType := f.Type().Elem()
-
- // is the destination field a slice of an enumeration type?
- if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
- return populateFieldEnumRepeated(f, values, enumValMap)
- }
-
- conv, ok := convFromType[elemType.Kind()]
- if !ok {
- return fmt.Errorf("unsupported field type %s", elemType)
- }
- f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
- for i, v := range values {
- result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
- if err := result[1].Interface(); err != nil {
- return err.(error)
- }
- f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
- }
- return nil
-}
-
-func populateField(f reflect.Value, value string, props *proto.Properties) error {
- i := f.Addr().Interface()
-
- // Handle protobuf well known types
- type wkt interface {
- XXX_WellKnownType() string
- }
- if wkt, ok := i.(wkt); ok {
- switch wkt.XXX_WellKnownType() {
- case "Timestamp":
- if value == "null" {
- f.Field(0).SetInt(0)
- f.Field(1).SetInt(0)
- return nil
- }
-
- t, err := time.Parse(time.RFC3339Nano, value)
- if err != nil {
- return fmt.Errorf("bad Timestamp: %v", err)
- }
- f.Field(0).SetInt(int64(t.Unix()))
- f.Field(1).SetInt(int64(t.Nanosecond()))
- return nil
- case "Duration":
- if value == "null" {
- f.Field(0).SetInt(0)
- f.Field(1).SetInt(0)
- return nil
- }
- d, err := time.ParseDuration(value)
- if err != nil {
- return fmt.Errorf("bad Duration: %v", err)
- }
-
- ns := d.Nanoseconds()
- s := ns / 1e9
- ns %= 1e9
- f.Field(0).SetInt(s)
- f.Field(1).SetInt(ns)
- return nil
- case "DoubleValue":
- fallthrough
- case "FloatValue":
- float64Val, err := strconv.ParseFloat(value, 64)
- if err != nil {
- return fmt.Errorf("bad DoubleValue: %s", value)
- }
- f.Field(0).SetFloat(float64Val)
- return nil
- case "Int64Value":
- fallthrough
- case "Int32Value":
- int64Val, err := strconv.ParseInt(value, 10, 64)
- if err != nil {
- return fmt.Errorf("bad DoubleValue: %s", value)
- }
- f.Field(0).SetInt(int64Val)
- return nil
- case "UInt64Value":
- fallthrough
- case "UInt32Value":
- uint64Val, err := strconv.ParseUint(value, 10, 64)
- if err != nil {
- return fmt.Errorf("bad DoubleValue: %s", value)
- }
- f.Field(0).SetUint(uint64Val)
- return nil
- case "BoolValue":
- if value == "true" {
- f.Field(0).SetBool(true)
- } else if value == "false" {
- f.Field(0).SetBool(false)
- } else {
- return fmt.Errorf("bad BoolValue: %s", value)
- }
- return nil
- case "StringValue":
- f.Field(0).SetString(value)
- return nil
- case "BytesValue":
- bytesVal, err := base64.StdEncoding.DecodeString(value)
- if err != nil {
- return fmt.Errorf("bad BytesValue: %s", value)
- }
- f.Field(0).SetBytes(bytesVal)
- return nil
- }
- }
-
- // Handle google well known types
- if gwkt, ok := i.(proto.Message); ok {
- switch proto.MessageName(gwkt) {
- case "google.protobuf.FieldMask":
- p := f.Field(0)
- for _, v := range strings.Split(value, ",") {
- if v != "" {
- p.Set(reflect.Append(p, reflect.ValueOf(v)))
- }
- }
- return nil
- }
- }
-
- // Handle Time and Duration stdlib types
- switch t := i.(type) {
- case *time.Time:
- pt, err := time.Parse(time.RFC3339Nano, value)
- if err != nil {
- return fmt.Errorf("bad Timestamp: %v", err)
- }
- *t = pt
- return nil
- case *time.Duration:
- d, err := time.ParseDuration(value)
- if err != nil {
- return fmt.Errorf("bad Duration: %v", err)
- }
- *t = d
- return nil
- }
-
- // is the destination field an enumeration type?
- if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
- return populateFieldEnum(f, value, enumValMap)
- }
-
- conv, ok := convFromType[f.Kind()]
- if !ok {
- return fmt.Errorf("field type %T is not supported in query parameters", i)
- }
- result := conv.Call([]reflect.Value{reflect.ValueOf(value)})
- if err := result[1].Interface(); err != nil {
- return err.(error)
- }
- f.Set(result[0].Convert(f.Type()))
- return nil
-}
-
-func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) {
- // see if it's an enumeration string
- if enumVal, ok := enumValMap[value]; ok {
- return reflect.ValueOf(enumVal).Convert(t), nil
- }
-
- // check for an integer that matches an enumeration value
- eVal, err := strconv.Atoi(value)
- if err != nil {
- return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
- }
- for _, v := range enumValMap {
- if v == int32(eVal) {
- return reflect.ValueOf(eVal).Convert(t), nil
- }
- }
- return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
-}
-
-func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error {
- cval, err := convertEnum(value, f.Type(), enumValMap)
- if err != nil {
- return err
- }
- f.Set(cval)
- return nil
-}
-
-func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error {
- elemType := f.Type().Elem()
- f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
- for i, v := range values {
- result, err := convertEnum(v, elemType, enumValMap)
- if err != nil {
- return err
- }
- f.Index(i).Set(result)
- }
- return nil
-}
-
-var (
- convFromType = map[reflect.Kind]reflect.Value{
- reflect.String: reflect.ValueOf(String),
- reflect.Bool: reflect.ValueOf(Bool),
- reflect.Float64: reflect.ValueOf(Float64),
- reflect.Float32: reflect.ValueOf(Float32),
- reflect.Int64: reflect.ValueOf(Int64),
- reflect.Int32: reflect.ValueOf(Int32),
- reflect.Uint64: reflect.ValueOf(Uint64),
- reflect.Uint32: reflect.ValueOf(Uint32),
- reflect.Slice: reflect.ValueOf(Bytes),
- }
-)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel
deleted file mode 100644
index 7109d7932..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel
+++ /dev/null
@@ -1,21 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-package(default_visibility = ["//visibility:public"])
-
-go_library(
- name = "go_default_library",
- srcs = [
- "doc.go",
- "pattern.go",
- "readerfactory.go",
- "trie.go",
- ],
- importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities",
-)
-
-go_test(
- name = "go_default_test",
- size = "small",
- srcs = ["trie_test.go"],
- embed = [":go_default_library"],
-)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go
deleted file mode 100644
index cf79a4d58..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package utilities provides members for internal use in grpc-gateway.
-package utilities
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
deleted file mode 100644
index dfe7de486..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package utilities
-
-// An OpCode is a opcode of compiled path patterns.
-type OpCode int
-
-// These constants are the valid values of OpCode.
-const (
- // OpNop does nothing
- OpNop = OpCode(iota)
- // OpPush pushes a component to stack
- OpPush
- // OpLitPush pushes a component to stack if it matches to the literal
- OpLitPush
- // OpPushM concatenates the remaining components and pushes it to stack
- OpPushM
- // OpConcatN pops N items from stack, concatenates them and pushes it back to stack
- OpConcatN
- // OpCapture pops an item and binds it to the variable
- OpCapture
- // OpEnd is the least positive invalid opcode.
- OpEnd
-)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
deleted file mode 100644
index 6dd385466..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package utilities
-
-import (
- "bytes"
- "io"
- "io/ioutil"
-)
-
-// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
-// at the start of the stream
-func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
- b, err := ioutil.ReadAll(r)
- if err != nil {
- return nil, err
- }
-
- return func() io.Reader {
- return bytes.NewReader(b)
- }, nil
-}
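For reference, a hedged usage sketch of the removed IOReaderFactory, compiled against the upstream github.com/grpc-ecosystem/grpc-gateway module rather than this vendor tree (the payload is made up): buffer a body once, then hand out fresh readers that each start at byte zero.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	newReader, err := utilities.IOReaderFactory(strings.NewReader(`{"name":"web"}`))
	if err != nil {
		panic(err)
	}
	first, _ := ioutil.ReadAll(newReader())
	second, _ := ioutil.ReadAll(newReader())
	fmt.Println(string(first) == string(second)) // true: each reader re-reads the buffered bytes
}
```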
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
deleted file mode 100644
index c2b7b30dd..000000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package utilities
-
-import (
- "sort"
-)
-
-// DoubleArray is a Double Array implementation of trie on sequences of strings.
-type DoubleArray struct {
- // Encoding keeps an encoding from string to int
- Encoding map[string]int
- // Base is the base array of Double Array
- Base []int
- // Check is the check array of Double Array
- Check []int
-}
-
-// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
-func NewDoubleArray(seqs [][]string) *DoubleArray {
- da := &DoubleArray{Encoding: make(map[string]int)}
- if len(seqs) == 0 {
- return da
- }
-
- encoded := registerTokens(da, seqs)
- sort.Sort(byLex(encoded))
-
- root := node{row: -1, col: -1, left: 0, right: len(encoded)}
- addSeqs(da, encoded, 0, root)
-
- for i := len(da.Base); i > 0; i-- {
- if da.Check[i-1] != 0 {
- da.Base = da.Base[:i]
- da.Check = da.Check[:i]
- break
- }
- }
- return da
-}
-
-func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
- var result [][]int
- for _, seq := range seqs {
- var encoded []int
- for _, token := range seq {
- if _, ok := da.Encoding[token]; !ok {
- da.Encoding[token] = len(da.Encoding)
- }
- encoded = append(encoded, da.Encoding[token])
- }
- result = append(result, encoded)
- }
- for i := range result {
- result[i] = append(result[i], len(da.Encoding))
- }
- return result
-}
-
-type node struct {
- row, col int
- left, right int
-}
-
-func (n node) value(seqs [][]int) int {
- return seqs[n.row][n.col]
-}
-
-func (n node) children(seqs [][]int) []*node {
- var result []*node
- lastVal := int(-1)
- last := new(node)
- for i := n.left; i < n.right; i++ {
- if lastVal == seqs[i][n.col+1] {
- continue
- }
- last.right = i
- last = &node{
- row: i,
- col: n.col + 1,
- left: i,
- }
- result = append(result, last)
- }
- last.right = n.right
- return result
-}
-
-func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
- ensureSize(da, pos)
-
- children := n.children(seqs)
- var i int
- for i = 1; ; i++ {
- ok := func() bool {
- for _, child := range children {
- code := child.value(seqs)
- j := i + code
- ensureSize(da, j)
- if da.Check[j] != 0 {
- return false
- }
- }
- return true
- }()
- if ok {
- break
- }
- }
- da.Base[pos] = i
- for _, child := range children {
- code := child.value(seqs)
- j := i + code
- da.Check[j] = pos + 1
- }
- terminator := len(da.Encoding)
- for _, child := range children {
- code := child.value(seqs)
- if code == terminator {
- continue
- }
- j := i + code
- addSeqs(da, seqs, j, *child)
- }
-}
-
-func ensureSize(da *DoubleArray, i int) {
- for i >= len(da.Base) {
- da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
- da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
- }
-}
-
-type byLex [][]int
-
-func (l byLex) Len() int { return len(l) }
-func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
-func (l byLex) Less(i, j int) bool {
- si := l[i]
- sj := l[j]
- var k int
- for k = 0; k < len(si) && k < len(sj); k++ {
- if si[k] < sj[k] {
- return true
- }
- if si[k] > sj[k] {
- return false
- }
- }
- if k < len(sj) {
- return true
- }
- return false
-}
-
-// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
-func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
- if len(da.Base) == 0 {
- return false
- }
-
- var i int
- for _, t := range seq {
- code, ok := da.Encoding[t]
- if !ok {
- break
- }
- j := da.Base[i] + code
- if len(da.Check) <= j || da.Check[j] != i+1 {
- break
- }
- i = j
- }
- j := da.Base[i] + len(da.Encoding)
- if len(da.Check) <= j || da.Check[j] != i+1 {
- return false
- }
- return true
-}
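A hedged usage sketch of the removed DoubleArray trie, again against the upstream grpc-gateway module (the token sequences are illustrative): compile sequences once, then ask whether any compiled sequence is a prefix of an incoming sequence.

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	da := utilities.NewDoubleArray([][]string{
		{"v1", "jobs"},
		{"v1", "nodes"},
	})
	fmt.Println(da.HasCommonPrefix([]string{"v1", "jobs", "web"})) // true: {"v1","jobs"} is a prefix
	fmt.Println(da.HasCommonPrefix([]string{"v2", "jobs"}))        // false: no compiled sequence matches
}
```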
diff --git a/vendor/github.com/hashicorp/consul/ipaddr/detect.go b/vendor/github.com/hashicorp/consul/ipaddr/detect.go
new file mode 100644
index 000000000..1512a0049
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/ipaddr/detect.go
@@ -0,0 +1,138 @@
+package ipaddr
+
+import (
+ "fmt"
+ "net"
+)
+
+// GetPrivateIPv4 returns the list of private network IPv4 addresses on
+// all active interfaces.
+func GetPrivateIPv4() ([]*net.IPAddr, error) {
+ addresses, err := activeInterfaceAddresses()
+ if err != nil {
+ return nil, fmt.Errorf("Failed to get interface addresses: %v", err)
+ }
+
+ var addrs []*net.IPAddr
+ for _, rawAddr := range addresses {
+ var ip net.IP
+ switch addr := rawAddr.(type) {
+ case *net.IPAddr:
+ ip = addr.IP
+ case *net.IPNet:
+ ip = addr.IP
+ default:
+ continue
+ }
+ if ip.To4() == nil {
+ continue
+ }
+ if !isPrivate(ip) {
+ continue
+ }
+ addrs = append(addrs, &net.IPAddr{IP: ip})
+ }
+ return addrs, nil
+}
+
+// GetPublicIPv6 returns the list of all public IPv6 addresses
+// on all active interfaces.
+func GetPublicIPv6() ([]*net.IPAddr, error) {
+ addresses, err := net.InterfaceAddrs()
+ if err != nil {
+ return nil, fmt.Errorf("Failed to get interface addresses: %v", err)
+ }
+
+ var addrs []*net.IPAddr
+ for _, rawAddr := range addresses {
+ var ip net.IP
+ switch addr := rawAddr.(type) {
+ case *net.IPAddr:
+ ip = addr.IP
+ case *net.IPNet:
+ ip = addr.IP
+ default:
+ continue
+ }
+ if ip.To4() != nil {
+ continue
+ }
+ if isPrivate(ip) {
+ continue
+ }
+ addrs = append(addrs, &net.IPAddr{IP: ip})
+ }
+ return addrs, nil
+}
+
+// privateBlocks contains non-forwardable address blocks which are used
+// for private networks. RFC 6890 provides an overview of special
+// address blocks.
+var privateBlocks = []*net.IPNet{
+ parseCIDR("10.0.0.0/8"), // RFC 1918 IPv4 private network address
+ parseCIDR("100.64.0.0/10"), // RFC 6598 IPv4 shared address space
+ parseCIDR("127.0.0.0/8"), // RFC 1122 IPv4 loopback address
+ parseCIDR("169.254.0.0/16"), // RFC 3927 IPv4 link local address
+ parseCIDR("172.16.0.0/12"), // RFC 1918 IPv4 private network address
+ parseCIDR("192.0.0.0/24"), // RFC 6890 IPv4 IANA address
+ parseCIDR("192.0.2.0/24"), // RFC 5737 IPv4 documentation address
+ parseCIDR("192.168.0.0/16"), // RFC 1918 IPv4 private network address
+ parseCIDR("::1/128"), // RFC 1884 IPv6 loopback address
+ parseCIDR("fe80::/10"), // RFC 4291 IPv6 link local addresses
+ parseCIDR("fc00::/7"), // RFC 4193 IPv6 unique local addresses
+ parseCIDR("fec0::/10"), // RFC 1884 IPv6 site-local addresses
+ parseCIDR("2001:db8::/32"), // RFC 3849 IPv6 documentation address
+}
+
+func parseCIDR(s string) *net.IPNet {
+ _, block, err := net.ParseCIDR(s)
+ if err != nil {
+ panic(fmt.Sprintf("Bad CIDR %s: %s", s, err))
+ }
+ return block
+}
+
+func isPrivate(ip net.IP) bool {
+ for _, priv := range privateBlocks {
+ if priv.Contains(ip) {
+ return true
+ }
+ }
+ return false
+}
+
+// Returns addresses from interfaces that are up

+func activeInterfaceAddresses() ([]net.Addr, error) {
+ var upAddrs []net.Addr
+ var loAddrs []net.Addr
+
+ interfaces, err := net.Interfaces()
+ if err != nil {
+ return nil, fmt.Errorf("Failed to get interfaces: %v", err)
+ }
+
+ for _, iface := range interfaces {
+ // Require interface to be up
+ if iface.Flags&net.FlagUp == 0 {
+ continue
+ }
+
+ addresses, err := iface.Addrs()
+ if err != nil {
+ return nil, fmt.Errorf("Failed to get interface addresses: %v", err)
+ }
+
+ if iface.Flags&net.FlagLoopback != 0 {
+ loAddrs = append(loAddrs, addresses...)
+ continue
+ }
+
+ upAddrs = append(upAddrs, addresses...)
+ }
+
+ if len(upAddrs) == 0 {
+ return loAddrs, nil
+ }
+
+ return upAddrs, nil
+}
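A hedged usage sketch of the new detect.go helpers (roughly how Consul's agent picks bind/advertise defaults; the "advertise address" label is illustrative): take the first private IPv4 address found on an active interface.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/ipaddr"
)

func main() {
	addrs, err := ipaddr.GetPrivateIPv4()
	if err != nil {
		panic(err)
	}
	if len(addrs) == 0 {
		fmt.Println("no private IPv4 address found")
		return
	}
	fmt.Println("advertise address:", addrs[0].IP.String())
}
```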
diff --git a/vendor/github.com/hashicorp/consul/ipaddr/ipaddr.go b/vendor/github.com/hashicorp/consul/ipaddr/ipaddr.go
new file mode 100644
index 000000000..321c8d9b1
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/ipaddr/ipaddr.go
@@ -0,0 +1,59 @@
+package ipaddr
+
+import (
+ "fmt"
+ "net"
+ "reflect"
+ "strconv"
+)
+
+// FormatAddressPort is a helper for net.JoinHostPort that takes the port as an int

+func FormatAddressPort(address string, port int) string {
+ return net.JoinHostPort(address, strconv.Itoa(port))
+}
+
+// IsAny checks if the given ip address is an IPv4 or IPv6 ANY address. ip
+// can be either a *net.IP or a string. It panics on another type.
+func IsAny(ip interface{}) bool {
+ return IsAnyV4(ip) || IsAnyV6(ip)
+}
+
+// IsAnyV4 checks if the given ip address is an IPv4 ANY address. ip
+// can be either a *net.IP or a string. It panics on another type.
+func IsAnyV4(ip interface{}) bool {
+ return iptos(ip) == "0.0.0.0"
+}
+
+// IsAnyV6 checks if the given ip address is an IPv6 ANY address. ip
+// can be either a *net.IP or a string. It panics on another type.
+func IsAnyV6(ip interface{}) bool {
+ ips := iptos(ip)
+ return ips == "::" || ips == "[::]"
+}
+
+func iptos(ip interface{}) string {
+ if ip == nil || reflect.TypeOf(ip).Kind() == reflect.Ptr && reflect.ValueOf(ip).IsNil() {
+ return ""
+ }
+ switch x := ip.(type) {
+ case string:
+ return x
+ case *string:
+ if x == nil {
+ return ""
+ }
+ return *x
+ case net.IP:
+ return x.String()
+ case *net.IP:
+ return x.String()
+ case *net.IPAddr:
+ return x.IP.String()
+ case *net.TCPAddr:
+ return x.IP.String()
+ case *net.UDPAddr:
+ return x.IP.String()
+ default:
+ panic(fmt.Sprintf("invalid type: %T", ip))
+ }
+}
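A hedged sketch of the helpers above (addresses and port are made up): IsAny flags wildcard bind addresses, which are not usable as advertise addresses, and FormatAddressPort joins a host with a numeric port.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/ipaddr"
)

func main() {
	fmt.Println(ipaddr.IsAny("0.0.0.0"))                    // true: IPv4 ANY
	fmt.Println(ipaddr.IsAny("[::]"))                       // true: IPv6 ANY
	fmt.Println(ipaddr.IsAny("10.0.0.1"))                   // false
	fmt.Println(ipaddr.FormatAddressPort("10.0.0.1", 4646)) // 10.0.0.1:4646
	fmt.Println(ipaddr.FormatAddressPort("::1", 4646))      // [::1]:4646
}
```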
diff --git a/vendor/github.com/hashicorp/consul/lib/testing_httpserver.go b/vendor/github.com/hashicorp/consul/lib/testing_httpserver.go
new file mode 100644
index 000000000..1d469bf08
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/lib/testing_httpserver.go
@@ -0,0 +1,30 @@
+package lib
+
+import (
+ "net/http"
+
+ "github.com/hashicorp/consul/ipaddr"
+ "github.com/hashicorp/consul/sdk/freeport"
+)
+
+// StartTestServer fires up a web server on a random unused port to serve the
+// given handler body. The address it is listening on is returned. When the
+// test case terminates the server will be stopped via cleanup functions.
+//
+// We can't directly use httptest.Server here because that only thinks a port
+// is free if it's not bound. Consul tests frequently reserve ports via
+// `sdk/freeport` so you can have one part of the test try to use a port and
+// _know_ nothing is listening. If you simply assumed unbound ports were free
+// you'd end up with test cross-talk and weirdness.
+func StartTestServer(handler http.Handler) (string, func()) {
+ ports := freeport.MustTake(1)
+ addr := ipaddr.FormatAddressPort("127.0.0.1", ports[0])
+
+ server := &http.Server{Addr: addr, Handler: handler}
+ go server.ListenAndServe()
+
+ return addr, func() {
+ server.Close()
+ freeport.Return(ports)
+ }
+}
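A hedged usage sketch of StartTestServer in a test file (test name and handler are illustrative): serve a handler on a freeport-reserved address and always call the returned cleanup function.

```go
package lib_test

import (
	"io/ioutil"
	"net/http"
	"testing"

	"github.com/hashicorp/consul/lib"
)

func TestStartTestServer(t *testing.T) {
	addr, cleanup := lib.StartTestServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	}))
	defer cleanup() // stops the server and returns the reserved port to freeport

	// The listener is started in a goroutine; in practice it binds before the
	// request lands, but a real test may want a small retry loop here.
	resp, err := http.Get("http://" + addr)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	if string(body) != "ok" {
		t.Fatalf("unexpected body: %q", body)
	}
}
```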
diff --git a/vendor/github.com/hashicorp/consul/logging/names.go b/vendor/github.com/hashicorp/consul/logging/names.go
index 69551f42f..b83b01e96 100644
--- a/vendor/github.com/hashicorp/consul/logging/names.go
+++ b/vendor/github.com/hashicorp/consul/logging/names.go
@@ -31,6 +31,7 @@ const (
Memberlist string = "memberlist"
MeshGateway string = "mesh_gateway"
Namespace string = "namespace"
+ NetworkAreas string = "network_areas"
Operator string = "operator"
PreparedQuery string = "prepared_query"
Proxy string = "proxy"
diff --git a/vendor/github.com/hashicorp/consul/version/version.go b/vendor/github.com/hashicorp/consul/version/version.go
index 760dae108..7dc6c3582 100644
--- a/vendor/github.com/hashicorp/consul/version/version.go
+++ b/vendor/github.com/hashicorp/consul/version/version.go
@@ -15,12 +15,12 @@ var (
//
// Version must conform to the format expected by github.com/hashicorp/go-version
// for tests to work.
- Version = "1.7.0"
+ Version = "1.7.7"
// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release
// such as "dev" (in development), "beta", "rc1", etc.
- VersionPrerelease = "dev"
+ VersionPrerelease = ""
)
// GetHumanVersion composes the parts of the version in a way that's suitable
diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go
index 204afb420..b05a49a69 100644
--- a/vendor/github.com/mitchellh/go-testing-interface/testing.go
+++ b/vendor/github.com/mitchellh/go-testing-interface/testing.go
@@ -22,6 +22,7 @@ type T interface {
Log(args ...interface{})
Logf(format string, args ...interface{})
Name() string
+ Parallel()
Skip(args ...interface{})
SkipNow()
Skipf(format string, args ...interface{})
@@ -31,10 +32,12 @@ type T interface {
// RuntimeT implements T and can be instantiated and run at runtime to
// mimic *testing.T behavior. Unlike *testing.T, this will simply panic
// for calls to Fatal. For calls to Error, you'll have to check the errors
-// list to determine whether to exit yourself. Name and Skip methods are
-// unimplemented noops.
+// list to determine whether to exit yourself.
+//
+// Parallel does not do anything.
type RuntimeT struct {
- failed bool
+ failed bool
+ skipped bool
}
func (t *RuntimeT) Error(args ...interface{}) {
@@ -77,8 +80,26 @@ func (t *RuntimeT) Logf(format string, args ...interface{}) {
log.Println(fmt.Sprintf(format, args...))
}
-func (t *RuntimeT) Name() string { return "" }
-func (t *RuntimeT) Skip(args ...interface{}) {}
-func (t *RuntimeT) SkipNow() {}
-func (t *RuntimeT) Skipf(format string, args ...interface{}) {}
-func (t *RuntimeT) Skipped() bool { return false }
+func (t *RuntimeT) Name() string {
+ return ""
+}
+
+func (t *RuntimeT) Parallel() {}
+
+func (t *RuntimeT) Skip(args ...interface{}) {
+ log.Print(args...)
+ t.SkipNow()
+}
+
+func (t *RuntimeT) SkipNow() {
+ t.skipped = true
+}
+
+func (t *RuntimeT) Skipf(format string, args ...interface{}) {
+ log.Printf(format, args...)
+ t.SkipNow()
+}
+
+func (t *RuntimeT) Skipped() bool {
+ return t.skipped
+}
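A hedged sketch of what this bump buys downstream helpers (the requireLinux helper is made up): code written against the T interface works with both *testing.T and RuntimeT, and RuntimeT now tracks skips instead of silently ignoring them.

```go
package main

import (
	"fmt"

	testing "github.com/mitchellh/go-testing-interface"
)

// requireLinux is a helper written against the T interface, so it can be
// driven by *testing.T in tests or by RuntimeT at runtime.
func requireLinux(t testing.T, goos string) {
	if goos != "linux" {
		t.Skip("test requires linux")
	}
}

func main() {
	rt := &testing.RuntimeT{}
	requireLinux(rt, "darwin")
	fmt.Println(rt.Skipped()) // true: Skip now records the skipped state
}
```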
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client.go b/vendor/go.opencensus.io/plugin/ocgrpc/client.go
deleted file mode 100644
index 28fddb844..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/client.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocgrpc
-
-import (
- "context"
- "go.opencensus.io/trace"
-
- "google.golang.org/grpc/stats"
-)
-
-// ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and
-// traces. Use with gRPC clients only.
-type ClientHandler struct {
- // StartOptions allows configuring the StartOptions used to create new spans.
- //
- // StartOptions.SpanKind will always be set to trace.SpanKindClient
- // for spans started by this handler.
- StartOptions trace.StartOptions
-}
-
-// HandleConn exists to satisfy gRPC stats.Handler.
-func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) {
- // no-op
-}
-
-// TagConn exists to satisfy gRPC stats.Handler.
-func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
- // no-op
- return ctx
-}
-
-// HandleRPC implements per-RPC tracing and stats instrumentation.
-func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
- traceHandleRPC(ctx, rs)
- statsHandleRPC(ctx, rs)
-}
-
-// TagRPC implements per-RPC context management.
-func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
- ctx = c.traceTagRPC(ctx, rti)
- ctx = c.statsTagRPC(ctx, rti)
- return ctx
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go
deleted file mode 100644
index abe978b67..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
-)
-
-// The following variables are measures are recorded by ClientHandler:
-var (
- ClientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
- ClientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes)
- ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
- ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes)
- ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds)
- ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds)
-)
-
-// Predefined views may be registered to collect data for the above measures.
-// As always, you may also define your own custom views over measures collected by this
-// package. These are declared as a convenience only; none are registered by
-// default.
-var (
- ClientSentBytesPerRPCView = &view.View{
- Measure: ClientSentBytesPerRPC,
- Name: "grpc.io/client/sent_bytes_per_rpc",
- Description: "Distribution of bytes sent per RPC, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultBytesDistribution,
- }
-
- ClientReceivedBytesPerRPCView = &view.View{
- Measure: ClientReceivedBytesPerRPC,
- Name: "grpc.io/client/received_bytes_per_rpc",
- Description: "Distribution of bytes received per RPC, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultBytesDistribution,
- }
-
- ClientRoundtripLatencyView = &view.View{
- Measure: ClientRoundtripLatency,
- Name: "grpc.io/client/roundtrip_latency",
- Description: "Distribution of round-trip latency, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultMillisecondsDistribution,
- }
-
- ClientCompletedRPCsView = &view.View{
- Measure: ClientRoundtripLatency,
- Name: "grpc.io/client/completed_rpcs",
- Description: "Count of RPCs by method and status.",
- TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
- Aggregation: view.Count(),
- }
-
- ClientSentMessagesPerRPCView = &view.View{
- Measure: ClientSentMessagesPerRPC,
- Name: "grpc.io/client/sent_messages_per_rpc",
- Description: "Distribution of sent messages count per RPC, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultMessageCountDistribution,
- }
-
- ClientReceivedMessagesPerRPCView = &view.View{
- Measure: ClientReceivedMessagesPerRPC,
- Name: "grpc.io/client/received_messages_per_rpc",
- Description: "Distribution of received messages count per RPC, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultMessageCountDistribution,
- }
-
- ClientServerLatencyView = &view.View{
- Measure: ClientServerLatency,
- Name: "grpc.io/client/server_latency",
- Description: "Distribution of server latency as viewed by client, by method.",
- TagKeys: []tag.Key{KeyClientMethod},
- Aggregation: DefaultMillisecondsDistribution,
- }
-)
-
-// DefaultClientViews are the default client views provided by this package.
-var DefaultClientViews = []*view.View{
- ClientSentBytesPerRPCView,
- ClientReceivedBytesPerRPCView,
- ClientRoundtripLatencyView,
- ClientCompletedRPCsView,
-}
-
-// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count.
-// TODO(acetechnologist): This is temporary and will need to be replaced by a
-// mechanism to load these defaults from a common repository/config shared by
-// all supported languages. Likely a serialized protobuf of these defaults.
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go
deleted file mode 100644
index 18821c7f5..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "time"
-
- "context"
- "go.opencensus.io/tag"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/stats"
-)
-
-// statsTagRPC gets the tag.Map populated by the application code, serializes
-// its tags into the GRPC metadata in order to be sent to the server.
-func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
- startTime := time.Now()
- if info == nil {
- if grpclog.V(2) {
- grpclog.Infof("clientHandler.TagRPC called with nil info.", info.FullMethodName)
- }
- return ctx
- }
-
- d := &rpcData{
- startTime: startTime,
- method: info.FullMethodName,
- }
- ts := tag.FromContext(ctx)
- if ts != nil {
- encoded := tag.Encode(ts)
- ctx = stats.SetTags(ctx, encoded)
- }
-
- return context.WithValue(ctx, rpcDataKey, d)
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go
deleted file mode 100644
index 1370323fb..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ocgrpc contains OpenCensus stats and trace
-// integrations for gRPC.
-//
-// Use ServerHandler for servers and ClientHandler for clients.
-package ocgrpc // import "go.opencensus.io/plugin/ocgrpc"
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/vendor/go.opencensus.io/plugin/ocgrpc/server.go
deleted file mode 100644
index 15ada839d..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/server.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocgrpc
-
-import (
- "context"
- "go.opencensus.io/trace"
-
- "google.golang.org/grpc/stats"
-)
-
-// ServerHandler implements gRPC stats.Handler recording OpenCensus stats and
-// traces. Use with gRPC servers.
-//
-// When installed (see Example), tracing metadata is read from inbound RPCs
-// by default. If no tracing metadata is present, or if the tracing metadata is
-// present but the SpanContext isn't sampled, then a new trace may be started
-// (as determined by Sampler).
-type ServerHandler struct {
- // IsPublicEndpoint may be set to true to always start a new trace around
- // each RPC. Any SpanContext in the RPC metadata will be added as a linked
- // span instead of making it the parent of the span created around the
- // server RPC.
- //
- // Be aware that if you leave this false (the default) on a public-facing
- // server, callers will be able to send tracing metadata in gRPC headers
- // and trigger traces in your backend.
- IsPublicEndpoint bool
-
- // StartOptions to use for to spans started around RPCs handled by this server.
- //
- // These will apply even if there is tracing metadata already
- // present on the inbound RPC but the SpanContext is not sampled. This
- // ensures that each service has some opportunity to be traced. If you would
- // like to not add any additional traces for this gRPC service, set:
- //
- // StartOptions.Sampler = trace.ProbabilitySampler(0.0)
- //
- // StartOptions.SpanKind will always be set to trace.SpanKindServer
- // for spans started by this handler.
- StartOptions trace.StartOptions
-}
-
-var _ stats.Handler = (*ServerHandler)(nil)
-
-// HandleConn exists to satisfy gRPC stats.Handler.
-func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) {
- // no-op
-}
-
-// TagConn exists to satisfy gRPC stats.Handler.
-func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
- // no-op
- return ctx
-}
-
-// HandleRPC implements per-RPC tracing and stats instrumentation.
-func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
- traceHandleRPC(ctx, rs)
- statsHandleRPC(ctx, rs)
-}
-
-// TagRPC implements per-RPC context management.
-func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
- ctx = s.traceTagRPC(ctx, rti)
- ctx = s.statsTagRPC(ctx, rti)
- return ctx
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go
deleted file mode 100644
index 609d9ed24..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
-)
-
-// The following variables are measures are recorded by ServerHandler:
-var (
- ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
- ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes)
- ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
- ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes)
- ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds)
-)
-
-// TODO(acetechnologist): This is temporary and will need to be replaced by a
-// mechanism to load these defaults from a common repository/config shared by
-// all supported languages. Likely a serialized protobuf of these defaults.
-
-// Predefined views may be registered to collect data for the above measures.
-// As always, you may also define your own custom views over measures collected by this
-// package. These are declared as a convenience only; none are registered by
-// default.
-var (
- ServerReceivedBytesPerRPCView = &view.View{
- Name: "grpc.io/server/received_bytes_per_rpc",
- Description: "Distribution of received bytes per RPC, by method.",
- Measure: ServerReceivedBytesPerRPC,
- TagKeys: []tag.Key{KeyServerMethod},
- Aggregation: DefaultBytesDistribution,
- }
-
- ServerSentBytesPerRPCView = &view.View{
- Name: "grpc.io/server/sent_bytes_per_rpc",
- Description: "Distribution of total sent bytes per RPC, by method.",
- Measure: ServerSentBytesPerRPC,
- TagKeys: []tag.Key{KeyServerMethod},
- Aggregation: DefaultBytesDistribution,
- }
-
- ServerLatencyView = &view.View{
- Name: "grpc.io/server/server_latency",
- Description: "Distribution of server latency in milliseconds, by method.",
- TagKeys: []tag.Key{KeyServerMethod},
- Measure: ServerLatency,
- Aggregation: DefaultMillisecondsDistribution,
- }
-
- ServerCompletedRPCsView = &view.View{
- Name: "grpc.io/server/completed_rpcs",
- Description: "Count of RPCs by method and status.",
- TagKeys: []tag.Key{KeyServerMethod, KeyServerStatus},
- Measure: ServerLatency,
- Aggregation: view.Count(),
- }
-
- ServerReceivedMessagesPerRPCView = &view.View{
- Name: "grpc.io/server/received_messages_per_rpc",
- Description: "Distribution of messages received count per RPC, by method.",
- TagKeys: []tag.Key{KeyServerMethod},
- Measure: ServerReceivedMessagesPerRPC,
- Aggregation: DefaultMessageCountDistribution,
- }
-
- ServerSentMessagesPerRPCView = &view.View{
- Name: "grpc.io/server/sent_messages_per_rpc",
- Description: "Distribution of messages sent count per RPC, by method.",
- TagKeys: []tag.Key{KeyServerMethod},
- Measure: ServerSentMessagesPerRPC,
- Aggregation: DefaultMessageCountDistribution,
- }
-)
-
-// DefaultServerViews are the default server views provided by this package.
-var DefaultServerViews = []*view.View{
- ServerReceivedBytesPerRPCView,
- ServerSentBytesPerRPCView,
- ServerLatencyView,
- ServerCompletedRPCsView,
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go
deleted file mode 100644
index afcef023a..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "time"
-
- "context"
-
- "go.opencensus.io/tag"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/stats"
-)
-
-// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from
-// it and creates a new tag.Map and puts them into the returned context.
-func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
- startTime := time.Now()
- if info == nil {
- if grpclog.V(2) {
- grpclog.Infof("opencensus: TagRPC called with nil info.")
- }
- return ctx
- }
- d := &rpcData{
- startTime: startTime,
- method: info.FullMethodName,
- }
- propagated := h.extractPropagatedTags(ctx)
- ctx = tag.NewContext(ctx, propagated)
- ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName)))
- return context.WithValue(ctx, rpcDataKey, d)
-}
-
-// extractPropagatedTags creates a new tag map containing the tags extracted from the
-// gRPC metadata.
-func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map {
- buf := stats.Tags(ctx)
- if buf == nil {
- return nil
- }
- propagated, err := tag.Decode(buf)
- if err != nil {
- if grpclog.V(2) {
- grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata failed to decode: %v", err)
- }
- return nil
- }
- return propagated
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go
deleted file mode 100644
index 89cac9c4e..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
- "context"
- "strconv"
- "strings"
- "sync/atomic"
- "time"
-
- "go.opencensus.io/metric/metricdata"
- ocstats "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
- "go.opencensus.io/trace"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-)
-
-type grpcInstrumentationKey string
-
-// rpcData holds the instrumentation RPC data that is needed between the start
-// and end of an call. It holds the info that this package needs to keep track
-// of between the various GRPC events.
-type rpcData struct {
- // reqCount and respCount has to be the first words
- // in order to be 64-aligned on 32-bit architectures.
- sentCount, sentBytes, recvCount, recvBytes int64 // access atomically
-
- // startTime represents the time at which TagRPC was invoked at the
- // beginning of an RPC. It is an appoximation of the time when the
- // application code invoked GRPC code.
- startTime time.Time
- method string
-}
-
-// The following variables define the default hard-coded auxiliary data used by
-// both the default GRPC client and GRPC server metrics.
-var (
- DefaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
- DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
- DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
-)
-
-// Server tags are applied to the context used to process each RPC, as well as
-// the measures at the end of each RPC.
-var (
- KeyServerMethod = tag.MustNewKey("grpc_server_method")
- KeyServerStatus = tag.MustNewKey("grpc_server_status")
-)
-
-// Client tags are applied to measures at the end of each RPC.
-var (
- KeyClientMethod = tag.MustNewKey("grpc_client_method")
- KeyClientStatus = tag.MustNewKey("grpc_client_status")
-)
-
-var (
- rpcDataKey = grpcInstrumentationKey("opencensus-rpcData")
-)
-
-func methodName(fullname string) string {
- return strings.TrimLeft(fullname, "/")
-}
-
-// statsHandleRPC processes the RPC events.
-func statsHandleRPC(ctx context.Context, s stats.RPCStats) {
- switch st := s.(type) {
- case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer:
- // do nothing for client
- case *stats.OutPayload:
- handleRPCOutPayload(ctx, st)
- case *stats.InPayload:
- handleRPCInPayload(ctx, st)
- case *stats.End:
- handleRPCEnd(ctx, st)
- default:
- grpclog.Infof("unexpected stats: %T", st)
- }
-}
-
-func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) {
- d, ok := ctx.Value(rpcDataKey).(*rpcData)
- if !ok {
- if grpclog.V(2) {
- grpclog.Infoln("Failed to retrieve *rpcData from context.")
- }
- return
- }
-
- atomic.AddInt64(&d.sentBytes, int64(s.Length))
- atomic.AddInt64(&d.sentCount, 1)
-}
-
-func handleRPCInPayload(ctx context.Context, s *stats.InPayload) {
- d, ok := ctx.Value(rpcDataKey).(*rpcData)
- if !ok {
- if grpclog.V(2) {
- grpclog.Infoln("Failed to retrieve *rpcData from context.")
- }
- return
- }
-
- atomic.AddInt64(&d.recvBytes, int64(s.Length))
- atomic.AddInt64(&d.recvCount, 1)
-}
-
-func handleRPCEnd(ctx context.Context, s *stats.End) {
- d, ok := ctx.Value(rpcDataKey).(*rpcData)
- if !ok {
- if grpclog.V(2) {
- grpclog.Infoln("Failed to retrieve *rpcData from context.")
- }
- return
- }
-
- elapsedTime := time.Since(d.startTime)
-
- var st string
- if s.Error != nil {
- s, ok := status.FromError(s.Error)
- if ok {
- st = statusCodeToString(s)
- }
- } else {
- st = "OK"
- }
-
- latencyMillis := float64(elapsedTime) / float64(time.Millisecond)
- attachments := getSpanCtxAttachment(ctx)
- if s.Client {
- ocstats.RecordWithOptions(ctx,
- ocstats.WithTags(
- tag.Upsert(KeyClientMethod, methodName(d.method)),
- tag.Upsert(KeyClientStatus, st)),
- ocstats.WithAttachments(attachments),
- ocstats.WithMeasurements(
- ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
- ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
- ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
- ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
- ClientRoundtripLatency.M(latencyMillis)))
- } else {
- ocstats.RecordWithOptions(ctx,
- ocstats.WithTags(
- tag.Upsert(KeyServerStatus, st),
- ),
- ocstats.WithAttachments(attachments),
- ocstats.WithMeasurements(
- ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
- ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
- ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
- ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
- ServerLatency.M(latencyMillis)))
- }
-}
-
-func statusCodeToString(s *status.Status) string {
- // see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
- switch c := s.Code(); c {
- case codes.OK:
- return "OK"
- case codes.Canceled:
- return "CANCELLED"
- case codes.Unknown:
- return "UNKNOWN"
- case codes.InvalidArgument:
- return "INVALID_ARGUMENT"
- case codes.DeadlineExceeded:
- return "DEADLINE_EXCEEDED"
- case codes.NotFound:
- return "NOT_FOUND"
- case codes.AlreadyExists:
- return "ALREADY_EXISTS"
- case codes.PermissionDenied:
- return "PERMISSION_DENIED"
- case codes.ResourceExhausted:
- return "RESOURCE_EXHAUSTED"
- case codes.FailedPrecondition:
- return "FAILED_PRECONDITION"
- case codes.Aborted:
- return "ABORTED"
- case codes.OutOfRange:
- return "OUT_OF_RANGE"
- case codes.Unimplemented:
- return "UNIMPLEMENTED"
- case codes.Internal:
- return "INTERNAL"
- case codes.Unavailable:
- return "UNAVAILABLE"
- case codes.DataLoss:
- return "DATA_LOSS"
- case codes.Unauthenticated:
- return "UNAUTHENTICATED"
- default:
- return "CODE_" + strconv.FormatInt(int64(c), 10)
- }
-}
-
-func getSpanCtxAttachment(ctx context.Context) metricdata.Attachments {
- attachments := map[string]interface{}{}
- span := trace.FromContext(ctx)
- if span == nil {
- return attachments
- }
- spanCtx := span.SpanContext()
- if spanCtx.IsSampled() {
- attachments[metricdata.AttachmentKeySpanContext] = spanCtx
- }
- return attachments
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go
deleted file mode 100644
index fef582756..000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocgrpc
-
-import (
- "strings"
-
- "google.golang.org/grpc/codes"
-
- "context"
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-)
-
-const traceContextKey = "grpc-trace-bin"
-
-// TagRPC creates a new trace span for the client side of the RPC.
-//
-// It returns ctx with the new trace span added and a serialization of the
-// SpanContext added to the outgoing gRPC metadata.
-func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
- name := strings.TrimPrefix(rti.FullMethodName, "/")
- name = strings.Replace(name, "/", ".", -1)
- ctx, span := trace.StartSpan(ctx, name,
- trace.WithSampler(c.StartOptions.Sampler),
- trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC
- traceContextBinary := propagation.Binary(span.SpanContext())
- return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary))
-}
-
-// TagRPC creates a new trace span for the server side of the RPC.
-//
-// It checks the incoming gRPC metadata in ctx for a SpanContext, and if
-// it finds one, uses that SpanContext as the parent context of the new span.
-//
-// It returns ctx, with the new trace span added.
-func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
- md, _ := metadata.FromIncomingContext(ctx)
- name := strings.TrimPrefix(rti.FullMethodName, "/")
- name = strings.Replace(name, "/", ".", -1)
- traceContext := md[traceContextKey]
- var (
- parent trace.SpanContext
- haveParent bool
- )
- if len(traceContext) > 0 {
- // Metadata with keys ending in -bin are actually binary. They are base64
- // encoded before being put on the wire, see:
- // https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata
- traceContextBinary := []byte(traceContext[0])
- parent, haveParent = propagation.FromBinary(traceContextBinary)
- if haveParent && !s.IsPublicEndpoint {
- ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent,
- trace.WithSpanKind(trace.SpanKindServer),
- trace.WithSampler(s.StartOptions.Sampler),
- )
- return ctx
- }
- }
- ctx, span := trace.StartSpan(ctx, name,
- trace.WithSpanKind(trace.SpanKindServer),
- trace.WithSampler(s.StartOptions.Sampler))
- if haveParent {
- span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild})
- }
- return ctx
-}
-
-func traceHandleRPC(ctx context.Context, rs stats.RPCStats) {
- span := trace.FromContext(ctx)
- // TODO: compressed and uncompressed sizes are not populated in every message.
- switch rs := rs.(type) {
- case *stats.Begin:
- span.AddAttributes(
- trace.BoolAttribute("Client", rs.Client),
- trace.BoolAttribute("FailFast", rs.FailFast))
- case *stats.InPayload:
- span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength))
- case *stats.OutPayload:
- span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength))
- case *stats.End:
- if rs.Error != nil {
- s, ok := status.FromError(rs.Error)
- if ok {
- span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()})
- } else {
- span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()})
- }
- }
- span.End()
- }
-}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go
deleted file mode 100644
index 65ab1e996..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package tracecontext contains HTTP propagator for TraceContext standard.
-// See https://github.com/w3c/distributed-tracing for more information.
-package tracecontext // import "go.opencensus.io/plugin/ochttp/propagation/tracecontext"
-
-import (
- "encoding/hex"
- "fmt"
- "net/http"
- "net/textproto"
- "regexp"
- "strings"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
- "go.opencensus.io/trace/tracestate"
-)
-
-const (
- supportedVersion = 0
- maxVersion = 254
- maxTracestateLen = 512
- traceparentHeader = "traceparent"
- tracestateHeader = "tracestate"
- trimOWSRegexFmt = `^[\x09\x20]*(.*[^\x20\x09])[\x09\x20]*$`
-)
-
-var trimOWSRegExp = regexp.MustCompile(trimOWSRegexFmt)
-
-var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
-
-// HTTPFormat implements the TraceContext trace propagation format.
-type HTTPFormat struct{}
-
-// SpanContextFromRequest extracts a span context from incoming requests.
-func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
- h, ok := getRequestHeader(req, traceparentHeader, false)
- if !ok {
- return trace.SpanContext{}, false
- }
- sections := strings.Split(h, "-")
- if len(sections) < 4 {
- return trace.SpanContext{}, false
- }
-
- if len(sections[0]) != 2 {
- return trace.SpanContext{}, false
- }
- ver, err := hex.DecodeString(sections[0])
- if err != nil {
- return trace.SpanContext{}, false
- }
- version := int(ver[0])
- if version > maxVersion {
- return trace.SpanContext{}, false
- }
-
- if version == 0 && len(sections) != 4 {
- return trace.SpanContext{}, false
- }
-
- if len(sections[1]) != 32 {
- return trace.SpanContext{}, false
- }
- tid, err := hex.DecodeString(sections[1])
- if err != nil {
- return trace.SpanContext{}, false
- }
- copy(sc.TraceID[:], tid)
-
- if len(sections[2]) != 16 {
- return trace.SpanContext{}, false
- }
- sid, err := hex.DecodeString(sections[2])
- if err != nil {
- return trace.SpanContext{}, false
- }
- copy(sc.SpanID[:], sid)
-
- opts, err := hex.DecodeString(sections[3])
- if err != nil || len(opts) < 1 {
- return trace.SpanContext{}, false
- }
- sc.TraceOptions = trace.TraceOptions(opts[0])
-
- // Don't allow all zero trace or span ID.
- if sc.TraceID == [16]byte{} || sc.SpanID == [8]byte{} {
- return trace.SpanContext{}, false
- }
-
- sc.Tracestate = tracestateFromRequest(req)
- return sc, true
-}
-
-// getRequestHeader returns a combined header field according to RFC7230 section 3.2.2.
-// If commaSeparated is true, multiple header fields with the same field name using be
-// combined using ",".
-// If no header was found using the given name, "ok" would be false.
-// If more than one headers was found using the given name, while commaSeparated is false,
-// "ok" would be false.
-func getRequestHeader(req *http.Request, name string, commaSeparated bool) (hdr string, ok bool) {
- v := req.Header[textproto.CanonicalMIMEHeaderKey(name)]
- switch len(v) {
- case 0:
- return "", false
- case 1:
- return v[0], true
- default:
- return strings.Join(v, ","), commaSeparated
- }
-}
-
-// TODO(rghetia): return an empty Tracestate when parsing tracestate header encounters an error.
-// Revisit to return additional boolean value to indicate parsing error when following issues
-// are resolved.
-// https://github.com/w3c/distributed-tracing/issues/172
-// https://github.com/w3c/distributed-tracing/issues/175
-func tracestateFromRequest(req *http.Request) *tracestate.Tracestate {
- h, _ := getRequestHeader(req, tracestateHeader, true)
- if h == "" {
- return nil
- }
-
- var entries []tracestate.Entry
- pairs := strings.Split(h, ",")
- hdrLenWithoutOWS := len(pairs) - 1 // Number of commas
- for _, pair := range pairs {
- matches := trimOWSRegExp.FindStringSubmatch(pair)
- if matches == nil {
- return nil
- }
- pair = matches[1]
- hdrLenWithoutOWS += len(pair)
- if hdrLenWithoutOWS > maxTracestateLen {
- return nil
- }
- kv := strings.Split(pair, "=")
- if len(kv) != 2 {
- return nil
- }
- entries = append(entries, tracestate.Entry{Key: kv[0], Value: kv[1]})
- }
- ts, err := tracestate.New(nil, entries...)
- if err != nil {
- return nil
- }
-
- return ts
-}
-
-func tracestateToRequest(sc trace.SpanContext, req *http.Request) {
- var pairs = make([]string, 0, len(sc.Tracestate.Entries()))
- if sc.Tracestate != nil {
- for _, entry := range sc.Tracestate.Entries() {
- pairs = append(pairs, strings.Join([]string{entry.Key, entry.Value}, "="))
- }
- h := strings.Join(pairs, ",")
-
- if h != "" && len(h) <= maxTracestateLen {
- req.Header.Set(tracestateHeader, h)
- }
- }
-}
-
-// SpanContextToRequest modifies the given request to include traceparent and tracestate headers.
-func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
- h := fmt.Sprintf("%x-%x-%x-%x",
- []byte{supportedVersion},
- sc.TraceID[:],
- sc.SpanID[:],
- []byte{byte(sc.TraceOptions)})
- req.Header.Set(traceparentHeader, h)
- tracestateToRequest(sc, req)
-}
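A hedged sketch of the W3C traceparent format this removed propagator handles (the header value is a made-up but well-formed example): version-traceid-spanid-traceflags, all lower-case hex, parsed by SpanContextFromRequest against the upstream go.opencensus.io module.

```go
package main

import (
	"fmt"
	"net/http"

	"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
)

func main() {
	req, _ := http.NewRequest("GET", "http://example.com", nil)
	req.Header.Set("traceparent", "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01")

	var f tracecontext.HTTPFormat
	sc, ok := f.SpanContextFromRequest(req)
	fmt.Println(ok)                          // true: header parsed into a SpanContext
	fmt.Println(sc.TraceOptions.IsSampled()) // true: the trailing 01 flag marks it sampled
}
```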
diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go
index 0590070e2..f3265655e 100644
--- a/vendor/golang.org/x/crypto/ssh/client_auth.go
+++ b/vendor/golang.org/x/crypto/ssh/client_auth.go
@@ -36,7 +36,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
// during the authentication phase the client first attempts the "none" method
// then any untried methods suggested by the server.
- tried := make(map[string]bool)
+ var tried []string
var lastMethods []string
sessionID := c.transport.getSessionID()
@@ -49,7 +49,9 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
// success
return nil
} else if ok == authFailure {
- tried[auth.method()] = true
+ if m := auth.method(); !contains(tried, m) {
+ tried = append(tried, m)
+ }
}
if methods == nil {
methods = lastMethods
@@ -61,7 +63,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
findNext:
for _, a := range config.Auth {
candidateMethod := a.method()
- if tried[candidateMethod] {
+ if contains(tried, candidateMethod) {
continue
}
for _, meth := range methods {
@@ -72,16 +74,16 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
}
}
}
- return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried))
+ return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried)
}
-func keys(m map[string]bool) []string {
- s := make([]string, 0, len(m))
-
- for key := range m {
- s = append(s, key)
+func contains(list []string, e string) bool {
+ for _, s := range list {
+ if s == e {
+ return true
+ }
}
- return s
+ return false
}
// An AuthMethod represents an instance of an RFC 4252 authentication method.
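A minimal, self-contained sketch of the new bookkeeping in the hunk above (inputs are illustrative): an ordered, de-duplicated slice replaces the map, so the "attempted methods" error now lists methods in the order they were tried.

```go
package main

import "fmt"

// contains reports whether e is already in list, as in the updated ssh code.
func contains(list []string, e string) bool {
	for _, s := range list {
		if s == e {
			return true
		}
	}
	return false
}

func main() {
	var tried []string
	for _, m := range []string{"none", "publickey", "password", "publickey"} {
		if !contains(tried, m) {
			tried = append(tried, m)
		}
	}
	fmt.Println(tried) // [none publickey password]: ordered and de-duplicated
}
```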
diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go
index f19016270..9654c0186 100644
--- a/vendor/golang.org/x/crypto/ssh/mux.go
+++ b/vendor/golang.org/x/crypto/ssh/mux.go
@@ -240,7 +240,7 @@ func (m *mux) onePacket() error {
id := binary.BigEndian.Uint32(packet[1:])
ch := m.chanList.getChan(id)
if ch == nil {
- return fmt.Errorf("ssh: invalid channel %d", id)
+ return m.handleUnknownChannelPacket(id, packet)
}
return ch.handlePacket(packet)
@@ -328,3 +328,24 @@ func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) {
return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg)
}
}
+
+func (m *mux) handleUnknownChannelPacket(id uint32, packet []byte) error {
+ msg, err := decode(packet)
+ if err != nil {
+ return err
+ }
+
+ switch msg := msg.(type) {
+ // RFC 4254 section 5.4 says unrecognized channel requests should
+ // receive a failure response.
+ case *channelRequestMsg:
+ if msg.WantReply {
+ return m.sendMessage(channelRequestFailureMsg{
+ PeersID: msg.PeersID,
+ })
+ }
+ return nil
+ default:
+ return fmt.Errorf("ssh: invalid channel %d", id)
+ }
+}
diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go
deleted file mode 100644
index 30f632c57..000000000
--- a/vendor/golang.org/x/sync/semaphore/semaphore.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package semaphore provides a weighted semaphore implementation.
-package semaphore // import "golang.org/x/sync/semaphore"
-
-import (
- "container/list"
- "context"
- "sync"
-)
-
-type waiter struct {
- n int64
- ready chan<- struct{} // Closed when semaphore acquired.
-}
-
-// NewWeighted creates a new weighted semaphore with the given
-// maximum combined weight for concurrent access.
-func NewWeighted(n int64) *Weighted {
- w := &Weighted{size: n}
- return w
-}
-
-// Weighted provides a way to bound concurrent access to a resource.
-// The callers can request access with a given weight.
-type Weighted struct {
- size int64
- cur int64
- mu sync.Mutex
- waiters list.List
-}
-
-// Acquire acquires the semaphore with a weight of n, blocking until resources
-// are available or ctx is done. On success, returns nil. On failure, returns
-// ctx.Err() and leaves the semaphore unchanged.
-//
-// If ctx is already done, Acquire may still succeed without blocking.
-func (s *Weighted) Acquire(ctx context.Context, n int64) error {
- s.mu.Lock()
- if s.size-s.cur >= n && s.waiters.Len() == 0 {
- s.cur += n
- s.mu.Unlock()
- return nil
- }
-
- if n > s.size {
- // Don't make other Acquire calls block on one that's doomed to fail.
- s.mu.Unlock()
- <-ctx.Done()
- return ctx.Err()
- }
-
- ready := make(chan struct{})
- w := waiter{n: n, ready: ready}
- elem := s.waiters.PushBack(w)
- s.mu.Unlock()
-
- select {
- case <-ctx.Done():
- err := ctx.Err()
- s.mu.Lock()
- select {
- case <-ready:
- // Acquired the semaphore after we were canceled. Rather than trying to
- // fix up the queue, just pretend we didn't notice the cancelation.
- err = nil
- default:
- isFront := s.waiters.Front() == elem
- s.waiters.Remove(elem)
- // If we're at the front and there're extra tokens left, notify other waiters.
- if isFront && s.size > s.cur {
- s.notifyWaiters()
- }
- }
- s.mu.Unlock()
- return err
-
- case <-ready:
- return nil
- }
-}
-
-// TryAcquire acquires the semaphore with a weight of n without blocking.
-// On success, returns true. On failure, returns false and leaves the semaphore unchanged.
-func (s *Weighted) TryAcquire(n int64) bool {
- s.mu.Lock()
- success := s.size-s.cur >= n && s.waiters.Len() == 0
- if success {
- s.cur += n
- }
- s.mu.Unlock()
- return success
-}
-
-// Release releases the semaphore with a weight of n.
-func (s *Weighted) Release(n int64) {
- s.mu.Lock()
- s.cur -= n
- if s.cur < 0 {
- s.mu.Unlock()
- panic("semaphore: released more than held")
- }
- s.notifyWaiters()
- s.mu.Unlock()
-}
-
-func (s *Weighted) notifyWaiters() {
- for {
- next := s.waiters.Front()
- if next == nil {
- break // No more waiters blocked.
- }
-
- w := next.Value.(waiter)
- if s.size-s.cur < w.n {
- // Not enough tokens for the next waiter. We could keep going (to try to
- // find a waiter with a smaller request), but under load that could cause
- // starvation for large requests; instead, we leave all remaining waiters
- // blocked.
- //
- // Consider a semaphore used as a read-write lock, with N tokens, N
- // readers, and one writer. Each reader can Acquire(1) to obtain a read
- // lock. The writer can Acquire(N) to obtain a write lock, excluding all
- // of the readers. If we allow the readers to jump ahead in the queue,
- // the writer will starve — there is always one token available for every
- // reader.
- break
- }
-
- s.cur += w.n
- s.waiters.Remove(next)
- close(w.ready)
- }
-}
diff --git a/vendor/google.golang.org/api/support/bundler/bundler.go b/vendor/google.golang.org/api/support/bundler/bundler.go
deleted file mode 100644
index fc515e4e1..000000000
--- a/vendor/google.golang.org/api/support/bundler/bundler.go
+++ /dev/null
@@ -1,339 +0,0 @@
-// Copyright 2016 Google LLC.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package bundler supports bundling (batching) of items. Bundling amortizes an
-// action with fixed costs over multiple items. For example, if an API provides
-// an RPC that accepts a list of items as input, but clients would prefer
-// adding items one at a time, then a Bundler can accept individual items from
-// the client and bundle many of them into a single RPC.
-//
-// This package is experimental and subject to change without notice.
-package bundler
-
-import (
- "context"
- "errors"
- "math"
- "reflect"
- "sync"
- "time"
-
- "golang.org/x/sync/semaphore"
-)
-
-const (
- DefaultDelayThreshold = time.Second
- DefaultBundleCountThreshold = 10
- DefaultBundleByteThreshold = 1e6 // 1M
- DefaultBufferedByteLimit = 1e9 // 1G
-)
-
-var (
- // ErrOverflow indicates that Bundler's stored bytes exceeds its BufferedByteLimit.
- ErrOverflow = errors.New("bundler reached buffered byte limit")
-
- // ErrOversizedItem indicates that an item's size exceeds the maximum bundle size.
- ErrOversizedItem = errors.New("item size exceeds bundle byte limit")
-)
-
-// A Bundler collects items added to it into a bundle until the bundle
-// exceeds a given size, then calls a user-provided function to handle the bundle.
-type Bundler struct {
- // Starting from the time that the first message is added to a bundle, once
- // this delay has passed, handle the bundle. The default is DefaultDelayThreshold.
- DelayThreshold time.Duration
-
- // Once a bundle has this many items, handle the bundle. Since only one
- // item at a time is added to a bundle, no bundle will exceed this
- // threshold, so it also serves as a limit. The default is
- // DefaultBundleCountThreshold.
- BundleCountThreshold int
-
- // Once the number of bytes in current bundle reaches this threshold, handle
- // the bundle. The default is DefaultBundleByteThreshold. This triggers handling,
- // but does not cap the total size of a bundle.
- BundleByteThreshold int
-
- // The maximum size of a bundle, in bytes. Zero means unlimited.
- BundleByteLimit int
-
- // The maximum number of bytes that the Bundler will keep in memory before
- // returning ErrOverflow. The default is DefaultBufferedByteLimit.
- BufferedByteLimit int
-
- // The maximum number of handler invocations that can be running at once.
- // The default is 1.
- HandlerLimit int
-
- handler func(interface{}) // called to handle a bundle
- itemSliceZero reflect.Value // nil (zero value) for slice of items
- flushTimer *time.Timer // implements DelayThreshold
-
- mu sync.Mutex
- sem *semaphore.Weighted // enforces BufferedByteLimit
- semOnce sync.Once
- curBundle bundle // incoming items added to this bundle
-
- // Each bundle is assigned a unique ticket that determines the order in which the
- // handler is called. The ticket is assigned with mu locked, but waiting for tickets
- // to be handled is done via mu2 and cond, below.
- nextTicket uint64 // next ticket to be assigned
-
- mu2 sync.Mutex
- cond *sync.Cond
- nextHandled uint64 // next ticket to be handled
-
- // In this implementation, active uses space proportional to HandlerLimit, and
- // waitUntilAllHandled takes time proportional to HandlerLimit each time an acquire
- // or release occurs, so large values of HandlerLimit max may cause performance
- // issues.
- active map[uint64]bool // tickets of bundles actively being handled
-}
-
-type bundle struct {
- items reflect.Value // slice of item type
- size int // size in bytes of all items
-}
-
-// NewBundler creates a new Bundler.
-//
-// itemExample is a value of the type that will be bundled. For example, if you
-// want to create bundles of *Entry, you could pass &Entry{} for itemExample.
-//
-// handler is a function that will be called on each bundle. If itemExample is
-// of type T, the argument to handler is of type []T. handler is always called
-// sequentially for each bundle, and never in parallel.
-//
-// Configure the Bundler by setting its thresholds and limits before calling
-// any of its methods.
-func NewBundler(itemExample interface{}, handler func(interface{})) *Bundler {
- b := &Bundler{
- DelayThreshold: DefaultDelayThreshold,
- BundleCountThreshold: DefaultBundleCountThreshold,
- BundleByteThreshold: DefaultBundleByteThreshold,
- BufferedByteLimit: DefaultBufferedByteLimit,
- HandlerLimit: 1,
-
- handler: handler,
- itemSliceZero: reflect.Zero(reflect.SliceOf(reflect.TypeOf(itemExample))),
- active: map[uint64]bool{},
- }
- b.curBundle.items = b.itemSliceZero
- b.cond = sync.NewCond(&b.mu2)
- return b
-}
-
-func (b *Bundler) initSemaphores() {
- // Create the semaphores lazily, because the user may set limits
- // after NewBundler.
- b.semOnce.Do(func() {
- b.sem = semaphore.NewWeighted(int64(b.BufferedByteLimit))
- })
-}
-
-// Add adds item to the current bundle. It marks the bundle for handling and
-// starts a new one if any of the thresholds or limits are exceeded.
-//
-// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then
-// the item can never be handled. Add returns ErrOversizedItem in this case.
-//
-// If adding the item would exceed the maximum memory allowed
-// (Bundler.BufferedByteLimit) or an AddWait call is blocked waiting for
-// memory, Add returns ErrOverflow.
-//
-// Add never blocks.
-func (b *Bundler) Add(item interface{}, size int) error {
- // If this item exceeds the maximum size of a bundle,
- // we can never send it.
- if b.BundleByteLimit > 0 && size > b.BundleByteLimit {
- return ErrOversizedItem
- }
- // If adding this item would exceed our allotted memory
- // footprint, we can't accept it.
- // (TryAcquire also returns false if anything is waiting on the semaphore,
- // so calls to Add and AddWait shouldn't be mixed.)
- b.initSemaphores()
- if !b.sem.TryAcquire(int64(size)) {
- return ErrOverflow
- }
- b.add(item, size)
- return nil
-}
-
-// add adds item to the current bundle. It marks the bundle for handling and
-// starts a new one if any of the thresholds or limits are exceeded.
-func (b *Bundler) add(item interface{}, size int) {
- b.mu.Lock()
- defer b.mu.Unlock()
-
- // If adding this item to the current bundle would cause it to exceed the
- // maximum bundle size, close the current bundle and start a new one.
- if b.BundleByteLimit > 0 && b.curBundle.size+size > b.BundleByteLimit {
- b.startFlushLocked()
- }
- // Add the item.
- b.curBundle.items = reflect.Append(b.curBundle.items, reflect.ValueOf(item))
- b.curBundle.size += size
-
- // Start a timer to flush the item if one isn't already running.
- // startFlushLocked clears the timer and closes the bundle at the same time,
- // so we only allocate a new timer for the first item in each bundle.
- // (We could try to call Reset on the timer instead, but that would add a lot
- // of complexity to the code just to save one small allocation.)
- if b.flushTimer == nil {
- b.flushTimer = time.AfterFunc(b.DelayThreshold, b.Flush)
- }
-
- // If the current bundle equals the count threshold, close it.
- if b.curBundle.items.Len() == b.BundleCountThreshold {
- b.startFlushLocked()
- }
- // If the current bundle equals or exceeds the byte threshold, close it.
- if b.curBundle.size >= b.BundleByteThreshold {
- b.startFlushLocked()
- }
-}
-
-// AddWait adds item to the current bundle. It marks the bundle for handling and
-// starts a new one if any of the thresholds or limits are exceeded.
-//
-// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then
-// the item can never be handled. AddWait returns ErrOversizedItem in this case.
-//
-// If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit),
-// AddWait blocks until space is available or ctx is done.
-//
-// Calls to Add and AddWait should not be mixed on the same Bundler.
-func (b *Bundler) AddWait(ctx context.Context, item interface{}, size int) error {
- // If this item exceeds the maximum size of a bundle,
- // we can never send it.
- if b.BundleByteLimit > 0 && size > b.BundleByteLimit {
- return ErrOversizedItem
- }
- // If adding this item would exceed our allotted memory footprint, block
- // until space is available. The semaphore is FIFO, so there will be no
- // starvation.
- b.initSemaphores()
- if err := b.sem.Acquire(ctx, int64(size)); err != nil {
- return err
- }
- // Here, we've reserved space for item. Other goroutines can call AddWait
- // and even acquire space, but no one can take away our reservation
- // (assuming sem.Release is used correctly). So there is no race condition
- // resulting from locking the mutex after sem.Acquire returns.
- b.add(item, size)
- return nil
-}
-
-// Flush invokes the handler for all remaining items in the Bundler and waits
-// for it to return.
-func (b *Bundler) Flush() {
- b.mu.Lock()
- b.startFlushLocked()
- // Here, all bundles with tickets < b.nextTicket are
- // either finished or active. Those are the ones
- // we want to wait for.
- t := b.nextTicket
- b.mu.Unlock()
- b.initSemaphores()
- b.waitUntilAllHandled(t)
-}
-
-func (b *Bundler) startFlushLocked() {
- if b.flushTimer != nil {
- b.flushTimer.Stop()
- b.flushTimer = nil
- }
- if b.curBundle.items.Len() == 0 {
- return
- }
- // Here, both semaphores must have been initialized.
- bun := b.curBundle
- b.curBundle = bundle{items: b.itemSliceZero}
- ticket := b.nextTicket
- b.nextTicket++
- go func() {
- defer func() {
- b.sem.Release(int64(bun.size))
- b.release(ticket)
- }()
- b.acquire(ticket)
- b.handler(bun.items.Interface())
- }()
-}
-
-// acquire blocks until ticket is the next to be served, then returns. In order for N
-// acquire calls to return, the tickets must be in the range [0, N). A ticket must
-// not be presented to acquire more than once.
-func (b *Bundler) acquire(ticket uint64) {
- b.mu2.Lock()
- defer b.mu2.Unlock()
- if ticket < b.nextHandled {
- panic("bundler: acquire: arg too small")
- }
- for !(ticket == b.nextHandled && len(b.active) < b.HandlerLimit) {
- b.cond.Wait()
- }
- // Here,
- // ticket == b.nextHandled: the caller is the next one to be handled;
- // and len(b.active) < b.HandlerLimit: there is space available.
- b.active[ticket] = true
- b.nextHandled++
- // Broadcast, not Signal: although at most one acquire waiter can make progress,
- // there might be waiters in waitUntilAllHandled.
- b.cond.Broadcast()
-}
-
-// If a ticket is used for a call to acquire, it must later be passed to release. A
-// ticket must not be presented to release more than once.
-func (b *Bundler) release(ticket uint64) {
- b.mu2.Lock()
- defer b.mu2.Unlock()
- if !b.active[ticket] {
- panic("bundler: release: not an active ticket")
- }
- delete(b.active, ticket)
- b.cond.Broadcast()
-}
-
-// waitUntilAllHandled blocks until all tickets < n have called release, meaning
-// all bundles with tickets < n have been handled.
-func (b *Bundler) waitUntilAllHandled(n uint64) {
- // Proof of correctness of this function.
- // "N is acquired" means acquire(N) has returned.
- // "N is released" means release(N) has returned.
- // 1. If N is acquired, N-1 is acquired.
- // Follows from the loop test in acquire, and the fact
- // that nextHandled is incremented by 1.
- // 2. If nextHandled >= N, then N-1 is acquired.
- // Because we only increment nextHandled to N after N-1 is acquired.
- // 3. If nextHandled >= N, then all n < N is acquired.
- // Follows from #1 and #2.
- // 4. If N is acquired and N is not in active, then N is released.
- // Because we put N in active before acquire returns, and only
- // remove it when it is released.
- // Let min(active) be the smallest member of active, or infinity if active is empty.
- // 5. If nextHandled >= N and N <= min(active), then all n < N is released.
- // From nextHandled >= N and #3, all n < N is acquired.
- // N <= min(active) implies n < min(active) for all n < N. So all n < N is not in active.
- // So from #4, all n < N is released.
- // The loop test below is the antecedent of #5.
- b.mu2.Lock()
- defer b.mu2.Unlock()
- for !(b.nextHandled >= n && n <= min(b.active)) {
- b.cond.Wait()
- }
-}
-
-// min returns the minimum value of the set s, or the largest uint64 if
-// s is empty.
-func min(s map[uint64]bool) uint64 {
- var m uint64 = math.MaxUint64
- for n := range s {
- if n < m {
- m = n
- }
- }
- return m
-}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
deleted file mode 100644
index 425dea4a1..000000000
--- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/api/httpbody.proto
-
-package httpbody
-
-import (
- fmt "fmt"
- math "math"
-
- proto "github.com/golang/protobuf/proto"
- any "github.com/golang/protobuf/ptypes/any"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// Message that represents an arbitrary HTTP body. It should only be used for
-// payload formats that can't be represented as JSON, such as raw binary or
-// an HTML page.
-//
-//
-// This message can be used both in streaming and non-streaming API methods in
-// the request as well as the response.
-//
-// It can be used as a top-level request field, which is convenient if one
-// wants to extract parameters from either the URL or HTTP template into the
-// request fields and also want access to the raw HTTP body.
-//
-// Example:
-//
-// message GetResourceRequest {
-// // A unique request id.
-// string request_id = 1;
-//
-// // The raw HTTP body is bound to this field.
-// google.api.HttpBody http_body = 2;
-// }
-//
-// service ResourceService {
-// rpc GetResource(GetResourceRequest) returns (google.api.HttpBody);
-// rpc UpdateResource(google.api.HttpBody) returns
-// (google.protobuf.Empty);
-// }
-//
-// Example with streaming methods:
-//
-// service CaldavService {
-// rpc GetCalendar(stream google.api.HttpBody)
-// returns (stream google.api.HttpBody);
-// rpc UpdateCalendar(stream google.api.HttpBody)
-// returns (stream google.api.HttpBody);
-// }
-//
-// Use of this type only changes how the request and response bodies are
-// handled, all other features will continue to work unchanged.
-type HttpBody struct {
- // The HTTP Content-Type header value specifying the content type of the body.
- ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
- // The HTTP request/response body as raw binary.
- Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
- // Application specific response metadata. Must be set in the first response
- // for streaming APIs.
- Extensions []*any.Any `protobuf:"bytes,3,rep,name=extensions,proto3" json:"extensions,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HttpBody) Reset() { *m = HttpBody{} }
-func (m *HttpBody) String() string { return proto.CompactTextString(m) }
-func (*HttpBody) ProtoMessage() {}
-func (*HttpBody) Descriptor() ([]byte, []int) {
- return fileDescriptor_09ea2ecaa32a0070, []int{0}
-}
-
-func (m *HttpBody) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_HttpBody.Unmarshal(m, b)
-}
-func (m *HttpBody) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_HttpBody.Marshal(b, m, deterministic)
-}
-func (m *HttpBody) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HttpBody.Merge(m, src)
-}
-func (m *HttpBody) XXX_Size() int {
- return xxx_messageInfo_HttpBody.Size(m)
-}
-func (m *HttpBody) XXX_DiscardUnknown() {
- xxx_messageInfo_HttpBody.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HttpBody proto.InternalMessageInfo
-
-func (m *HttpBody) GetContentType() string {
- if m != nil {
- return m.ContentType
- }
- return ""
-}
-
-func (m *HttpBody) GetData() []byte {
- if m != nil {
- return m.Data
- }
- return nil
-}
-
-func (m *HttpBody) GetExtensions() []*any.Any {
- if m != nil {
- return m.Extensions
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*HttpBody)(nil), "google.api.HttpBody")
-}
-
-func init() {
- proto.RegisterFile("google/api/httpbody.proto", fileDescriptor_09ea2ecaa32a0070)
-}
-
-var fileDescriptor_09ea2ecaa32a0070 = []byte{
- // 229 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8f, 0x31, 0x4f, 0xc3, 0x30,
- 0x10, 0x85, 0xe5, 0xb6, 0x42, 0x70, 0x2d, 0x0c, 0x16, 0x43, 0x60, 0x0a, 0x4c, 0x99, 0x6c, 0x09,
- 0xd8, 0x3a, 0x35, 0x0b, 0xb0, 0x45, 0x11, 0x13, 0x0b, 0x72, 0x1a, 0xe3, 0x46, 0x2a, 0x77, 0xa7,
- 0xe6, 0x10, 0xf8, 0xef, 0xf0, 0x2b, 0x19, 0x11, 0x69, 0x2c, 0xe8, 0xf6, 0xe4, 0xef, 0x3d, 0xbf,
- 0x77, 0x70, 0x11, 0x88, 0xc2, 0xd6, 0x5b, 0xc7, 0x9d, 0xdd, 0x88, 0x70, 0x43, 0x6d, 0x34, 0xbc,
- 0x23, 0x21, 0x0d, 0x7b, 0x64, 0x1c, 0x77, 0x97, 0xc9, 0x36, 0x90, 0xe6, 0xfd, 0xd5, 0x3a, 0x1c,
- 0x6d, 0xd7, 0x1f, 0x70, 0xfc, 0x20, 0xc2, 0x25, 0xb5, 0x51, 0x5f, 0xc1, 0x62, 0x4d, 0x28, 0x1e,
- 0xe5, 0x45, 0x22, 0xfb, 0x4c, 0xe5, 0xaa, 0x38, 0xa9, 0xe7, 0xe3, 0xdb, 0x53, 0x64, 0xaf, 0x35,
- 0xcc, 0x5a, 0x27, 0x2e, 0x9b, 0xe4, 0xaa, 0x58, 0xd4, 0x83, 0xd6, 0x77, 0x00, 0xfe, 0x53, 0x3c,
- 0xf6, 0x1d, 0x61, 0x9f, 0x4d, 0xf3, 0x69, 0x31, 0xbf, 0x39, 0x37, 0x63, 0x7d, 0xaa, 0x34, 0x2b,
- 0x8c, 0xf5, 0x3f, 0x5f, 0xb9, 0x81, 0xb3, 0x35, 0xbd, 0x99, 0xbf, 0x95, 0xe5, 0x69, 0x1a, 0x52,
- 0xfd, 0x66, 0x2a, 0xf5, 0xbc, 0x1c, 0x61, 0xa0, 0xad, 0xc3, 0x60, 0x68, 0x17, 0x6c, 0xf0, 0x38,
- 0xfc, 0x68, 0xf7, 0xc8, 0x71, 0xd7, 0x1f, 0x1c, 0xbf, 0x4c, 0xe2, 0x5b, 0xa9, 0xaf, 0xc9, 0xec,
- 0x7e, 0x55, 0x3d, 0x36, 0x47, 0x43, 0xe2, 0xf6, 0x27, 0x00, 0x00, 0xff, 0xff, 0x78, 0xb9, 0x16,
- 0x2b, 0x2d, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go
deleted file mode 100644
index 44fb5e630..000000000
--- a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go
+++ /dev/null
@@ -1,284 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/field_mask.proto
-
-package field_mask
-
-import (
- fmt "fmt"
- math "math"
-
- proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// `FieldMask` represents a set of symbolic field paths, for example:
-//
-// paths: "f.a"
-// paths: "f.b.d"
-//
-// Here `f` represents a field in some root message, `a` and `b`
-// fields in the message found in `f`, and `d` a field found in the
-// message in `f.b`.
-//
-// Field masks are used to specify a subset of fields that should be
-// returned by a get operation or modified by an update operation.
-// Field masks also have a custom JSON encoding (see below).
-//
-// # Field Masks in Projections
-//
-// When used in the context of a projection, a response message or
-// sub-message is filtered by the API to only contain those fields as
-// specified in the mask. For example, if the mask in the previous
-// example is applied to a response message as follows:
-//
-// f {
-// a : 22
-// b {
-// d : 1
-// x : 2
-// }
-// y : 13
-// }
-// z: 8
-//
-// The result will not contain specific values for fields x,y and z
-// (their value will be set to the default, and omitted in proto text
-// output):
-//
-//
-// f {
-// a : 22
-// b {
-// d : 1
-// }
-// }
-//
-// A repeated field is not allowed except at the last position of a
-// paths string.
-//
-// If a FieldMask object is not present in a get operation, the
-// operation applies to all fields (as if a FieldMask of all fields
-// had been specified).
-//
-// Note that a field mask does not necessarily apply to the
-// top-level response message. In case of a REST get operation, the
-// field mask applies directly to the response, but in case of a REST
-// list operation, the mask instead applies to each individual message
-// in the returned resource list. In case of a REST custom method,
-// other definitions may be used. Where the mask applies will be
-// clearly documented together with its declaration in the API. In
-// any case, the effect on the returned resource/resources is required
-// behavior for APIs.
-//
-// # Field Masks in Update Operations
-//
-// A field mask in update operations specifies which fields of the
-// targeted resource are going to be updated. The API is required
-// to only change the values of the fields as specified in the mask
-// and leave the others untouched. If a resource is passed in to
-// describe the updated values, the API ignores the values of all
-// fields not covered by the mask.
-//
-// If a repeated field is specified for an update operation, new values will
-// be appended to the existing repeated field in the target resource. Note that
-// a repeated field is only allowed in the last position of a `paths` string.
-//
-// If a sub-message is specified in the last position of the field mask for an
-// update operation, then new value will be merged into the existing sub-message
-// in the target resource.
-//
-// For example, given the target message:
-//
-// f {
-// b {
-// d: 1
-// x: 2
-// }
-// c: [1]
-// }
-//
-// And an update message:
-//
-// f {
-// b {
-// d: 10
-// }
-// c: [2]
-// }
-//
-// then if the field mask is:
-//
-// paths: ["f.b", "f.c"]
-//
-// then the result will be:
-//
-// f {
-// b {
-// d: 10
-// x: 2
-// }
-// c: [1, 2]
-// }
-//
-// An implementation may provide options to override this default behavior for
-// repeated and message fields.
-//
-// In order to reset a field's value to the default, the field must
-// be in the mask and set to the default value in the provided resource.
-// Hence, in order to reset all fields of a resource, provide a default
-// instance of the resource and set all fields in the mask, or do
-// not provide a mask as described below.
-//
-// If a field mask is not present on update, the operation applies to
-// all fields (as if a field mask of all fields has been specified).
-// Note that in the presence of schema evolution, this may mean that
-// fields the client does not know and has therefore not filled into
-// the request will be reset to their default. If this is unwanted
-// behavior, a specific service may require a client to always specify
-// a field mask, producing an error if not.
-//
-// As with get operations, the location of the resource which
-// describes the updated values in the request message depends on the
-// operation kind. In any case, the effect of the field mask is
-// required to be honored by the API.
-//
-// ## Considerations for HTTP REST
-//
-// The HTTP kind of an update operation which uses a field mask must
-// be set to PATCH instead of PUT in order to satisfy HTTP semantics
-// (PUT must only be used for full updates).
-//
-// # JSON Encoding of Field Masks
-//
-// In JSON, a field mask is encoded as a single string where paths are
-// separated by a comma. Fields name in each path are converted
-// to/from lower-camel naming conventions.
-//
-// As an example, consider the following message declarations:
-//
-// message Profile {
-// User user = 1;
-// Photo photo = 2;
-// }
-// message User {
-// string display_name = 1;
-// string address = 2;
-// }
-//
-// In proto a field mask for `Profile` may look as such:
-//
-// mask {
-// paths: "user.display_name"
-// paths: "photo"
-// }
-//
-// In JSON, the same mask is represented as below:
-//
-// {
-// mask: "user.displayName,photo"
-// }
-//
-// # Field Masks and Oneof Fields
-//
-// Field masks treat fields in oneofs just as regular fields. Consider the
-// following message:
-//
-// message SampleMessage {
-// oneof test_oneof {
-// string name = 4;
-// SubMessage sub_message = 9;
-// }
-// }
-//
-// The field mask can be:
-//
-// mask {
-// paths: "name"
-// }
-//
-// Or:
-//
-// mask {
-// paths: "sub_message"
-// }
-//
-// Note that oneof type names ("test_oneof" in this case) cannot be used in
-// paths.
-//
-// ## Field Mask Verification
-//
-// The implementation of any API method which has a FieldMask type field in the
-// request should verify the included field paths, and return an
-// `INVALID_ARGUMENT` error if any path is unmappable.
-type FieldMask struct {
- // The set of field mask paths.
- Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FieldMask) Reset() { *m = FieldMask{} }
-func (m *FieldMask) String() string { return proto.CompactTextString(m) }
-func (*FieldMask) ProtoMessage() {}
-func (*FieldMask) Descriptor() ([]byte, []int) {
- return fileDescriptor_5158202634f0da48, []int{0}
-}
-
-func (m *FieldMask) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FieldMask.Unmarshal(m, b)
-}
-func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic)
-}
-func (m *FieldMask) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FieldMask.Merge(m, src)
-}
-func (m *FieldMask) XXX_Size() int {
- return xxx_messageInfo_FieldMask.Size(m)
-}
-func (m *FieldMask) XXX_DiscardUnknown() {
- xxx_messageInfo_FieldMask.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FieldMask proto.InternalMessageInfo
-
-func (m *FieldMask) GetPaths() []string {
- if m != nil {
- return m.Paths
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask")
-}
-
-func init() {
- proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_5158202634f0da48)
-}
-
-var fileDescriptor_5158202634f0da48 = []byte{
- // 175 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f,
- 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd,
- 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54,
- 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16,
- 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x3d, 0x8c,
- 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x5a, 0x9d, 0xf8, 0xe0, 0x1a, 0x03, 0x40, 0x42, 0x01,
- 0x8c, 0x51, 0x96, 0x50, 0x25, 0xe9, 0xf9, 0x39, 0x89, 0x79, 0xe9, 0x7a, 0xf9, 0x45, 0xe9, 0xfa,
- 0xe9, 0xa9, 0x79, 0x60, 0x0d, 0xd8, 0xdc, 0x64, 0x8d, 0x60, 0xfe, 0x60, 0x64, 0x5c, 0xc4, 0xc4,
- 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x48, 0x00, 0x54, 0x83, 0x5e, 0x78, 0x6a,
- 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x48, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, 0x24,
- 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xda, 0xb7, 0xa8, 0xed, 0x00, 0x00, 0x00,
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index d1683d1a5..2796665bc 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -9,9 +9,6 @@ cloud.google.com/go/internal/version
# cloud.google.com/go/storage v1.0.0
## explicit
cloud.google.com/go/storage
-# contrib.go.opencensus.io/exporter/ocagent v0.4.12
-## explicit
-contrib.go.opencensus.io/exporter/ocagent
# github.com/Azure/azure-sdk-for-go v29.0.0+incompatible
## explicit
github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network
@@ -19,17 +16,30 @@ github.com/Azure/azure-sdk-for-go/version
# github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
github.com/Azure/go-ansiterm
github.com/Azure/go-ansiterm/winterm
-# github.com/Azure/go-autorest v11.7.1+incompatible
+# github.com/Azure/go-autorest v14.2.0+incompatible
+github.com/Azure/go-autorest
+# github.com/Azure/go-autorest/autorest v0.11.4
## explicit
github.com/Azure/go-autorest/autorest
-github.com/Azure/go-autorest/autorest/adal
github.com/Azure/go-autorest/autorest/azure
+# github.com/Azure/go-autorest/autorest/adal v0.9.2
+github.com/Azure/go-autorest/autorest/adal
+# github.com/Azure/go-autorest/autorest/azure/auth v0.5.1
+## explicit
github.com/Azure/go-autorest/autorest/azure/auth
+# github.com/Azure/go-autorest/autorest/azure/cli v0.4.0
github.com/Azure/go-autorest/autorest/azure/cli
+# github.com/Azure/go-autorest/autorest/date v0.3.0
github.com/Azure/go-autorest/autorest/date
+# github.com/Azure/go-autorest/autorest/to v0.4.0
+## explicit
github.com/Azure/go-autorest/autorest/to
+# github.com/Azure/go-autorest/autorest/validation v0.3.0
+## explicit
github.com/Azure/go-autorest/autorest/validation
+# github.com/Azure/go-autorest/logger v0.2.0
github.com/Azure/go-autorest/logger
+# github.com/Azure/go-autorest/tracing v0.6.0
github.com/Azure/go-autorest/tracing
# github.com/BurntSushi/toml v0.3.1
github.com/BurntSushi/toml
@@ -118,13 +128,6 @@ github.com/bgentry/speakeasy
# github.com/boltdb/bolt v1.3.1
## explicit
github.com/boltdb/bolt
-# github.com/census-instrumentation/opencensus-proto v0.2.1
-github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1
-github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1
-github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1
-github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1
-github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1
-github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1
# github.com/cespare/xxhash/v2 v2.1.1
github.com/cespare/xxhash/v2
# github.com/checkpoint-restore/go-criu/v4 v4.1.0
@@ -283,7 +286,6 @@ github.com/godbus/dbus/v5
github.com/gogo/protobuf/proto
# github.com/golang/protobuf v1.4.2 => github.com/golang/protobuf v1.3.4
## explicit
-github.com/golang/protobuf/jsonpb
github.com/golang/protobuf/proto
github.com/golang/protobuf/protoc-gen-go
github.com/golang/protobuf/protoc-gen-go/descriptor
@@ -294,7 +296,6 @@ github.com/golang/protobuf/protoc-gen-go/plugin
github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
github.com/golang/protobuf/ptypes/duration
-github.com/golang/protobuf/ptypes/struct
github.com/golang/protobuf/ptypes/timestamp
github.com/golang/protobuf/ptypes/wrappers
# github.com/golang/snappy v0.0.1
@@ -335,15 +336,11 @@ github.com/gorilla/websocket
github.com/grpc-ecosystem/go-grpc-middleware/retry
github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
github.com/grpc-ecosystem/go-grpc-middleware/util/metautils
-# github.com/grpc-ecosystem/grpc-gateway v1.9.0
-## explicit
-github.com/grpc-ecosystem/grpc-gateway/internal
-github.com/grpc-ecosystem/grpc-gateway/runtime
-github.com/grpc-ecosystem/grpc-gateway/utilities
-# github.com/hashicorp/consul v1.7.1-0.20200213195527-b137060630b4
+# github.com/hashicorp/consul v1.7.7
## explicit
github.com/hashicorp/consul/agent/consul/autopilot
github.com/hashicorp/consul/command/flags
+github.com/hashicorp/consul/ipaddr
github.com/hashicorp/consul/lib
github.com/hashicorp/consul/logging
github.com/hashicorp/consul/version
@@ -568,7 +565,7 @@ github.com/mitchellh/go-homedir
# github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b
## explicit
github.com/mitchellh/go-ps
-# github.com/mitchellh/go-testing-interface v1.0.0
+# github.com/mitchellh/go-testing-interface v1.0.3
## explicit
github.com/mitchellh/go-testing-interface
# github.com/mitchellh/go-wordwrap v1.0.0
@@ -803,10 +800,8 @@ go.opencensus.io/internal
go.opencensus.io/internal/tagencoding
go.opencensus.io/metric/metricdata
go.opencensus.io/metric/metricproducer
-go.opencensus.io/plugin/ocgrpc
go.opencensus.io/plugin/ochttp
go.opencensus.io/plugin/ochttp/propagation/b3
-go.opencensus.io/plugin/ochttp/propagation/tracecontext
go.opencensus.io/resource
go.opencensus.io/stats
go.opencensus.io/stats/internal
@@ -816,7 +811,7 @@ go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/propagation
go.opencensus.io/trace/tracestate
-# golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37
+# golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
## explicit
golang.org/x/crypto/blake2b
golang.org/x/crypto/blowfish
@@ -874,7 +869,6 @@ golang.org/x/oauth2/jwt
# golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
## explicit
golang.org/x/sync/errgroup
-golang.org/x/sync/semaphore
# golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1
## explicit
golang.org/x/sys/cpu
@@ -947,7 +941,6 @@ google.golang.org/api/internal/gensupport
google.golang.org/api/iterator
google.golang.org/api/option
google.golang.org/api/storage/v1
-google.golang.org/api/support/bundler
google.golang.org/api/transport/http
google.golang.org/api/transport/http/internal/propagation
# google.golang.org/appengine v1.6.1
@@ -967,12 +960,10 @@ google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20200302123026-7795fca6ccb1
## explicit
google.golang.org/genproto/googleapis/api/annotations
-google.golang.org/genproto/googleapis/api/httpbody
google.golang.org/genproto/googleapis/iam/v1
google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/rpc/status
google.golang.org/genproto/googleapis/type/expr
-google.golang.org/genproto/protobuf/field_mask
# google.golang.org/grpc v1.27.1
## explicit
google.golang.org/grpc
@@ -1071,7 +1062,7 @@ honnef.co/go/tools/version
# github.com/NYTimes/gziphandler => github.com/NYTimes/gziphandler v1.0.0
# github.com/apparentlymart/go-textseg/v12 => github.com/apparentlymart/go-textseg/v12 v12.0.0
# github.com/godbus/dbus => github.com/godbus/dbus v5.0.1+incompatible
+# github.com/golang/protobuf => github.com/golang/protobuf v1.3.4
# github.com/hashicorp/nomad/api => ./api
# github.com/kr/pty => github.com/kr/pty v1.1.5
# github.com/shirou/gopsutil => github.com/hashicorp/gopsutil v2.18.13-0.20200531184148-5aca383d4f9d+incompatible
-# github.com/golang/protobuf => github.com/golang/protobuf v1.3.4