Merge pull request #1913 from hashicorp/b-json-crasher-1754
Update hcl to fix json parsing panic
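For context, the panic fixed by this dependency bump sits in hcl's JSON front end, which is exercised when job or config input arrives as JSON. Below is a minimal, hedged sketch of that call path through the vendored package; the input and output type are illustrative only, not the exact reproducer from the linked issue.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	// Hypothetical JSON input. Before this update, certain ambiguous or
	// malformed JSON inputs could panic inside the vendored hcl parser
	// instead of returning an error to the caller.
	input := `{"service": [{"key": "value"}]}`

	var out map[string]interface{}
	if err := hcl.Decode(&out, input); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Printf("%#v\n", out)
}
```

The vendored diff that updates hcl follows.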
vendor/github.com/hashicorp/hcl/Makefile (generated, vendored): 1 line changed

@@ -6,6 +6,7 @@ fmt: generate
    go fmt ./...

test: generate
    go get -t ./...
    go test $(TEST) $(TESTARGS)

generate:
vendor/github.com/hashicorp/hcl/README.md (generated, vendored): 27 lines changed

@@ -81,9 +81,20 @@ FOO
  * Boolean values: `true`, `false`

  * Arrays can be made by wrapping it in `[]`. Example:
      `["foo", "bar", 42]`. Arrays can contain primitives
      and other arrays, but cannot contain objects. Objects must
      use the block syntax shown below.
      `["foo", "bar", 42]`. Arrays can contain primitives,
      other arrays, and objects. As an alternative, lists
      of objects can be created with repeated blocks, using
      this structure:

      ```hcl
      service {
          key = "value"
      }

      service {
          key = "value"
      }
      ```

Objects and nested objects are created using the structure shown below:

@@ -92,6 +103,16 @@ variable "ami" {
    description = "the AMI to use"
}
```
This would be equivalent to the following json:
``` json
{
    "variable": {
        "ami": {
            "description": "the AMI to use"
        }
    }
}
```

## Thanks
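The README change above documents that a list of objects can be written either as repeated HCL blocks or as a JSON array of objects. A hedged sketch of decoding both forms through the vendored package follows; the Config/Service types and field names are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

// Config and Service are illustrative target types. The `hcl:"service"` tag
// maps the repeated `service` blocks, or the JSON "service" array, onto the
// slice field.
type Config struct {
	Services []Service `hcl:"service"`
}

type Service struct {
	Key string `hcl:"key"`
}

func main() {
	hclInput := `
service {
  key = "a"
}

service {
  key = "b"
}
`
	jsonInput := `{"service": [{"key": "a"}, {"key": "b"}]}`

	for _, in := range []string{hclInput, jsonInput} {
		var c Config
		if err := hcl.Decode(&c, in); err != nil {
			fmt.Println("decode error:", err)
			continue
		}
		fmt.Printf("%+v\n", c)
	}
}
```

Both inputs should decode to the same two-element slice; the JSON form is the "ambiguous" case that the decoder changes below are concerned with.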
vendor/github.com/hashicorp/hcl/decoder.go (generated, vendored): 68 lines changed

@@ -409,7 +409,6 @@ func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value)
    if result.Kind() == reflect.Interface {
        result = result.Elem()
    }

    // Create the slice if it isn't nil
    resultType := result.Type()
    resultElemType := resultType.Elem()

@@ -443,6 +442,12 @@ func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value)

        // Decode
        val := reflect.Indirect(reflect.New(resultElemType))

        // if item is an object that was decoded from ambiguous JSON and
        // flattened, make sure it's expanded if it needs to decode into a
        // defined structure.
        item := expandObject(item, val)

        if err := d.decode(fieldName, item, val); err != nil {
            return err
        }

@@ -455,6 +460,57 @@ func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value)
    return nil
}

// expandObject detects if an ambiguous JSON object was flattened to a List which
// should be decoded into a struct, and expands the ast to properly deocode.
func expandObject(node ast.Node, result reflect.Value) ast.Node {
    item, ok := node.(*ast.ObjectItem)
    if !ok {
        return node
    }

    elemType := result.Type()

    // our target type must be a struct
    switch elemType.Kind() {
    case reflect.Ptr:
        switch elemType.Elem().Kind() {
        case reflect.Struct:
            //OK
        default:
            return node
        }
    case reflect.Struct:
        //OK
    default:
        return node
    }

    // A list value will have a key and field name. If it had more fields,
    // it wouldn't have been flattened.
    if len(item.Keys) != 2 {
        return node
    }

    keyToken := item.Keys[0].Token
    item.Keys = item.Keys[1:]

    // we need to un-flatten the ast enough to decode
    newNode := &ast.ObjectItem{
        Keys: []*ast.ObjectKey{
            &ast.ObjectKey{
                Token: keyToken,
            },
        },
        Val: &ast.ObjectType{
            List: &ast.ObjectList{
                Items: []*ast.ObjectItem{item},
            },
        },
    }

    return newNode
}

func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
    switch n := node.(type) {
    case *ast.LiteralType:

@@ -489,7 +545,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
    // the yacc parser would always ensure top-level elements were arrays. The new
    // parser does not make the same guarantees, thus we need to convert any
    // top-level literal elements into a list.
    if _, ok := node.(*ast.LiteralType); ok {
    if _, ok := node.(*ast.LiteralType); ok && item != nil {
        node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
    }

@@ -517,6 +573,12 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
    structType := structVal.Type()
    for i := 0; i < structType.NumField(); i++ {
        fieldType := structType.Field(i)
        tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")

        // Ignore fields with tag name "-"
        if tagParts[0] == "-" {
            continue
        }

        if fieldType.Anonymous {
            fieldKind := fieldType.Type.Kind()

@@ -531,7 +593,6 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
            // We have an embedded field. We "squash" the fields down
            // if specified in the tag.
            squash := false
            tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
            for _, tag := range tagParts[1:] {
                if tag == "squash" {
                    squash = true

@@ -601,6 +662,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
        // match (only object with the field), then we decode it exactly.
        // If it is a prefix match, then we decode the matches.
        filter := list.Filter(fieldName)

        prefixMatches := filter.Children()
        matches := filter.Elem()
        if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
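Besides expandObject, the decodeStruct hunks above add a "-" struct-tag escape hatch: a field tagged `hcl:"-"` is now skipped during decoding. A hedged sketch of what that enables; the Task type, its fields, and the input are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

// Task is an illustrative target type. Internal is never populated from
// HCL input because of the "-" tag added by the change above.
type Task struct {
	Driver   string `hcl:"driver"`
	Internal string `hcl:"-"`
}

func main() {
	input := `
driver   = "docker"
Internal = "should be ignored"
`
	var t Task
	if err := hcl.Decode(&t, input); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	// Internal stays empty; only Driver is set from the input.
	fmt.Printf("%+v\n", t)
}
```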
vendor/github.com/hashicorp/hcl/hcl/ast/ast.go (generated, vendored): 12 lines changed

@@ -133,6 +133,12 @@ type ObjectItem struct {
}

func (o *ObjectItem) Pos() token.Pos {
    // I'm not entirely sure what causes this, but removing this causes
    // a test failure. We should investigate at some point.
    if len(o.Keys) == 0 {
        return token.Pos{}
    }

    return o.Keys[0].Pos()
}

@@ -150,7 +156,8 @@ func (o *ObjectKey) Pos() token.Pos {
type LiteralType struct {
    Token token.Token

    // associated line comment, only when used in a list
    // comment types, only used when in a list
    LeadComment *CommentGroup
    LineComment *CommentGroup
}

@@ -208,4 +215,5 @@ func (c *CommentGroup) Pos() token.Pos {
// GoStringer
//-------------------------------------------------------------------

func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }
func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }
func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
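The guard added to ObjectItem.Pos above means an item with no keys now reports a zero position instead of panicking with an index-out-of-range on `o.Keys[0]`. A minimal sketch of that behaviour, assuming nothing beyond the exported ast types shown in the hunk:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
)

func main() {
	// An ObjectItem with no Keys; with the new guard, Pos returns the
	// token.Pos zero value rather than panicking.
	item := &ast.ObjectItem{}
	fmt.Printf("%#v\n", item.Pos())
}
```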
vendor/github.com/hashicorp/hcl/hcl/parser/parser.go (generated, vendored): 115 lines changed

@@ -5,6 +5,7 @@ package parser
import (
    "errors"
    "fmt"
    "strings"

    "github.com/hashicorp/hcl/hcl/ast"
    "github.com/hashicorp/hcl/hcl/scanner"

@@ -49,7 +50,7 @@ func (p *Parser) Parse() (*ast.File, error) {
        scerr = &PosError{Pos: pos, Err: errors.New(msg)}
    }

    f.Node, err = p.objectList()
    f.Node, err = p.objectList(false)
    if scerr != nil {
        return nil, scerr
    }

@@ -61,11 +62,23 @@ func (p *Parser) Parse() (*ast.File, error) {
    return f, nil
}

func (p *Parser) objectList() (*ast.ObjectList, error) {
// objectList parses a list of items within an object (generally k/v pairs).
// The parameter" obj" tells this whether to we are within an object (braces:
// '{', '}') or just at the top level. If we're within an object, we end
// at an RBRACE.
func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
    defer un(trace(p, "ParseObjectList"))
    node := &ast.ObjectList{}

    for {
        if obj {
            tok := p.scan()
            p.unscan()
            if tok.Type == token.RBRACE {
                break
            }
        }

        n, err := p.objectItem()
        if err == errEofToken {
            break // we are finished

@@ -78,6 +91,13 @@ func (p *Parser) objectList() (*ast.ObjectList, error) {
        }

        node.Add(n)

        // object lists can be optionally comma-delimited e.g. when a list of maps
        // is being expressed, so a comma is allowed here - it's simply consumed
        tok := p.scan()
        if tok.Type != token.COMMA {
            p.unscan()
        }
    }
    return node, nil
}

@@ -122,6 +142,24 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
    defer un(trace(p, "ParseObjectItem"))

    keys, err := p.objectKey()
    if len(keys) > 0 && err == errEofToken {
        // We ignore eof token here since it is an error if we didn't
        // receive a value (but we did receive a key) for the item.
        err = nil
    }
    if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
        // This is a strange boolean statement, but what it means is:
        // We have keys with no value, and we're likely in an object
        // (since RBrace ends an object). For this, we set err to nil so
        // we continue and get the error below of having the wrong value
        // type.
        err = nil

        // Reset the token type so we don't think it completed fine. See
        // objectType which uses p.tok.Type to check if we're done with
        // the object.
        p.tok.Type = token.EOF
    }
    if err != nil {
        return nil, err
    }

@@ -147,6 +185,15 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
        if err != nil {
            return nil, err
        }
    default:
        keyStr := make([]string, 0, len(keys))
        for _, k := range keys {
            keyStr = append(keyStr, k.Token.Text)
        }

        return nil, fmt.Errorf(
            "key '%s' expected start of object ('{') or assignment ('=')",
            strings.Join(keyStr, " "))
    }

    // do a look-ahead for line comment

@@ -168,7 +215,11 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
        tok := p.scan()
        switch tok.Type {
        case token.EOF:
            return nil, errEofToken
            // It is very important to also return the keys here as well as
            // the error. This is because we need to be able to tell if we
            // did parse keys prior to finding the EOF, or if we just found
            // a bare EOF.
            return keys, errEofToken
        case token.ASSIGN:
            // assignment or object only, but not nested objects. this is not
            // allowed: `foo bar = {}`

@@ -188,15 +239,26 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {

            return keys, nil
        case token.LBRACE:
            var err error

            // If we have no keys, then it is a syntax error. i.e. {{}} is not
            // allowed.
            if len(keys) == 0 {
                err = &PosError{
                    Pos: p.tok.Pos,
                    Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
                }
            }

            // object
            return keys, nil
            return keys, err
        case token.IDENT, token.STRING:
            keyCount++
            keys = append(keys, &ast.ObjectKey{Token: p.tok})
        case token.ILLEGAL:
            fmt.Println("illegal")
        default:
            return nil, &PosError{
            return keys, &PosError{
                Pos: p.tok.Pos,
                Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
            }

@@ -238,7 +300,7 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
        Lbrace: p.tok.Pos,
    }

    l, err := p.objectList()
    l, err := p.objectList(true)

    // if we hit RBRACE, we are good to go (means we parsed all Items), if it's
    // not a RBRACE, it's an syntax error and we just return it.

@@ -246,9 +308,9 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
        return nil, err
    }

    // If there is no error, we should be at a RBRACE to end the object
    if p.tok.Type != token.RBRACE {
        return nil, fmt.Errorf("object expected closing RBRACE got: %s", p.tok.Type)
    // No error, scan and expect the ending to be a brace
    if tok := p.scan(); tok.Type != token.RBRACE {
        return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type)
    }

    o.List = l

@@ -268,27 +330,38 @@ func (p *Parser) listType() (*ast.ListType, error) {
    needComma := false
    for {
        tok := p.scan()
        switch tok.Type {
        case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
            if needComma {
        if needComma {
            switch tok.Type {
            case token.COMMA, token.RBRACK:
            default:
                return nil, &PosError{
                    Pos: tok.Pos,
                    Err: fmt.Errorf("unexpected token: %s. Expecting %s", tok.Type, token.COMMA),
                    Err: fmt.Errorf(
                        "error parsing list, expected comma or list end, got: %s",
                        tok.Type),
                }
            }

        }
        switch tok.Type {
        case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
            node, err := p.literalType()
            if err != nil {
                return nil, err
            }

            // If there is a lead comment, apply it
            if p.leadComment != nil {
                node.LeadComment = p.leadComment
                p.leadComment = nil
            }

            l.Add(node)
            needComma = true
        case token.COMMA:
            // get next list item or we are at the end
            // do a look-ahead for line comment
            p.scan()
            if p.lineComment != nil {
            if p.lineComment != nil && len(l.List) > 0 {
                lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
                if ok {
                    lit.LineComment = p.lineComment

@@ -300,6 +373,18 @@ func (p *Parser) listType() (*ast.ListType, error) {

            needComma = false
            continue
        case token.LBRACE:
            // Looks like a nested object, so parse it out
            node, err := p.objectType()
            if err != nil {
                return nil, &PosError{
                    Pos: tok.Pos,
                    Err: fmt.Errorf(
                        "error while trying to parse object within list: %s", err),
                }
            }
            l.Add(node)
            needComma = true
        case token.BOOL:
            // TODO(arslan) should we support? not supported by HCL yet
        case token.LBRACK:
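The new LBRACE case in listType, together with the optional-comma handling in objectList, lets objects appear directly inside lists. A hedged sketch of parsing such input with the package's own parser; the keys and values are illustrative.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// A list of objects, which the updated parser handles via the new
	// token.LBRACE branch in listType.
	src := []byte(`foo = [
  {bar = "baz"},
  {bar = "qux"},
]`)

	f, err := parser.Parse(src)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Printf("parsed root node of type %T\n", f.Node)
}
```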
vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go (generated, vendored): 51 lines changed

@@ -6,6 +6,7 @@ import (
    "bytes"
    "fmt"
    "os"
    "regexp"
    "unicode"
    "unicode/utf8"

@@ -223,6 +224,11 @@ func (s *Scanner) Scan() token.Token {
func (s *Scanner) scanComment(ch rune) {
    // single line comments
    if ch == '#' || (ch == '/' && s.peek() != '*') {
        if ch == '/' && s.peek() != '/' {
            s.err("expected '/' for comment")
            return
        }

        ch = s.next()
        for ch != '\n' && ch >= 0 && ch != eof {
            ch = s.next()

@@ -376,7 +382,7 @@ func (s *Scanner) scanExponent(ch rune) rune {
    return ch
}

// scanHeredoc scans a heredoc string.
// scanHeredoc scans a heredoc string
func (s *Scanner) scanHeredoc() {
    // Scan the second '<' in example: '<<EOF'
    if s.next() != '<' {

@@ -389,6 +395,12 @@ func (s *Scanner) scanHeredoc() {

    // Scan the identifier
    ch := s.next()

    // Indented heredoc syntax
    if ch == '-' {
        ch = s.next()
    }

    for isLetter(ch) || isDigit(ch) {
        ch = s.next()
    }

@@ -414,6 +426,17 @@ func (s *Scanner) scanHeredoc() {

    // Read the identifier
    identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
    if len(identBytes) == 0 {
        s.err("zero-length heredoc anchor")
        return
    }

    var identRegexp *regexp.Regexp
    if identBytes[0] == '-' {
        identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
    } else {
        identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
    }

    // Read the actual string value
    lineStart := s.srcPos.Offset

@@ -422,12 +445,11 @@ func (s *Scanner) scanHeredoc() {

        // Special newline handling.
        if ch == '\n' {
            // Math is fast, so we first compare the byte counts to
            // see if we have a chance of seeing the same identifier. If those
            // match, then we compare the string values directly.
            // Math is fast, so we first compare the byte counts to see if we have a chance
            // of seeing the same identifier - if the length is less than the number of bytes
            // in the identifier, this cannot be a valid terminator.
            lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
            if lineBytesLen == len(identBytes) &&
                bytes.Equal(identBytes, s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
            if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
                break
            }

@@ -452,7 +474,7 @@ func (s *Scanner) scanString() {
    // read character after quote
    ch := s.next()

    if ch == '\n' || ch < 0 || ch == eof {
    if ch < 0 || ch == eof {
        s.err("literal not terminated")
        return
    }

@@ -508,16 +530,27 @@ func (s *Scanner) scanEscape() rune {
// scanDigits scans a rune with the given base for n times. For example an
// octal notation \184 would yield in scanDigits(ch, 8, 3)
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
    start := n
    for n > 0 && digitVal(ch) < base {
        ch = s.next()
        if ch == eof {
            // If we see an EOF, we halt any more scanning of digits
            // immediately.
            break
        }

        n--
    }
    if n > 0 {
        s.err("illegal char escape")
    }

    // we scanned all digits, put the last non digit char back
    s.unread()
    if n != start {
        // we scanned all digits, put the last non digit char back,
        // only if we read anything at all
        s.unread()
    }

    return ch
}
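The scanner hunks above add the indented heredoc form (`<<-`), where the terminating marker may itself be indented and is matched with the new regexp. A hedged sketch of using that form end to end through the public decode API; the field name and content are illustrative.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	// Indented heredoc: the terminator "EOF" may be indented when the
	// marker is written as <<-EOF.
	input := `
description = <<-EOF
    line one
    line two
    EOF
`
	var out struct {
		Description string `hcl:"description"`
	}
	if err := hcl.Decode(&out, input); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("%q\n", out.Description)
}
```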
vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go (generated, vendored): 13 lines changed

@@ -27,9 +27,6 @@ func Unquote(s string) (t string, err error) {
    if quote != '"' {
        return "", ErrSyntax
    }
    if contains(s, '\n') {
        return "", ErrSyntax
    }

    // Is it trivial? Avoid allocation.
    if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {

@@ -49,7 +46,7 @@ func Unquote(s string) (t string, err error) {
    for len(s) > 0 {
        // If we're starting a '${}' then let it through un-unquoted.
        // Specifically: we don't unquote any characters within the `${}`
        // section, except for escaped quotes, which we handle specifically.
        // section, except for escaped backslashes, which we handle specifically.
        if s[0] == '$' && len(s) > 1 && s[1] == '{' {
            buf = append(buf, '$', '{')
            s = s[2:]

@@ -64,10 +61,12 @@ func Unquote(s string) (t string, err error) {

            s = s[size:]

            // We special case escaped double quotes in interpolations, converting
            // them to straight double quotes.
            // We special case escaped backslashes in interpolations, converting
            // them to their unescaped equivalents.
            if r == '\\' {
                if q, _ := utf8.DecodeRuneInString(s); q == '"' {
                q, _ := utf8.DecodeRuneInString(s)
                switch q {
                case '\\':
                    continue
                }
            }
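For orientation, Unquote is the routine that turns a quoted HCL string token into its value while letting `${ }` interpolation bodies pass through mostly untouched. A small, hedged sketch of calling it directly; the inputs are illustrative and the exact handling of escapes inside interpolations is exactly what the hunk above adjusts.

```go
package main

import (
	"fmt"

	hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
)

func main() {
	for _, s := range []string{
		`"plain \"quoted\" text"`,
		`"prefix ${var.name} suffix"`,
	} {
		out, err := hclstrconv.Unquote(s)
		fmt.Printf("%s -> %q (err: %v)\n", s, out, err)
	}
}
```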
vendor/github.com/hashicorp/hcl/hcl/token/token.go (generated, vendored): 63 lines changed

@@ -142,13 +142,7 @@ func (t Token) Value() interface{} {
    case IDENT:
        return t.Text
    case HEREDOC:
        // We need to find the end of the marker
        idx := strings.IndexByte(t.Text, '\n')
        if idx == -1 {
            panic("heredoc doesn't contain newline")
        }

        return string(t.Text[idx+1 : len(t.Text)-idx+1])
        return unindentHeredoc(t.Text)
    case STRING:
        // Determine the Unquote method to use. If it came from JSON,
        // then we need to use the built-in unquote since we have to

@@ -158,6 +152,11 @@ func (t Token) Value() interface{} {
            f = strconv.Unquote
        }

        // This case occurs if json null is used
        if t.Text == "" {
            return ""
        }

        v, err := f(t.Text)
        if err != nil {
            panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))

@@ -168,3 +167,53 @@ func (t Token) Value() interface{} {
        panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
    }
}

// unindentHeredoc returns the string content of a HEREDOC if it is started with <<
// and the content of a HEREDOC with the hanging indent removed if it is started with
// a <<-, and the terminating line is at least as indented as the least indented line.
func unindentHeredoc(heredoc string) string {
    // We need to find the end of the marker
    idx := strings.IndexByte(heredoc, '\n')
    if idx == -1 {
        panic("heredoc doesn't contain newline")
    }

    unindent := heredoc[2] == '-'

    // We can optimize if the heredoc isn't marked for indentation
    if !unindent {
        return string(heredoc[idx+1 : len(heredoc)-idx+1])
    }

    // We need to unindent each line based on the indentation level of the marker
    lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
    whitespacePrefix := lines[len(lines)-1]

    isIndented := true
    for _, v := range lines {
        if strings.HasPrefix(v, whitespacePrefix) {
            continue
        }

        isIndented = false
        break
    }

    // If all lines are not at least as indented as the terminating mark, return the
    // heredoc as is, but trim the leading space from the marker on the final line.
    if !isIndented {
        return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
    }

    unindentedLines := make([]string, len(lines))
    for k, v := range lines {
        if k == len(lines)-1 {
            unindentedLines[k] = ""
            break
        }

        unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
    }

    return strings.Join(unindentedLines, "\n")
}
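The other guard added to Token.Value above covers an empty token text, which the in-code comment attributes to a JSON `null`: previously such a token reached the unquote call and panicked, now it decodes as an empty string. A hedged sketch of that case through the public API; the field name and input are illustrative.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	var out struct {
		Region string `hcl:"region"`
	}
	// With the new empty-text guard, a JSON null decodes to "" instead of
	// panicking inside Token.Value.
	if err := hcl.Decode(&out, `{"region": null}`); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("region=%q\n", out.Region)
}
```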
vendor/github.com/hashicorp/hcl/json/parser/flatten.go (generated, vendored): 6 lines changed

@@ -48,6 +48,12 @@ func flattenListType(
    item *ast.ObjectItem,
    items []*ast.ObjectItem,
    frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
    // If the list is empty, keep the original list
    if len(ot.List) == 0 {
        items = append(items, item)
        return items, frontier
    }

    // All the elements of this object must also be objects!
    for _, subitem := range ot.List {
        if _, ok := subitem.(*ast.ObjectType); !ok {
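The guard above keeps an item whose JSON value is an empty array instead of flattening it away. A hedged sketch of the kind of input this concerns; the target type and key are illustrative, and the observable effect is simply that an empty array decodes to an empty slice without upsetting the flattener.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	var out struct {
		Groups []map[string]interface{} `hcl:"group"`
	}
	// An empty JSON array: the flattener now keeps the item as-is.
	if err := hcl.Decode(&out, `{"group": []}`); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("groups: %d\n", len(out.Groups))
}
```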
vendor/github.com/hashicorp/hcl/json/parser/parser.go (generated, vendored): 16 lines changed

@@ -5,6 +5,7 @@ import (
    "fmt"

    "github.com/hashicorp/hcl/hcl/ast"
    hcltoken "github.com/hashicorp/hcl/hcl/token"
    "github.com/hashicorp/hcl/json/scanner"
    "github.com/hashicorp/hcl/json/token"
)

@@ -85,6 +86,7 @@ func (p *Parser) objectList() (*ast.ObjectList, error) {
            break
        }
    }

    return node, nil
}

@@ -103,6 +105,14 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {

    switch p.tok.Type {
    case token.COLON:
        pos := p.tok.Pos
        o.Assign = hcltoken.Pos{
            Filename: pos.Filename,
            Offset:   pos.Offset,
            Line:     pos.Line,
            Column:   pos.Column,
        }

        o.Val, err = p.objectValue()
        if err != nil {
            return nil, err

@@ -128,6 +138,12 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
            Token: p.tok.HCLToken(),
        })
    case token.COLON:
        // If we have a zero keycount it means that we never got
        // an object key, i.e. `{ :`. This is a syntax error.
        if keyCount == 0 {
            return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
        }

        // Done
        return keys, nil
    case token.ILLEGAL:
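The zero-keycount guard above turns input such as `{ : "v" }` into an explicit syntax error from the JSON parser. A hedged sketch of exercising it directly; the exact error wording shown in the comment is an assumption based on the format string in the hunk.

```go
package main

import (
	"fmt"

	jsonparser "github.com/hashicorp/hcl/json/parser"
)

func main() {
	// A colon with no preceding object key is now rejected up front.
	_, err := jsonparser.Parse([]byte(`{ : "v" }`))
	fmt.Println("err:", err) // expected: STRING got: ... (or similar)
}
```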
vendor/github.com/hashicorp/hcl/json/scanner/scanner.go (generated, vendored): 2 lines changed

@@ -296,7 +296,7 @@ func (s *Scanner) scanString() {
            return
        }

        if ch == '"' && braces == 0 {
        if ch == '"' {
            break
        }
vendor/vendor.json (vendored): 38 lines changed

@@ -656,42 +656,58 @@
    "revision": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4"
},
{
    "checksumSHA1": "5LrCq/ydlbL6pq1cdmuxiw7QV98=",
    "checksumSHA1": "8OPDk+bKyRGJoKcS4QNw9F7dpE8=",
    "path": "github.com/hashicorp/hcl",
    "revision": "d7400db7143f8e869812e50a53acd6c8d92af3b8",
    "revisionTime": "2016-06-07T00:19:40Z"
    "revision": "6e968a3fcdcbab092f5307fd0d85479d5af1e4dc",
    "revisionTime": "2016-11-01T18:00:25Z"
},
{
    "checksumSHA1": "XQmjDva9JCGGkIecOgwtBEMCJhU=",
    "path": "github.com/hashicorp/hcl/hcl/ast",
    "revision": "1c284ec98f4b398443cbabb0d9197f7f4cc0077c"
    "revision": "6e968a3fcdcbab092f5307fd0d85479d5af1e4dc",
    "revisionTime": "2016-11-01T18:00:25Z"
},
{
    "checksumSHA1": "croNloscHsjX87X+4/cKOURf1EY=",
    "path": "github.com/hashicorp/hcl/hcl/parser",
    "revision": "1c284ec98f4b398443cbabb0d9197f7f4cc0077c"
    "revision": "6e968a3fcdcbab092f5307fd0d85479d5af1e4dc",
    "revisionTime": "2016-11-01T18:00:25Z"
},
{
    "checksumSHA1": "lgR7PSAZ0RtvAc9OCtCnNsF/x8g=",
    "path": "github.com/hashicorp/hcl/hcl/scanner",
    "revision": "1c284ec98f4b398443cbabb0d9197f7f4cc0077c"
    "revision": "6e968a3fcdcbab092f5307fd0d85479d5af1e4dc",
    "revisionTime": "2016-11-01T18:00:25Z"
},
{
    "checksumSHA1": "JlZmnzqdmFFyb1+2afLyR3BOE/8=",
    "path": "github.com/hashicorp/hcl/hcl/strconv",
    "revision": "1c284ec98f4b398443cbabb0d9197f7f4cc0077c"
    "revision": "6e968a3fcdcbab092f5307fd0d85479d5af1e4dc",
    "revisionTime": "2016-11-01T18:00:25Z"
},
{
    "checksumSHA1": "c6yprzj06ASwCo18TtbbNNBHljA=",
    "path": "github.com/hashicorp/hcl/hcl/token",
    "revision": "1c284ec98f4b398443cbabb0d9197f7f4cc0077c"
    "revision": "6e968a3fcdcbab092f5307fd0d85479d5af1e4dc",
    "revisionTime": "2016-11-01T18:00:25Z"
},
{
    "checksumSHA1": "138aCV5n8n7tkGYMsMVQQnnLq+0=",
    "path": "github.com/hashicorp/hcl/json/parser",
    "revision": "1c284ec98f4b398443cbabb0d9197f7f4cc0077c"
    "revision": "6e968a3fcdcbab092f5307fd0d85479d5af1e4dc",
    "revisionTime": "2016-11-01T18:00:25Z"
},
{
    "checksumSHA1": "YdvFsNOMSWMLnY6fcliWQa0O5Fw=",
    "path": "github.com/hashicorp/hcl/json/scanner",
    "revision": "1c284ec98f4b398443cbabb0d9197f7f4cc0077c"
    "revision": "6e968a3fcdcbab092f5307fd0d85479d5af1e4dc",
    "revisionTime": "2016-11-01T18:00:25Z"
},
{
    "checksumSHA1": "fNlXQCQEnb+B3k5UDL/r15xtSJY=",
    "path": "github.com/hashicorp/hcl/json/token",
    "revision": "1c284ec98f4b398443cbabb0d9197f7f4cc0077c"
    "revision": "6e968a3fcdcbab092f5307fd0d85479d5af1e4dc",
    "revisionTime": "2016-11-01T18:00:25Z"
},
{
    "path": "github.com/hashicorp/logutils",