mirror of https://github.com/Luzifer/worktime.git synced 2024-10-18 08:04:22 +00:00

Switch to dep from Godeps, update dependencies

Signed-off-by: Knut Ahlers <knut@ahlers.me>
Knut Ahlers, 2017-09-22 13:18:15 +02:00
parent 89e32c2d8e, commit 6249d99e0a
Signed by: luzifer (GPG key ID: DC2729FDD34BE99E)
1153 changed files with 526367 additions and 45588 deletions

160
Godeps/Godeps.json generated
@@ -1,160 +0,0 @@
{
"ImportPath": "github.com/Luzifer/worktime",
"GoVersion": "go1.7",
"GodepVersion": "v74",
"Deps": [
{
"ImportPath": "github.com/Luzifer/go_helpers/str",
"Comment": "v1.4.0",
"Rev": "d76f718bb2d7d043fdf9dfdc01af03f20047432b"
},
{
"ImportPath": "github.com/cnf/structhash",
"Rev": "f5315d4d2328a72285a323bf0542c3fb182634b7"
},
{
"ImportPath": "github.com/fsnotify/fsnotify",
"Comment": "v1.3.1-1-gf12c623",
"Rev": "f12c6236fe7b5cf6bcf30e5935d08cb079d78334"
},
{
"ImportPath": "github.com/hashicorp/hcl",
"Rev": "ef8133da8cda503718a74741312bf50821e6de79"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/ast",
"Rev": "ef8133da8cda503718a74741312bf50821e6de79"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/parser",
"Rev": "ef8133da8cda503718a74741312bf50821e6de79"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/scanner",
"Rev": "ef8133da8cda503718a74741312bf50821e6de79"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/strconv",
"Rev": "ef8133da8cda503718a74741312bf50821e6de79"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/token",
"Rev": "ef8133da8cda503718a74741312bf50821e6de79"
},
{
"ImportPath": "github.com/hashicorp/hcl/json/parser",
"Rev": "ef8133da8cda503718a74741312bf50821e6de79"
},
{
"ImportPath": "github.com/hashicorp/hcl/json/scanner",
"Rev": "ef8133da8cda503718a74741312bf50821e6de79"
},
{
"ImportPath": "github.com/hashicorp/hcl/json/token",
"Rev": "ef8133da8cda503718a74741312bf50821e6de79"
},
{
"ImportPath": "github.com/inconshreveable/mousetrap",
"Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
},
{
"ImportPath": "github.com/kr/fs",
"Rev": "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
},
{
"ImportPath": "github.com/lancecarlson/couchgo",
"Rev": "abfc05c27bf0b4bcb937963ae37917ded5397310"
},
{
"ImportPath": "github.com/magiconair/properties",
"Comment": "v1.7.0-5-g0723e35",
"Rev": "0723e352fa358f9322c938cc2dadda874e9151a9"
},
{
"ImportPath": "github.com/mitchellh/mapstructure",
"Rev": "ca63d7c062ee3c9f34db231e352b60012b4fd0c1"
},
{
"ImportPath": "github.com/pelletier/go-buffruneio",
"Rev": "df1e16fde7fc330a0ca68167c23bf7ed6ac31d6d"
},
{
"ImportPath": "github.com/pelletier/go-toml",
"Comment": "v0.3.5-16-g45932ad",
"Rev": "45932ad32dfdd20826f5671da37a5f3ce9f26a8d"
},
{
"ImportPath": "github.com/pkg/errors",
"Comment": "v0.7.1-1-ga887431",
"Rev": "a887431f7f6ef7687b556dbf718d9f351d4858a0"
},
{
"ImportPath": "github.com/pkg/sftp",
"Rev": "8197a2e580736b78d704be0fc47b2324c0591a32"
},
{
"ImportPath": "github.com/spf13/afero",
"Rev": "52e4a6cfac46163658bd4f123c49b6ee7dc75f78"
},
{
"ImportPath": "github.com/spf13/afero/mem",
"Rev": "52e4a6cfac46163658bd4f123c49b6ee7dc75f78"
},
{
"ImportPath": "github.com/spf13/afero/sftp",
"Rev": "52e4a6cfac46163658bd4f123c49b6ee7dc75f78"
},
{
"ImportPath": "github.com/spf13/cast",
"Rev": "60e7a69a428e9ac1cf7e0c865fc2fe810d34363e"
},
{
"ImportPath": "github.com/spf13/cobra",
"Rev": "9c28e4bbd74e5c3ed7aacbc552b2cab7cfdfe744"
},
{
"ImportPath": "github.com/spf13/jwalterweatherman",
"Rev": "33c24e77fb80341fe7130ee7c594256ff08ccc46"
},
{
"ImportPath": "github.com/spf13/pflag",
"Rev": "c7e63cf4530bcd3ba943729cee0efeff2ebea63f"
},
{
"ImportPath": "github.com/spf13/viper",
"Rev": "a78f70b5b977efe08e313a9e2341c3f5457abdaf"
},
{
"ImportPath": "golang.org/x/crypto/curve25519",
"Rev": "6ab629be5e31660579425a738ba8870beb5b7404"
},
{
"ImportPath": "golang.org/x/crypto/ed25519",
"Rev": "6ab629be5e31660579425a738ba8870beb5b7404"
},
{
"ImportPath": "golang.org/x/crypto/ed25519/internal/edwards25519",
"Rev": "6ab629be5e31660579425a738ba8870beb5b7404"
},
{
"ImportPath": "golang.org/x/crypto/ssh",
"Rev": "6ab629be5e31660579425a738ba8870beb5b7404"
},
{
"ImportPath": "golang.org/x/sys/unix",
"Rev": "8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9"
},
{
"ImportPath": "golang.org/x/text/transform",
"Rev": "2df9074612f50810d82416d2229398a1e7188c5c"
},
{
"ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "2df9074612f50810d82416d2229398a1e7188c5c"
},
{
"ImportPath": "gopkg.in/yaml.v2",
"Rev": "31c299268d302dd0aa9a0dcf765a3d58971ac83f"
}
]
}

5
Godeps/Readme generated
@@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

123
Gopkg.lock generated Normal file
@@ -0,0 +1,123 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "github.com/Luzifer/go_helpers"
packages = ["str"]
revision = "d76f718bb2d7d043fdf9dfdc01af03f20047432b"
version = "v1.4.0"
[[projects]]
branch = "master"
name = "github.com/cnf/structhash"
packages = ["."]
revision = "7710f1f78fb9c581deeeab57ecfb7978901b36bc"
[[projects]]
name = "github.com/fsnotify/fsnotify"
packages = ["."]
revision = "629574ca2a5df945712d3079857300b5e4da0236"
version = "v1.4.2"
[[projects]]
branch = "master"
name = "github.com/hashicorp/hcl"
packages = [".","hcl/ast","hcl/parser","hcl/scanner","hcl/strconv","hcl/token","json/parser","json/scanner","json/token"]
revision = "68e816d1c783414e79bc65b3994d9ab6b0a722ab"
[[projects]]
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
branch = "master"
name = "github.com/lancecarlson/couchgo"
packages = ["."]
revision = "36277681d9bfa5672b4d8818b0aa41cb42c5987b"
[[projects]]
name = "github.com/magiconair/properties"
packages = ["."]
revision = "be5ece7dd465ab0765a9682137865547526d1dfb"
version = "v1.7.3"
[[projects]]
branch = "master"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
revision = "d0303fe809921458f417bcf828397a65db30a7e4"
[[projects]]
name = "github.com/pelletier/go-buffruneio"
packages = ["."]
revision = "c37440a7cf42ac63b919c752ca73a85067e05992"
version = "v0.2.0"
[[projects]]
name = "github.com/pelletier/go-toml"
packages = ["."]
revision = "5ccdfb18c776b740aecaf085c4d9a2779199c279"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/spf13/afero"
packages = [".","mem"]
revision = "ee1bd8ee15a1306d1f9201acc41ef39cd9f99a1b"
[[projects]]
name = "github.com/spf13/cast"
packages = ["."]
revision = "acbeb36b902d72a7a4c18e8f3241075e7ab763e4"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/spf13/cobra"
packages = ["."]
revision = "b78744579491c1ceeaaa3b40205e56b0591b93a3"
[[projects]]
branch = "master"
name = "github.com/spf13/jwalterweatherman"
packages = ["."]
revision = "12bd96e66386c1960ab0f74ced1362f66f552f7b"
[[projects]]
name = "github.com/spf13/pflag"
packages = ["."]
revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
version = "v1.0.0"
[[projects]]
name = "github.com/spf13/viper"
packages = ["."]
revision = "25b30aa063fc18e48662b86996252eabdcf2f0c7"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix"]
revision = "2320a9c15898af1b1b24f99700d5c1e957f9d8cf"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = ["internal/gen","internal/triegen","internal/ucd","transform","unicode/cldr","unicode/norm"]
revision = "1cbadb444a806fd9430d14ad08967ed91da4fa0a"
[[projects]]
branch = "v2"
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "3e315e0404469ae5f1a628dbed864831e1fa63c020395988bfdef37cf3bb245d"
solver-name = "gps-cdcl"
solver-version = 1

38
Gopkg.toml Normal file
@@ -0,0 +1,38 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
[[constraint]]
name = "github.com/Luzifer/go_helpers"
version = "1.4.0"
[[constraint]]
name = "github.com/cnf/structhash"
[[constraint]]
name = "github.com/lancecarlson/couchgo"
[[constraint]]
name = "github.com/spf13/cobra"
[[constraint]]
name = "github.com/spf13/viper"

6
vendor/github.com/Luzifer/go_helpers/.travis.yml generated vendored Normal file
@@ -0,0 +1,6 @@
language: go
go:
- 1.5
- 1.6
- tip

21
vendor/github.com/Luzifer/go_helpers/History.md generated vendored Normal file
@@ -0,0 +1,21 @@
# 1.4.0 / 2016-05-29
* Added environment helpers
# 1.3.0 / 2016-05-18
* Added AccessLogResponseWriter
# 1.2.0 / 2016-05-16
* Added helper to find binaries in path or directory
# 1.1.0 / 2016-05-06
* Added Haversine helper functions
1.0.0 / 2016-04-23
==================
* First versioned revision for use with gopkg.in

@@ -0,0 +1,37 @@
package accessLogger
import (
"fmt"
"net/http"
"strconv"
)
type AccessLogResponseWriter struct {
StatusCode int
Size int
http.ResponseWriter
}
func New(res http.ResponseWriter) *AccessLogResponseWriter {
return &AccessLogResponseWriter{
StatusCode: 200,
Size: 0,
ResponseWriter: res,
}
}
func (a *AccessLogResponseWriter) Write(out []byte) (int, error) {
s, err := a.ResponseWriter.Write(out)
a.Size += s
return s, err
}
func (a *AccessLogResponseWriter) WriteHeader(code int) {
a.StatusCode = code
a.ResponseWriter.WriteHeader(code)
}
func (a *AccessLogResponseWriter) HTTPResponseType() string {
return fmt.Sprintf("%sxx", strconv.FormatInt(int64(a.StatusCode), 10)[0])
}
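The wrapper above only records the status code and byte count; emitting the log line is left to the caller. A minimal usage sketch (the handler and log format are hypothetical, and the import path is assumed from the vendor layout):

```go
package main

import (
	"log"
	"net/http"

	"github.com/Luzifer/go_helpers/accessLogger"
)

func main() {
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// Wrap any handler so status code and response size are captured.
	logged := func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			alw := accessLogger.New(w)
			next.ServeHTTP(alw, r)
			log.Printf("%s %s -> %d (%d bytes)", r.Method, r.URL.Path, alw.StatusCode, alw.Size)
		})
	}

	log.Fatal(http.ListenAndServe(":8080", logged(hello)))
}
```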

26
vendor/github.com/Luzifer/go_helpers/env/env.go generated vendored Normal file
@@ -0,0 +1,26 @@
package env
import "strings"
// ListToMap converts a list of strings in format KEY=VALUE into a map
func ListToMap(list []string) map[string]string {
out := map[string]string{}
for _, entry := range list {
if len(entry) == 0 || entry[0] == '#' {
continue
}
parts := strings.SplitN(entry, "=", 2)
out[parts[0]] = strings.Trim(parts[1], "\"")
}
return out
}
// MapToList converts a map into a list of strings in format KEY=VALUE
func MapToList(envMap map[string]string) []string {
out := []string{}
for k, v := range envMap {
out = append(out, k+"="+v)
}
return out
}
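A quick illustration of the two helpers above (not part of the vendored package): os.Environ() already produces the KEY=VALUE format ListToMap expects, so an environment can be round-tripped through the map form.

```go
package main

import (
	"fmt"
	"os"

	"github.com/Luzifer/go_helpers/env"
)

func main() {
	// os.Environ() yields KEY=VALUE strings, the format ListToMap expects.
	vars := env.ListToMap(os.Environ())
	vars["EXAMPLE_FLAG"] = "1"

	// MapToList converts back; note that map iteration order is not stable.
	for _, kv := range env.MapToList(vars) {
		fmt.Println(kv)
	}
}
```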

@@ -0,0 +1,13 @@
package env_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
func TestEnv(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Env Suite")
}

55
vendor/github.com/Luzifer/go_helpers/env/env_test.go generated vendored Normal file
@@ -0,0 +1,55 @@
package env_test
import (
"sort"
. "github.com/Luzifer/go_helpers/env"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Env", func() {
Context("ListToMap", func() {
var (
list = []string{
"FIRST_KEY=firstvalue",
"SECOND_KEY=secondvalue",
"WEIRD=",
"",
}
emap = map[string]string{
"FIRST_KEY": "firstvalue",
"SECOND_KEY": "secondvalue",
"WEIRD": "",
}
)
It("should convert the list in the expected way", func() {
Expect(ListToMap(list)).To(Equal(emap))
})
})
Context("MapToList", func() {
var (
list = []string{
"FIRST_KEY=firstvalue",
"SECOND_KEY=secondvalue",
"WEIRD=",
}
emap = map[string]string{
"FIRST_KEY": "firstvalue",
"SECOND_KEY": "secondvalue",
"WEIRD": "",
}
)
It("should convert the map in the expected way", func() {
l := MapToList(emap)
sort.Strings(l) // Workaround: The test needs the elements to be in same order
Expect(l).To(Equal(list))
})
})
})

@@ -0,0 +1,13 @@
package float_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
func TestFloat(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Float Suite")
}

14
vendor/github.com/Luzifer/go_helpers/float/round.go generated vendored Normal file
@@ -0,0 +1,14 @@
package float
import "math"
// Round returns a float rounded according to "Round to nearest, ties away from zero" IEEE floating point rounding rule
func Round(x float64) float64 {
var absx, y float64
absx = math.Abs(x)
y = math.Floor(absx)
if absx-y >= 0.5 {
y += 1.0
}
return math.Copysign(y, x)
}

@@ -0,0 +1,35 @@
package float_test
import (
"math"
. "github.com/Luzifer/go_helpers/float"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Round", func() {
It("should match the example table of IEEE 754 rules", func() {
Expect(Round(11.5)).To(Equal(12.0))
Expect(Round(12.5)).To(Equal(13.0))
Expect(Round(-11.5)).To(Equal(-12.0))
Expect(Round(-12.5)).To(Equal(-13.0))
})
It("should have correct rounding for numbers near 0.5", func() {
Expect(Round(0.499999999997)).To(Equal(0.0))
Expect(Round(-0.499999999997)).To(Equal(0.0))
})
It("should be able to handle +/-Inf", func() {
Expect(Round(math.Inf(1))).To(Equal(math.Inf(1)))
Expect(Round(math.Inf(-1))).To(Equal(math.Inf(-1)))
})
It("should be able to handle NaN", func() {
Expect(math.IsNaN(Round(math.NaN()))).To(Equal(true))
})
})

@@ -0,0 +1,21 @@
package position
import "math"
const (
earthRadius = float64(6371)
)
func Haversine(lonFrom float64, latFrom float64, lonTo float64, latTo float64) (distance float64) {
var deltaLat = (latTo - latFrom) * (math.Pi / 180)
var deltaLon = (lonTo - lonFrom) * (math.Pi / 180)
var a = math.Sin(deltaLat/2)*math.Sin(deltaLat/2) +
math.Cos(latFrom*(math.Pi/180))*math.Cos(latTo*(math.Pi/180))*
math.Sin(deltaLon/2)*math.Sin(deltaLon/2)
var c = 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a))
distance = earthRadius * c
return
}

@@ -0,0 +1,34 @@
package position_test
import (
. "github.com/Luzifer/go_helpers/position"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Haversine", func() {
var testCases = []struct {
SourceLat float64
SourceLon float64
DestLat float64
DestLon float64
Distance float64
}{
{50.066389, -5.714722, 58.643889, -3.070000, 968.8535441168448},
{50.063995, -5.609464, 53.553027, 9.993782, 1137.894906816002},
{53.553027, 9.993782, 53.554528, 9.991357, 0.23133816528015647},
{50, 9, 51, 9, 111.19492664455873},
{0, 9, 0, 10, 111.19492664455873},
{1, 0, -1, 0, 222.38985328911747},
}
It("should have the documented distance", func() {
for i := range testCases {
tc := testCases[i]
Expect(Haversine(tc.SourceLon, tc.SourceLat, tc.DestLon, tc.DestLat)).To(Equal(tc.Distance))
}
})
})

@@ -0,0 +1,13 @@
package position_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
func TestPosition(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Position Suite")
}

52
vendor/github.com/Luzifer/go_helpers/str/slice_test.go generated vendored Normal file
@@ -0,0 +1,52 @@
package str_test
import (
. "github.com/Luzifer/go_helpers/str"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Slice", func() {
Context("AppendIfMissing", func() {
var sl = []string{
"test1",
"test2",
"test3",
}
It("should not append existing elements", func() {
Expect(len(AppendIfMissing(sl, "test1"))).To(Equal(3))
Expect(len(AppendIfMissing(sl, "test2"))).To(Equal(3))
Expect(len(AppendIfMissing(sl, "test3"))).To(Equal(3))
})
It("should append not existing elements", func() {
Expect(len(AppendIfMissing(sl, "test4"))).To(Equal(4))
Expect(len(AppendIfMissing(sl, "test5"))).To(Equal(4))
Expect(len(AppendIfMissing(sl, "test6"))).To(Equal(4))
})
})
Context("StringInSlice", func() {
var sl = []string{
"test1",
"test2",
"test3",
}
It("should find elements of slice", func() {
Expect(StringInSlice("test1", sl)).To(Equal(true))
Expect(StringInSlice("test2", sl)).To(Equal(true))
Expect(StringInSlice("test3", sl)).To(Equal(true))
})
It("should not find elements not in slice", func() {
Expect(StringInSlice("test4", sl)).To(Equal(false))
Expect(StringInSlice("test5", sl)).To(Equal(false))
Expect(StringInSlice("test6", sl)).To(Equal(false))
})
})
})

@@ -0,0 +1,13 @@
package str_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
func TestStr(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Str Suite")
}

54
vendor/github.com/Luzifer/go_helpers/which/which.go generated vendored Normal file
@@ -0,0 +1,54 @@
package which
import (
"errors"
"os"
"path"
"strings"
)
// Common named errors to match in programs using this library
var (
ErrBinaryNotFound = errors.New("Requested binary was not found")
ErrNoSearchSpecified = errors.New("You need to specify a binary to search")
)
// FindInPath searches the specified binary in directories listed in $PATH and returns first match
func FindInPath(binary string) (string, error) {
pathEnv := os.Getenv("PATH")
if len(pathEnv) == 0 {
return "", errors.New("Found empty $PATH, not able to search $PATH")
}
for _, part := range strings.Split(pathEnv, ":") {
if len(part) == 0 {
continue
}
if found, err := FindInDirectory(binary, part); err != nil {
return "", err
} else if found {
return path.Join(part, binary), nil
}
}
return "", ErrBinaryNotFound
}
// FindInDirectory checks whether the specified file is present in the directory
func FindInDirectory(binary, directory string) (bool, error) {
if len(binary) == 0 {
return false, ErrNoSearchSpecified
}
_, err := os.Stat(path.Join(directory, binary))
switch {
case err == nil:
return true, nil
case os.IsNotExist(err):
return false, nil
default:
return false, err
}
}
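A short usage sketch for the two lookup functions above (illustrative only; the binary name and directory are arbitrary):

```go
package main

import (
	"fmt"
	"log"

	"github.com/Luzifer/go_helpers/which"
)

func main() {
	// Look up a binary in all $PATH entries.
	path, err := which.FindInPath("bash")
	switch err {
	case nil:
		fmt.Println("found:", path)
	case which.ErrBinaryNotFound:
		fmt.Println("bash is not in $PATH")
	default:
		log.Fatal(err)
	}

	// Or check a single directory directly.
	found, err := which.FindInDirectory("bash", "/usr/local/bin")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("in /usr/local/bin:", found)
}
```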

@@ -0,0 +1,13 @@
package which_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
func TestWhich(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Which Suite")
}

@@ -0,0 +1,63 @@
package which_test
import (
. "github.com/Luzifer/go_helpers/which"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Which", func() {
var (
result string
err error
found bool
)
Context("With a file available on linux systems", func() {
BeforeEach(func() {
found, err = FindInDirectory("bash", "/bin")
})
It("should not have errored", func() {
Expect(err).NotTo(HaveOccurred())
})
It("should have found the binary at /bin/bash", func() {
Expect(found).To(BeTrue())
})
})
Context("Searching bash on the system", func() {
BeforeEach(func() {
result, err = FindInPath("bash")
})
It("should not have errored", func() {
Expect(err).NotTo(HaveOccurred())
})
It("should have a result", func() {
Expect(len(result)).NotTo(Equal(0))
})
})
Context("Searching a non existent file", func() {
BeforeEach(func() {
result, err = FindInPath("dfqoiwurgtqi3uegrds")
})
It("should have errored", func() {
Expect(err).To(Equal(ErrBinaryNotFound))
})
})
Context("Searching an empty file", func() {
BeforeEach(func() {
result, err = FindInPath("")
})
It("should have errored", func() {
Expect(err).To(Equal(ErrNoSearchSpecified))
})
})
})

1
vendor/github.com/cnf/structhash/.hound.yml generated vendored Normal file
@@ -0,0 +1 @@
# added hound style checking

@@ -88,8 +88,10 @@ func writeValue(buf *bytes.Buffer, val reflect.Value, fltr structFieldFilter) {
 		buf.WriteByte('"')
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
 		buf.WriteString(strconv.FormatInt(val.Int(), 10))
-	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
 		buf.WriteString(strconv.FormatUint(val.Uint(), 10))
+	case reflect.Float32, reflect.Float64:
+		buf.WriteString(strconv.FormatFloat(val.Float(), 'E', -1, 64))
 	case reflect.Bool:
 		if val.Bool() {
 			buf.WriteByte('t')
@@ -164,6 +166,8 @@ func writeValue(buf *bytes.Buffer, val reflect.Value, fltr structFieldFilter) {
 			writeValue(buf, items[i].value, fltr)
 		}
 		buf.WriteByte('}')
+	case reflect.Interface:
+		writeValue(buf, reflect.ValueOf(val.Interface()), fltr)
 	default:
 		buf.WriteString(val.String())
 	}
@@ -171,7 +175,7 @@ func writeValue(buf *bytes.Buffer, val reflect.Value, fltr structFieldFilter) {
 func formatValue(val reflect.Value, fltr structFieldFilter) string {
 	if val.Kind() == reflect.String {
-		return "\"" + val.Interface().(string) + "\""
+		return "\"" + val.String() + "\""
 	}
 	var buf bytes.Buffer
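Taken together, these hunks make float values and values stored behind interface{} contribute to the serialization instead of falling through to the default case. A hedged sketch of the observable effect, using the package's Hash function shown in the example file below:

```go
package main

import (
	"fmt"

	"github.com/cnf/structhash"
)

type sample struct {
	Ratio float64
	Any   interface{}
}

func main() {
	a := sample{Ratio: 0.25, Any: "x"}
	b := sample{Ratio: 0.50, Any: "x"}

	ha, _ := structhash.Hash(a, 1)
	hb, _ := structhash.Hash(b, 1)

	// With the Float32/Float64 case in writeValue, differing float values
	// now produce different hashes.
	fmt.Println(ha != hb)
}
```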

@@ -0,0 +1,95 @@
package structhash
import (
"encoding/json"
"testing"
)
type BenchData struct {
Bool bool
String string
Int int
Uint uint
Map map[string]*BenchData
Slice []*BenchData
Struct *BenchData
}
type BenchTags struct {
Bool bool `json:"f1" hash:"name:f1"`
String string `json:"f2" hash:"name:f2"`
Int int `json:"f3" hash:"name:f3"`
Uint uint `json:"f4" hash:"name:f4"`
}
func benchDataSimple() *BenchData {
return &BenchData{true, "simple", -123, 321, nil, nil, nil}
}
func benchDataFull() *BenchData {
foo := benchDataSimple()
bar := benchDataSimple()
m := make(map[string]*BenchData)
m["foo"] = foo
m["bar"] = bar
s := []*BenchData{
foo,
bar,
}
return &BenchData{true, "hello", -123, 321, m, s, foo}
}
func benchDataTags() *BenchTags {
return &BenchTags{true, "tags", -123, 321}
}
func BenchmarkSimpleJSON(b *testing.B) {
s := benchDataSimple()
for i := 0; i < b.N; i++ {
json.Marshal(s)
}
}
func BenchmarkSimpleDump(b *testing.B) {
s := benchDataSimple()
for i := 0; i < b.N; i++ {
Dump(s, 1)
}
}
func BenchmarkFullJSON(b *testing.B) {
s := benchDataFull()
for i := 0; i < b.N; i++ {
json.Marshal(s)
}
}
func BenchmarkFullDump(b *testing.B) {
s := benchDataFull()
for i := 0; i < b.N; i++ {
Dump(s, 1)
}
}
func BenchmarkTagsJSON(b *testing.B) {
s := benchDataTags()
for i := 0; i < b.N; i++ {
json.Marshal(s)
}
}
func BenchmarkTagsDump(b *testing.B) {
s := benchDataTags()
for i := 0; i < b.N; i++ {
Dump(s, 1)
}
}

@@ -0,0 +1,138 @@
package structhash
import (
"crypto/md5"
"crypto/sha1"
"fmt"
)
func ExampleHash() {
type Person struct {
Name string
Age int
Emails []string
Extra map[string]string
Spouse *Person
}
bill := &Person{
Name: "Bill",
Age: 24,
Emails: []string{"bob@foo.org", "bob@bar.org"},
Extra: map[string]string{
"facebook": "Bob42",
},
}
bob := &Person{
Name: "Bob",
Age: 42,
Emails: []string{"bob@foo.org", "bob@bar.org"},
Extra: map[string]string{
"facebook": "Bob42",
},
Spouse: bill,
}
hash, err := Hash(bob, 1)
if err != nil {
panic(err)
}
fmt.Printf("%s", hash)
// Output:
// v1_6a50d73f3bd0b9ebd001a0b610f387f0
}
func ExampleHash_tags() {
type Person struct {
Ignored string `hash:"-"`
NewName string `hash:"name:OldName version:1"`
Age int `hash:"version:1"`
Emails []string `hash:"version:1"`
Extra map[string]string `hash:"version:1 lastversion:2"`
Spouse *Person `hash:"version:2"`
}
bill := &Person{
NewName: "Bill",
Age: 24,
Emails: []string{"bob@foo.org", "bob@bar.org"},
Extra: map[string]string{
"facebook": "Bob42",
},
}
bob := &Person{
NewName: "Bob",
Age: 42,
Emails: []string{"bob@foo.org", "bob@bar.org"},
Extra: map[string]string{
"facebook": "Bob42",
},
Spouse: bill,
}
hashV1, err := Hash(bob, 1)
if err != nil {
panic(err)
}
hashV2, err := Hash(bob, 2)
if err != nil {
panic(err)
}
hashV3, err := Hash(bob, 3)
if err != nil {
panic(err)
}
fmt.Printf("%s\n", hashV1)
fmt.Printf("%s\n", hashV2)
fmt.Printf("%s\n", hashV3)
// Output:
// v1_45d8a54c5f5fd287f197b26d128882cd
// v2_babd7618f29036f5564816bee6c8a037
// v3_012b06239f942549772c9139d66c121e
}
func ExampleDump() {
type Person struct {
Name string
Age int
Emails []string
Extra map[string]string
Spouse *Person
}
bill := &Person{
Name: "Bill",
Age: 24,
Emails: []string{"bob@foo.org", "bob@bar.org"},
Extra: map[string]string{
"facebook": "Bob42",
},
}
bob := &Person{
Name: "Bob",
Age: 42,
Emails: []string{"bob@foo.org", "bob@bar.org"},
Extra: map[string]string{
"facebook": "Bob42",
},
Spouse: bill,
}
fmt.Printf("md5: %x\n", md5.Sum(Dump(bob, 1)))
fmt.Printf("sha1: %x\n", sha1.Sum(Dump(bob, 1)))
// Output:
// md5: 6a50d73f3bd0b9ebd001a0b610f387f0
// sha1: c45f097a37366eaaf6ffbc7357c2272cd8fb64f6
}
func ExampleVersion() {
// A hash string gotten from Hash(). Returns the version as an int.
i := Version("v1_55743877f3ffd5fc834e97bc43a6e7bd")
fmt.Printf("%d", i)
// Output:
// 1
}
func ExampleVersion_errors() {
// A hash string gotten from Hash(). Returns -1 on error.
i := Version("va_55743877f3ffd5fc834e97bc43a6e7bd")
fmt.Printf("%d", i)
// Output:
// -1
}

247
vendor/github.com/cnf/structhash/structhash_test.go generated vendored Normal file
@@ -0,0 +1,247 @@
package structhash
import (
"fmt"
"testing"
)
type First struct {
Bool bool `version:"1"`
String string `version:"2"`
Int int `version:"1" lastversion:"1"`
Float float64 `version:"1"`
Struct *Second `version:"1"`
Uint uint `version:"1"`
}
type Second struct {
Map map[string]string `version:"1"`
Slice []int `version:"1"`
}
type Tags1 struct {
Int int `hash:"-"`
Str string `hash:"name:Foo version:1 lastversion:2"`
Bar string `hash:"version:1"`
}
type Tags2 struct {
Foo string
Bar string
}
type Tags3 struct {
Bar string
}
type Nils struct {
Str *string
Int *int
Bool *bool
Map map[string]string
Slice []string
}
type unexportedTags struct {
foo string
bar string
aMap map[string]string
}
type interfaceStruct struct {
Name string
Interface1 interface{}
InterfaceIgnore interface{} `hash:"-"`
}
func dataSetup() *First {
tmpmap := make(map[string]string)
tmpmap["foo"] = "bar"
tmpmap["baz"] = "go"
tmpslice := make([]int, 3)
tmpslice[0] = 0
tmpslice[1] = 1
tmpslice[2] = 2
return &First{
Bool: true,
String: "test",
Int: 123456789,
Float: 65.3458,
Struct: &Second{
Map: tmpmap,
Slice: tmpslice,
},
Uint: 1,
}
}
func TestHash(t *testing.T) {
v1Hash := "v1_e8e67581aee36d7237603381a9cbd9fc"
v2Hash := "v2_5e51490d7c24c4b7a9e63c04f55734eb"
data := dataSetup()
v1, err := Hash(data, 1)
if err != nil {
t.Error(err)
}
// fmt.Println(string(Dump(data, 1)))
if v1 != v1Hash {
t.Errorf("%s is not %s", v1, v1Hash)
}
v2, err := Hash(data, 2)
if err != nil {
t.Error(err)
}
// fmt.Println(string(Dump(data, 2)))
if v2 != v2Hash {
t.Errorf("%s is not %s", v2, v2Hash)
}
v1md5 := fmt.Sprintf("v1_%x", Md5(data, 1))
if v1md5 != v1Hash {
t.Errorf("%s is not %s", v1md5, v1Hash[3:])
}
v2md5 := fmt.Sprintf("v2_%x", Md5(data, 2))
if v2md5 != v2Hash {
t.Errorf("%s is not %s", v2md5, v2Hash[3:])
}
}
func TestTags(t *testing.T) {
t1 := Tags1{11, "foo", "bar"}
t1x := Tags1{22, "foo", "bar"}
t2 := Tags2{"foo", "bar"}
t3 := Tags3{"bar"}
t1_dump := string(Dump(t1, 1))
t1x_dump := string(Dump(t1x, 1))
if t1_dump != t1x_dump {
t.Errorf("%s is not %s", t1_dump, t1x_dump)
}
t2_dump := string(Dump(t2, 1))
if t1_dump != t2_dump {
t.Errorf("%s is not %s", t1_dump, t2_dump)
}
t1v3_dump := string(Dump(t1, 3))
t3v3_dump := string(Dump(t3, 3))
if t1v3_dump != t3v3_dump {
t.Errorf("%s is not %s", t1v3_dump, t3v3_dump)
}
}
func TestNils(t *testing.T) {
s1 := Nils{
Str: nil,
Int: nil,
Bool: nil,
Map: nil,
Slice: nil,
}
s2 := Nils{
Str: new(string),
Int: new(int),
Bool: new(bool),
Map: make(map[string]string),
Slice: make([]string, 0),
}
s1_dump := string(Dump(s1, 1))
s2_dump := string(Dump(s2, 1))
if s1_dump != s2_dump {
t.Errorf("%s is not %s", s1_dump, s2_dump)
}
}
func TestUnexportedFields(t *testing.T) {
v1Hash := "v1_750efb7c919caf87f2ab0d119650c87d"
data := unexportedTags{
foo: "foo",
bar: "bar",
aMap: map[string]string{
"key1": "val",
},
}
v1, err := Hash(data, 1)
if err != nil {
t.Error(err)
}
if v1 != v1Hash {
t.Errorf("%s is not %s", v1, v1Hash)
}
v1md5 := fmt.Sprintf("v1_%x", Md5(data, 1))
if v1md5 != v1Hash {
t.Errorf("%s is not %s", v1md5, v1Hash[3:])
}
}
func TestInterfaceField(t *testing.T) {
a := interfaceStruct{
Name: "name",
Interface1: "a",
InterfaceIgnore: "b",
}
b := interfaceStruct{
Name: "name",
Interface1: "b",
InterfaceIgnore: "b",
}
c := interfaceStruct{
Name: "name",
Interface1: "b",
InterfaceIgnore: "c",
}
ha, err := Hash(a, 1)
if err != nil {
t.Error(err)
}
hb, err := Hash(b, 1)
if err != nil {
t.Error(err)
}
hc, err := Hash(c, 1)
if err != nil {
t.Error(err)
}
if ha == hb {
t.Errorf("%s equals %s", ha, hb)
}
if hb != hc {
t.Errorf("%s is not %s", hb, hc)
}
b.Interface1 = map[string]string{"key": "value"}
c.Interface1 = map[string]string{"key": "value"}
hb, err = Hash(b, 1)
if err != nil {
t.Error(err)
}
hc, err = Hash(c, 1)
if err != nil {
t.Error(err)
}
if hb != hc {
t.Errorf("%s is not %s", hb, hc)
}
c.Interface1 = map[string]string{"key1": "value1"}
hc, err = Hash(c, 1)
if err != nil {
t.Error(err)
}
if hb == hc {
t.Errorf("%s equals %s", hb, hc)
}
}

5
vendor/github.com/fsnotify/fsnotify/.editorconfig generated vendored Normal file
@@ -0,0 +1,5 @@
root = true
[*]
indent_style = tab
indent_size = 4

@@ -0,0 +1,11 @@
Before reporting an issue, please ensure you are using the latest release of fsnotify.
### Which operating system (GOOS) and version are you using?
Linux: lsb_release -a
macOS: sw_vers
Windows: systeminfo | findstr /B /C:OS
### Please describe the issue that occurred.
### Are you able to reproduce the issue? Please provide steps to reproduce and a code sample if possible.

@@ -0,0 +1,8 @@
#### What does this pull request do?
#### Where should the reviewer start?
#### How should this be manually tested?

@@ -2,7 +2,6 @@ sudo: false
 language: go
 go:
-  - 1.5.4
   - 1.6.3
   - tip

@@ -26,12 +26,14 @@ Kelvin Fo <vmirage@gmail.com>
 Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
 Matt Layher <mdlayher@gmail.com>
 Nathan Youngman <git@nathany.com>
+Patrick <patrick@dropbox.com>
 Paul Hammond <paul@paulhammond.org>
 Pawel Knap <pawelknap88@gmail.com>
 Pieter Droogendijk <pieter@binky.org.uk>
 Pursuit92 <JoshChase@techpursuit.net>
 Riku Voipio <riku.voipio@linaro.org>
 Rob Figueiredo <robfig@gmail.com>
+Slawek Ligus <root@ooz.ie>
 Soge Zhang <zhssoge@gmail.com>
 Tiffany Jernigan <tiffany.jernigan@intel.com>
 Tilak Sharma <tilaks@google.com>

@@ -1,8 +1,20 @@
 # Changelog
+## v1.4.2 / 2016-10-10
+* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
+## v1.4.1 / 2016-10-04
+* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
+## v1.4.0 / 2016-10-01
+* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
 ## v1.3.1 / 2016-06-28
-* windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
+* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
 ## v1.3.0 / 2016-04-19

@@ -40,7 +40,7 @@ Contribute upstream:
 3. Push to the branch (`git push fork my-new-feature`)
 4. Create a new Pull Request on GitHub
-This workflow is [thoroughly explained by Katrina Owen](https://blog.splice.com/contributing-open-source-git-repositories-go/).
+This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
 ### Testing

@@ -1,6 +1,6 @@
 # File system notifications for Go
-[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [![Coverage](http://gocover.io/_badge/github.com/fsnotify/fsnotify)](http://gocover.io/github.com/fsnotify/fsnotify)
+[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
 fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:

42
vendor/github.com/fsnotify/fsnotify/example_test.go generated vendored Normal file
@@ -0,0 +1,42 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !plan9
package fsnotify_test
import (
"log"
"github.com/fsnotify/fsnotify"
)
func ExampleNewWatcher() {
watcher, err := fsnotify.NewWatcher()
if err != nil {
log.Fatal(err)
}
defer watcher.Close()
done := make(chan bool)
go func() {
for {
select {
case event := <-watcher.Events:
log.Println("event:", event)
if event.Op&fsnotify.Write == fsnotify.Write {
log.Println("modified file:", event.Name)
}
case err := <-watcher.Errors:
log.Println("error:", err)
}
}
}()
err = watcher.Add("/tmp/foo")
if err != nil {
log.Fatal(err)
}
<-done
}

@@ -30,33 +30,33 @@ const (
 	Chmod
 )
-// String returns a string representation of the event in the form
-// "file: REMOVE|WRITE|..."
-func (e Event) String() string {
+func (op Op) String() string {
 	// Use a buffer for efficient string concatenation
 	var buffer bytes.Buffer
-	if e.Op&Create == Create {
+	if op&Create == Create {
 		buffer.WriteString("|CREATE")
 	}
-	if e.Op&Remove == Remove {
+	if op&Remove == Remove {
 		buffer.WriteString("|REMOVE")
 	}
-	if e.Op&Write == Write {
+	if op&Write == Write {
 		buffer.WriteString("|WRITE")
 	}
-	if e.Op&Rename == Rename {
+	if op&Rename == Rename {
 		buffer.WriteString("|RENAME")
 	}
-	if e.Op&Chmod == Chmod {
+	if op&Chmod == Chmod {
 		buffer.WriteString("|CHMOD")
 	}
-	// If buffer remains empty, return no event names
 	if buffer.Len() == 0 {
-		return fmt.Sprintf("%q: ", e.Name)
+		return ""
 	}
+	return buffer.String()[1:] // Strip leading pipe
+}
-	// Return a list of event names, with leading pipe character stripped
-	return fmt.Sprintf("%q: %s", e.Name, buffer.String()[1:])
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..."
+func (e Event) String() string {
+	return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
 }

40
vendor/github.com/fsnotify/fsnotify/fsnotify_test.go generated vendored Normal file
@@ -0,0 +1,40 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !plan9
package fsnotify
import "testing"
func TestEventStringWithValue(t *testing.T) {
for opMask, expectedString := range map[Op]string{
Chmod | Create: `"/usr/someFile": CREATE|CHMOD`,
Rename: `"/usr/someFile": RENAME`,
Remove: `"/usr/someFile": REMOVE`,
Write | Chmod: `"/usr/someFile": WRITE|CHMOD`,
} {
event := Event{Name: "/usr/someFile", Op: opMask}
if event.String() != expectedString {
t.Fatalf("Expected %s, got: %v", expectedString, event.String())
}
}
}
func TestEventOpStringWithValue(t *testing.T) {
expectedOpString := "WRITE|CHMOD"
event := Event{Name: "someFile", Op: Write | Chmod}
if event.Op.String() != expectedOpString {
t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String())
}
}
func TestEventOpStringWithNoValue(t *testing.T) {
expectedOpString := ""
event := Event{Name: "testFile", Op: 0}
if event.Op.String() != expectedOpString {
t.Fatalf("Expected %s, got: %v", expectedOpString, event.Op.String())
}
}

@@ -36,7 +36,7 @@ type Watcher struct {
 // NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
 func NewWatcher() (*Watcher, error) {
 	// Create inotify fd
-	fd, errno := unix.InotifyInit()
+	fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
 	if fd == -1 {
 		return nil, errno
 	}

@@ -0,0 +1,229 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package fsnotify
import (
"testing"
"time"
"golang.org/x/sys/unix"
)
type testFd [2]int
func makeTestFd(t *testing.T) testFd {
var tfd testFd
errno := unix.Pipe(tfd[:])
if errno != nil {
t.Fatalf("Failed to create pipe: %v", errno)
}
return tfd
}
func (tfd testFd) fd() int {
return tfd[0]
}
func (tfd testFd) closeWrite(t *testing.T) {
errno := unix.Close(tfd[1])
if errno != nil {
t.Fatalf("Failed to close write end of pipe: %v", errno)
}
}
func (tfd testFd) put(t *testing.T) {
buf := make([]byte, 10)
_, errno := unix.Write(tfd[1], buf)
if errno != nil {
t.Fatalf("Failed to write to pipe: %v", errno)
}
}
func (tfd testFd) get(t *testing.T) {
buf := make([]byte, 10)
_, errno := unix.Read(tfd[0], buf)
if errno != nil {
t.Fatalf("Failed to read from pipe: %v", errno)
}
}
func (tfd testFd) close() {
unix.Close(tfd[1])
unix.Close(tfd[0])
}
func makePoller(t *testing.T) (testFd, *fdPoller) {
tfd := makeTestFd(t)
poller, err := newFdPoller(tfd.fd())
if err != nil {
t.Fatalf("Failed to create poller: %v", err)
}
return tfd, poller
}
func TestPollerWithBadFd(t *testing.T) {
_, err := newFdPoller(-1)
if err != unix.EBADF {
t.Fatalf("Expected EBADF, got: %v", err)
}
}
func TestPollerWithData(t *testing.T) {
tfd, poller := makePoller(t)
defer tfd.close()
defer poller.close()
tfd.put(t)
ok, err := poller.wait()
if err != nil {
t.Fatalf("poller failed: %v", err)
}
if !ok {
t.Fatalf("expected poller to return true")
}
tfd.get(t)
}
func TestPollerWithWakeup(t *testing.T) {
tfd, poller := makePoller(t)
defer tfd.close()
defer poller.close()
err := poller.wake()
if err != nil {
t.Fatalf("wake failed: %v", err)
}
ok, err := poller.wait()
if err != nil {
t.Fatalf("poller failed: %v", err)
}
if ok {
t.Fatalf("expected poller to return false")
}
}
func TestPollerWithClose(t *testing.T) {
tfd, poller := makePoller(t)
defer tfd.close()
defer poller.close()
tfd.closeWrite(t)
ok, err := poller.wait()
if err != nil {
t.Fatalf("poller failed: %v", err)
}
if !ok {
t.Fatalf("expected poller to return true")
}
}
func TestPollerWithWakeupAndData(t *testing.T) {
tfd, poller := makePoller(t)
defer tfd.close()
defer poller.close()
tfd.put(t)
err := poller.wake()
if err != nil {
t.Fatalf("wake failed: %v", err)
}
// both data and wakeup
ok, err := poller.wait()
if err != nil {
t.Fatalf("poller failed: %v", err)
}
if !ok {
t.Fatalf("expected poller to return true")
}
// data is still in the buffer, wakeup is cleared
ok, err = poller.wait()
if err != nil {
t.Fatalf("poller failed: %v", err)
}
if !ok {
t.Fatalf("expected poller to return true")
}
tfd.get(t)
// data is gone, only wakeup now
err = poller.wake()
if err != nil {
t.Fatalf("wake failed: %v", err)
}
ok, err = poller.wait()
if err != nil {
t.Fatalf("poller failed: %v", err)
}
if ok {
t.Fatalf("expected poller to return false")
}
}
func TestPollerConcurrent(t *testing.T) {
tfd, poller := makePoller(t)
defer tfd.close()
defer poller.close()
oks := make(chan bool)
live := make(chan bool)
defer close(live)
go func() {
defer close(oks)
for {
ok, err := poller.wait()
if err != nil {
t.Fatalf("poller failed: %v", err)
}
oks <- ok
if !<-live {
return
}
}
}()
// Try a write
select {
case <-time.After(50 * time.Millisecond):
case <-oks:
t.Fatalf("poller did not wait")
}
tfd.put(t)
if !<-oks {
t.Fatalf("expected true")
}
tfd.get(t)
live <- true
// Try a wakeup
select {
case <-time.After(50 * time.Millisecond):
case <-oks:
t.Fatalf("poller did not wait")
}
err := poller.wake()
if err != nil {
t.Fatalf("wake failed: %v", err)
}
if <-oks {
t.Fatalf("expected false")
}
live <- true
// Try a close
select {
case <-time.After(50 * time.Millisecond):
case <-oks:
t.Fatalf("poller did not wait")
}
tfd.closeWrite(t)
if !<-oks {
t.Fatalf("expected true")
}
tfd.get(t)
}

360
vendor/github.com/fsnotify/fsnotify/inotify_test.go generated vendored Normal file
@@ -0,0 +1,360 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package fsnotify
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
)
func TestInotifyCloseRightAway(t *testing.T) {
w, err := NewWatcher()
if err != nil {
t.Fatalf("Failed to create watcher")
}
// Close immediately; it won't even reach the first unix.Read.
w.Close()
// Wait for the close to complete.
<-time.After(50 * time.Millisecond)
isWatcherReallyClosed(t, w)
}
func TestInotifyCloseSlightlyLater(t *testing.T) {
w, err := NewWatcher()
if err != nil {
t.Fatalf("Failed to create watcher")
}
// Wait until readEvents has reached unix.Read, and Close.
<-time.After(50 * time.Millisecond)
w.Close()
// Wait for the close to complete.
<-time.After(50 * time.Millisecond)
isWatcherReallyClosed(t, w)
}
func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) {
testDir := tempMkdir(t)
defer os.RemoveAll(testDir)
w, err := NewWatcher()
if err != nil {
t.Fatalf("Failed to create watcher")
}
w.Add(testDir)
// Wait until readEvents has reached unix.Read, and Close.
<-time.After(50 * time.Millisecond)
w.Close()
// Wait for the close to complete.
<-time.After(50 * time.Millisecond)
isWatcherReallyClosed(t, w)
}
func TestInotifyCloseAfterRead(t *testing.T) {
testDir := tempMkdir(t)
defer os.RemoveAll(testDir)
w, err := NewWatcher()
if err != nil {
t.Fatalf("Failed to create watcher")
}
err = w.Add(testDir)
if err != nil {
t.Fatalf("Failed to add .")
}
// Generate an event.
os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING"))
// Wait for readEvents to read the event, then close the watcher.
<-time.After(50 * time.Millisecond)
w.Close()
// Wait for the close to complete.
<-time.After(50 * time.Millisecond)
isWatcherReallyClosed(t, w)
}
func isWatcherReallyClosed(t *testing.T, w *Watcher) {
select {
case err, ok := <-w.Errors:
if ok {
t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err)
}
default:
t.Fatalf("w.Errors would have blocked; readEvents is still alive!")
}
select {
case _, ok := <-w.Events:
if ok {
t.Fatalf("w.Events is not closed; readEvents is still alive after closing")
}
default:
t.Fatalf("w.Events would have blocked; readEvents is still alive!")
}
}
func TestInotifyCloseCreate(t *testing.T) {
testDir := tempMkdir(t)
defer os.RemoveAll(testDir)
w, err := NewWatcher()
if err != nil {
t.Fatalf("Failed to create watcher: %v", err)
}
defer w.Close()
err = w.Add(testDir)
if err != nil {
t.Fatalf("Failed to add testDir: %v", err)
}
h, err := os.Create(filepath.Join(testDir, "testfile"))
if err != nil {
t.Fatalf("Failed to create file in testdir: %v", err)
}
h.Close()
select {
case _ = <-w.Events:
case err := <-w.Errors:
t.Fatalf("Error from watcher: %v", err)
case <-time.After(50 * time.Millisecond):
t.Fatalf("Took too long to wait for event")
}
// At this point, we've received one event, so the goroutine is ready.
// It's also blocking on unix.Read.
// Now we try to swap the file descriptor under its nose.
w.Close()
w, err = NewWatcher()
defer w.Close()
if err != nil {
t.Fatalf("Failed to create second watcher: %v", err)
}
<-time.After(50 * time.Millisecond)
err = w.Add(testDir)
if err != nil {
t.Fatalf("Error adding testDir again: %v", err)
}
}
// This test verifies the watcher can keep up with file creations/deletions
// when under load.
func TestInotifyStress(t *testing.T) {
maxNumToCreate := 1000
testDir := tempMkdir(t)
defer os.RemoveAll(testDir)
testFilePrefix := filepath.Join(testDir, "testfile")
w, err := NewWatcher()
if err != nil {
t.Fatalf("Failed to create watcher: %v", err)
}
defer w.Close()
err = w.Add(testDir)
if err != nil {
t.Fatalf("Failed to add testDir: %v", err)
}
doneChan := make(chan struct{})
// The buffer ensures that the file generation goroutine is never blocked.
errChan := make(chan error, 2*maxNumToCreate)
go func() {
for i := 0; i < maxNumToCreate; i++ {
testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
handle, err := os.Create(testFile)
if err != nil {
errChan <- fmt.Errorf("Create failed: %v", err)
continue
}
err = handle.Close()
if err != nil {
errChan <- fmt.Errorf("Close failed: %v", err)
continue
}
}
// If we delete a newly created file too quickly, inotify will skip the
// create event and only send the delete event.
time.Sleep(100 * time.Millisecond)
for i := 0; i < maxNumToCreate; i++ {
testFile := fmt.Sprintf("%s%d", testFilePrefix, i)
err = os.Remove(testFile)
if err != nil {
errChan <- fmt.Errorf("Remove failed: %v", err)
}
}
close(doneChan)
}()
creates := 0
removes := 0
finished := false
after := time.After(10 * time.Second)
for !finished {
select {
case <-after:
t.Fatalf("Not done")
case <-doneChan:
finished = true
case err := <-errChan:
t.Fatalf("Got an error from file creator goroutine: %v", err)
case err := <-w.Errors:
t.Fatalf("Got an error from watcher: %v", err)
case evt := <-w.Events:
if !strings.HasPrefix(evt.Name, testFilePrefix) {
t.Fatalf("Got an event for an unknown file: %s", evt.Name)
}
if evt.Op == Create {
creates++
}
if evt.Op == Remove {
removes++
}
}
}
// Drain remaining events from channels
count := 0
for count < 10 {
select {
case err := <-errChan:
t.Fatalf("Got an error from file creator goroutine: %v", err)
case err := <-w.Errors:
t.Fatalf("Got an error from watcher: %v", err)
case evt := <-w.Events:
if !strings.HasPrefix(evt.Name, testFilePrefix) {
t.Fatalf("Got an event for an unknown file: %s", evt.Name)
}
if evt.Op == Create {
creates++
}
if evt.Op == Remove {
removes++
}
count = 0
default:
count++
// Give the watcher chances to fill the channels.
time.Sleep(time.Millisecond)
}
}
if creates-removes > 1 || creates-removes < -1 {
t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes)
}
if creates < 50 {
t.Fatalf("Expected at least 50 creates, got %d", creates)
}
}
func TestInotifyRemoveTwice(t *testing.T) {
testDir := tempMkdir(t)
defer os.RemoveAll(testDir)
testFile := filepath.Join(testDir, "testfile")
handle, err := os.Create(testFile)
if err != nil {
t.Fatalf("Create failed: %v", err)
}
handle.Close()
w, err := NewWatcher()
if err != nil {
t.Fatalf("Failed to create watcher: %v", err)
}
defer w.Close()
err = w.Add(testFile)
if err != nil {
t.Fatalf("Failed to add testFile: %v", err)
}
err = os.Remove(testFile)
if err != nil {
t.Fatalf("Failed to remove testFile: %v", err)
}
err = w.Remove(testFile)
if err == nil {
t.Fatalf("no error on removing invalid file")
}
s1 := fmt.Sprintf("%s", err)
err = w.Remove(testFile)
if err == nil {
t.Fatalf("no error on removing invalid file")
}
s2 := fmt.Sprintf("%s", err)
if s1 != s2 {
t.Fatalf("receive different error - %s / %s", s1, s2)
}
}
func TestInotifyInnerMapLength(t *testing.T) {
testDir := tempMkdir(t)
defer os.RemoveAll(testDir)
testFile := filepath.Join(testDir, "testfile")
handle, err := os.Create(testFile)
if err != nil {
t.Fatalf("Create failed: %v", err)
}
handle.Close()
w, err := NewWatcher()
if err != nil {
t.Fatalf("Failed to create watcher: %v", err)
}
defer w.Close()
err = w.Add(testFile)
if err != nil {
t.Fatalf("Failed to add testFile: %v", err)
}
go func() {
for err := range w.Errors {
t.Fatalf("error received: %s", err)
}
}()
err = os.Remove(testFile)
if err != nil {
t.Fatalf("Failed to remove testFile: %v", err)
}
_ = <-w.Events // consume Remove event
<-time.After(50 * time.Millisecond) // wait IN_IGNORE propagated
w.mu.Lock()
defer w.mu.Unlock()
if len(w.watches) != 0 {
t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
}
if len(w.paths) != 0 {
t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
}
}

@@ -0,0 +1,147 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fsnotify
import (
"os"
"path/filepath"
"testing"
"time"
"golang.org/x/sys/unix"
)
// testExchangedataForWatcher tests the watcher with the exchangedata operation on OS X.
//
// This is widely used for atomic saves on OS X, e.g. TextMate and in Apple's NSDocument.
//
// See https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/exchangedata.2.html
// Also see: https://github.com/textmate/textmate/blob/cd016be29489eba5f3c09b7b70b06da134dda550/Frameworks/io/src/swap_file_data.cc#L20
func testExchangedataForWatcher(t *testing.T, watchDir bool) {
// Create directory to watch
testDir1 := tempMkdir(t)
// For the intermediate file
testDir2 := tempMkdir(t)
defer os.RemoveAll(testDir1)
defer os.RemoveAll(testDir2)
resolvedFilename := "TestFsnotifyEvents.file"
// TextMate does:
//
// 1. exchangedata (intermediate, resolved)
// 2. unlink intermediate
//
// Let's try to simulate that:
resolved := filepath.Join(testDir1, resolvedFilename)
intermediate := filepath.Join(testDir2, resolvedFilename+"~")
// Make sure we create the file before we start watching
createAndSyncFile(t, resolved)
watcher := newWatcher(t)
// Test both variants in isolation
if watchDir {
addWatch(t, watcher, testDir1)
} else {
addWatch(t, watcher, resolved)
}
// Receive errors on the error channel on a separate goroutine
go func() {
for err := range watcher.Errors {
t.Fatalf("error received: %s", err)
}
}()
// Receive events on the event channel on a separate goroutine
eventstream := watcher.Events
var removeReceived counter
var createReceived counter
done := make(chan bool)
go func() {
for event := range eventstream {
// Only count relevant events
if event.Name == filepath.Clean(resolved) {
if event.Op&Remove == Remove {
removeReceived.increment()
}
if event.Op&Create == Create {
createReceived.increment()
}
}
t.Logf("event received: %s", event)
}
done <- true
}()
// Repeat to make sure the watched file/directory "survives" the REMOVE/CREATE loop.
for i := 1; i <= 3; i++ {
// The intermediate file is created in a folder outside the watcher
createAndSyncFile(t, intermediate)
// 1. Swap
if err := unix.Exchangedata(intermediate, resolved, 0); err != nil {
t.Fatalf("[%d] exchangedata failed: %s", i, err)
}
time.Sleep(50 * time.Millisecond)
// 2. Delete the intermediate file
err := os.Remove(intermediate)
if err != nil {
t.Fatalf("[%d] remove %s failed: %s", i, intermediate, err)
}
time.Sleep(50 * time.Millisecond)
}
// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
time.Sleep(500 * time.Millisecond)
// The events will be (CHMOD + REMOVE + CREATE) X 2. Let's focus on the last two:
if removeReceived.value() < 3 {
t.Fatal("fsnotify remove events have not been received after 500 ms")
}
if createReceived.value() < 3 {
t.Fatal("fsnotify create events have not been received after 500 ms")
}
watcher.Close()
t.Log("waiting for the event channel to become closed...")
select {
case <-done:
t.Log("event channel closed")
case <-time.After(2 * time.Second):
t.Fatal("event stream was not closed after 2 seconds")
}
}
// TestExchangedataInWatchedDir test exchangedata operation on file in watched dir.
func TestExchangedataInWatchedDir(t *testing.T) {
testExchangedataForWatcher(t, true)
}
// TestExchangedataInWatchedDir test exchangedata operation on watched file.
func TestExchangedataInWatchedFile(t *testing.T) {
testExchangedataForWatcher(t, false)
}
func createAndSyncFile(t *testing.T, filepath string) {
f1, err := os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
t.Fatalf("creating %s failed: %s", filepath, err)
}
f1.Sync()
f1.Close()
}

1237
vendor/github.com/fsnotify/fsnotify/integration_test.go generated vendored Normal file

File diff suppressed because it is too large

@@ -0,0 +1,21 @@
### HCL Template
```hcl
# Place your HCL configuration file here
```
### Expected behavior
What should have happened?
### Actual behavior
What actually happened?
### Steps to reproduce
1.
2.
3.
### References
Are there any other GitHub issues (open or closed) that should
be linked here? For example:
- GH-1234
- ...

@@ -1,3 +1,12 @@
 sudo: false
 language: go
-go: 1.7
+go:
+  - 1.8
+branches:
+  only:
+    - master
+script: make test

@@ -4,7 +4,7 @@ clone_folder: c:\gopath\src\github.com\hashicorp\hcl
 environment:
   GOPATH: c:\gopath
 init:
-  - git config --global core.autocrlf true
+  - git config --global core.autocrlf false
 install:
   - cmd: >-
       echo %Path%

@@ -89,9 +89,9 @@ func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
 	switch k.Kind() {
 	case reflect.Bool:
 		return d.decodeBool(name, node, result)
-	case reflect.Float64:
+	case reflect.Float32, reflect.Float64:
 		return d.decodeFloat(name, node, result)
-	case reflect.Int:
+	case reflect.Int, reflect.Int32, reflect.Int64:
 		return d.decodeInt(name, node, result)
 	case reflect.Interface:
 		// When we see an interface, we make our own thing
@@ -137,13 +137,13 @@ func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
 func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
 	switch n := node.(type) {
 	case *ast.LiteralType:
-		if n.Token.Type == token.FLOAT {
+		if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
 			v, err := strconv.ParseFloat(n.Token.Text, 64)
 			if err != nil {
 				return err
 			}
-			result.Set(reflect.ValueOf(v))
+			result.Set(reflect.ValueOf(v).Convert(result.Type()))
 			return nil
 		}
 	}
@@ -164,7 +164,11 @@ func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
 				return err
 			}
+			if result.Kind() == reflect.Interface {
 				result.Set(reflect.ValueOf(int(v)))
+			} else {
+				result.SetInt(v)
+			}
 			return nil
 		case token.STRING:
 			v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
@@ -172,7 +176,11 @@ func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
 				return err
 			}
+			if result.Kind() == reflect.Interface {
 				result.Set(reflect.ValueOf(int(v)))
+			} else {
+				result.SetInt(v)
+			}
 			return nil
 		}
 	}
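The hunks above widen the decoder from plain reflect.Int and reflect.Float64 to the other integer and float kinds, and use SetInt/Convert for typed targets. A rough sketch of what that enables, assuming the hashicorp/hcl v1 hcl.Decode entry point (the struct, tags, and values are made up):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

type config struct {
	Port  int64   `hcl:"port"`  // previously only plain int fields were handled
	Ratio float32 `hcl:"ratio"` // previously only float64 fields were handled
}

func main() {
	input := `
port = 8080
ratio = 0.75
`
	var c config
	if err := hcl.Decode(&c, input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", c)
}
```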

1203
vendor/github.com/hashicorp/hcl/decoder_test.go generated vendored Normal file

File diff suppressed because it is too large

@@ -156,7 +156,8 @@ func (o *ObjectKey) Pos() token.Pos {
 type LiteralType struct {
 	Token token.Token
-	// associated line comment, only when used in a list
+	// comment types, only used when in a list
+	LeadComment *CommentGroup
 	LineComment *CommentGroup
 }

200
vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go generated vendored Normal file
@@ -0,0 +1,200 @@
package ast
import (
"reflect"
"strings"
"testing"
"github.com/hashicorp/hcl/hcl/token"
)
func TestObjectListFilter(t *testing.T) {
var cases = []struct {
Filter []string
Input []*ObjectItem
Output []*ObjectItem
}{
{
[]string{"foo"},
[]*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{
Token: token.Token{Type: token.STRING, Text: `"foo"`},
},
},
},
},
[]*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{},
},
},
},
{
[]string{"foo"},
[]*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
},
},
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
},
},
},
[]*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
},
},
},
},
}
for _, tc := range cases {
input := &ObjectList{Items: tc.Input}
expected := &ObjectList{Items: tc.Output}
if actual := input.Filter(tc.Filter...); !reflect.DeepEqual(actual, expected) {
t.Fatalf("in order: input, expected, actual\n\n%#v\n\n%#v\n\n%#v", input, expected, actual)
}
}
}
func TestWalk(t *testing.T) {
items := []*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
},
Val: &LiteralType{Token: token.Token{Type: token.STRING, Text: `"example"`}},
},
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
},
},
}
node := &ObjectList{Items: items}
order := []string{
"*ast.ObjectList",
"*ast.ObjectItem",
"*ast.ObjectKey",
"*ast.ObjectKey",
"*ast.LiteralType",
"*ast.ObjectItem",
"*ast.ObjectKey",
}
count := 0
Walk(node, func(n Node) (Node, bool) {
if n == nil {
return n, false
}
typeName := reflect.TypeOf(n).String()
if order[count] != typeName {
t.Errorf("expected '%s' got: '%s'", order[count], typeName)
}
count++
return n, true
})
}
func TestWalkEquality(t *testing.T) {
items := []*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
},
},
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
},
},
}
node := &ObjectList{Items: items}
rewritten := Walk(node, func(n Node) (Node, bool) { return n, true })
newNode, ok := rewritten.(*ObjectList)
if !ok {
t.Fatalf("expected Objectlist, got %T", rewritten)
}
if !reflect.DeepEqual(node, newNode) {
t.Fatal("rewritten node is not equal to the given node")
}
if len(newNode.Items) != 2 {
t.Error("expected newNode length 2, got: %d", len(newNode.Items))
}
expected := []string{
`"foo"`,
`"bar"`,
}
for i, item := range newNode.Items {
if len(item.Keys) != 1 {
t.Error("expected keys newNode length 1, got: %d", len(item.Keys))
}
if item.Keys[0].Token.Text != expected[i] {
t.Errorf("expected key %s, got %s", expected[i], item.Keys[0].Token.Text)
}
if item.Val != nil {
t.Errorf("expected item value should be nil")
}
}
}
func TestWalkRewrite(t *testing.T) {
items := []*ObjectItem{
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
},
},
&ObjectItem{
Keys: []*ObjectKey{
&ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
},
},
}
node := &ObjectList{Items: items}
suffix := "_example"
node = Walk(node, func(n Node) (Node, bool) {
switch i := n.(type) {
case *ObjectKey:
i.Token.Text = i.Token.Text + suffix
n = i
}
return n, true
}).(*ObjectList)
Walk(node, func(n Node) (Node, bool) {
switch i := n.(type) {
case *ObjectKey:
if !strings.HasSuffix(i.Token.Text, suffix) {
t.Errorf("Token '%s' should have suffix: %s", i.Token.Text, suffix)
}
}
return n, true
})
}

162
vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go generated vendored Normal file

@@ -0,0 +1,162 @@
// Derivative work from:
// - https://golang.org/src/cmd/gofmt/gofmt.go
// - https://github.com/fatih/hclfmt
package fmtcmd
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/hashicorp/hcl/hcl/printer"
)
var (
ErrWriteStdin = errors.New("cannot use write option with standard input")
)
type Options struct {
List bool // list files whose formatting differs
Write bool // write result to (source) file instead of stdout
Diff bool // display diffs of formatting changes
}
func isValidFile(f os.FileInfo, extensions []string) bool {
if !f.IsDir() && !strings.HasPrefix(f.Name(), ".") {
for _, ext := range extensions {
if strings.HasSuffix(f.Name(), "."+ext) {
return true
}
}
}
return false
}
// If in == nil, the source is the contents of the file with the given filename.
func processFile(filename string, in io.Reader, out io.Writer, stdin bool, opts Options) error {
if in == nil {
f, err := os.Open(filename)
if err != nil {
return err
}
defer f.Close()
in = f
}
src, err := ioutil.ReadAll(in)
if err != nil {
return err
}
res, err := printer.Format(src)
if err != nil {
return fmt.Errorf("In %s: %s", filename, err)
}
if !bytes.Equal(src, res) {
// formatting has changed
if opts.List {
fmt.Fprintln(out, filename)
}
if opts.Write {
err = ioutil.WriteFile(filename, res, 0644)
if err != nil {
return err
}
}
if opts.Diff {
data, err := diff(src, res)
if err != nil {
return fmt.Errorf("computing diff: %s", err)
}
fmt.Fprintf(out, "diff a/%s b/%s\n", filename, filename)
out.Write(data)
}
}
if !opts.List && !opts.Write && !opts.Diff {
_, err = out.Write(res)
}
return err
}
func walkDir(path string, extensions []string, stdout io.Writer, opts Options) error {
visitFile := func(path string, f os.FileInfo, err error) error {
if err == nil && isValidFile(f, extensions) {
err = processFile(path, nil, stdout, false, opts)
}
return err
}
return filepath.Walk(path, visitFile)
}
func Run(
paths, extensions []string,
stdin io.Reader,
stdout io.Writer,
opts Options,
) error {
if len(paths) == 0 {
if opts.Write {
return ErrWriteStdin
}
if err := processFile("<standard input>", stdin, stdout, true, opts); err != nil {
return err
}
return nil
}
for _, path := range paths {
switch dir, err := os.Stat(path); {
case err != nil:
return err
case dir.IsDir():
if err := walkDir(path, extensions, stdout, opts); err != nil {
return err
}
default:
if err := processFile(path, nil, stdout, false, opts); err != nil {
return err
}
}
}
return nil
}
func diff(b1, b2 []byte) (data []byte, err error) {
f1, err := ioutil.TempFile("", "")
if err != nil {
return
}
defer os.Remove(f1.Name())
defer f1.Close()
f2, err := ioutil.TempFile("", "")
if err != nil {
return
}
defer os.Remove(f2.Name())
defer f2.Close()
f1.Write(b1)
f2.Write(b2)
data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
if len(data) > 0 {
// diff exits with a non-zero status when the files don't match.
// Ignore that failure as long as we get output.
err = nil
}
return
}
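The new fmtcmd package above wraps the whole formatting workflow behind Run(paths, extensions, stdin, stdout, opts). A small usage sketch with the List option; the ./config path is hypothetical:

package main

import (
	"log"
	"os"

	"github.com/hashicorp/hcl/hcl/fmtcmd"
)

func main() {
	// List every *.hcl file under ./config whose formatting differs from
	// the canonical printer output; Write or Diff could be set instead.
	err := fmtcmd.Run(
		[]string{"./config"}, // paths to walk
		[]string{"hcl"},      // extensions, without the leading dot
		os.Stdin, os.Stdout,
		fmtcmd.Options{List: true},
	)
	if err != nil {
		log.Fatal(err)
	}
}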

@@ -0,0 +1,440 @@
// +build !windows
// TODO(jen20): These need fixing on Windows but fmt is not used right now
// and red CI is making it harder to process other bugs, so ignore until
// we get around to fixing them.
package fmtcmd
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"regexp"
"sort"
"syscall"
"testing"
"github.com/hashicorp/hcl/testhelper"
)
var fixtureExtensions = []string{"hcl"}
func init() {
sort.Sort(ByFilename(fixtures))
}
func TestIsValidFile(t *testing.T) {
const fixtureDir = "./test-fixtures"
cases := []struct {
Path string
Expected bool
}{
{"good.hcl", true},
{".hidden.ignore", false},
{"file.ignore", false},
{"dir.ignore", false},
}
for _, tc := range cases {
file, err := os.Stat(filepath.Join(fixtureDir, tc.Path))
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if res := isValidFile(file, fixtureExtensions); res != tc.Expected {
t.Errorf("want: %b, got: %b", tc.Expected, res)
}
}
}
func TestRunMultiplePaths(t *testing.T) {
path1, err := renderFixtures("")
if err != nil {
t.Errorf("unexpected error: %s", err)
}
defer os.RemoveAll(path1)
path2, err := renderFixtures("")
if err != nil {
t.Errorf("unexpected error: %s", err)
}
defer os.RemoveAll(path2)
var expectedOut bytes.Buffer
for _, path := range []string{path1, path2} {
for _, fixture := range fixtures {
if !bytes.Equal(fixture.golden, fixture.input) {
expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n")
}
}
}
_, stdout := mockIO()
err = Run(
[]string{path1, path2},
fixtureExtensions,
nil, stdout,
Options{
List: true,
},
)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if stdout.String() != expectedOut.String() {
t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
}
}
func TestRunSubDirectories(t *testing.T) {
pathParent, err := ioutil.TempDir("", "")
if err != nil {
t.Errorf("unexpected error: %s", err)
}
defer os.RemoveAll(pathParent)
path1, err := renderFixtures(pathParent)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
path2, err := renderFixtures(pathParent)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
paths := []string{path1, path2}
sort.Strings(paths)
var expectedOut bytes.Buffer
for _, path := range paths {
for _, fixture := range fixtures {
if !bytes.Equal(fixture.golden, fixture.input) {
expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n")
}
}
}
_, stdout := mockIO()
err = Run(
[]string{pathParent},
fixtureExtensions,
nil, stdout,
Options{
List: true,
},
)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if stdout.String() != expectedOut.String() {
t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
}
}
func TestRunStdin(t *testing.T) {
var expectedOut bytes.Buffer
for i, fixture := range fixtures {
if i != 0 {
expectedOut.WriteString("\n")
}
expectedOut.Write(fixture.golden)
}
stdin, stdout := mockIO()
for _, fixture := range fixtures {
stdin.Write(fixture.input)
}
err := Run(
[]string{},
fixtureExtensions,
stdin, stdout,
Options{},
)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if !bytes.Equal(stdout.Bytes(), expectedOut.Bytes()) {
t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
}
}
func TestRunStdinAndWrite(t *testing.T) {
var expectedOut = []byte{}
stdin, stdout := mockIO()
stdin.WriteString("")
err := Run(
[]string{}, []string{},
stdin, stdout,
Options{
Write: true,
},
)
if err != ErrWriteStdin {
t.Errorf("error want:\n%s\ngot:\n%s", ErrWriteStdin, err)
}
if !bytes.Equal(stdout.Bytes(), expectedOut) {
t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
}
}
func TestRunFileError(t *testing.T) {
path, err := ioutil.TempDir("", "")
if err != nil {
t.Errorf("unexpected error: %s", err)
}
defer os.RemoveAll(path)
filename := filepath.Join(path, "unreadable.hcl")
var expectedError = &os.PathError{
Op: "open",
Path: filename,
Err: syscall.EACCES,
}
err = ioutil.WriteFile(filename, []byte{}, 0000)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
_, stdout := mockIO()
err = Run(
[]string{path},
fixtureExtensions,
nil, stdout,
Options{},
)
if !reflect.DeepEqual(err, expectedError) {
t.Errorf("error want: %#v, got: %#v", expectedError, err)
}
}
func TestRunNoOptions(t *testing.T) {
path, err := renderFixtures("")
if err != nil {
t.Errorf("unexpected error: %s", err)
}
defer os.RemoveAll(path)
var expectedOut bytes.Buffer
for _, fixture := range fixtures {
expectedOut.Write(fixture.golden)
}
_, stdout := mockIO()
err = Run(
[]string{path},
fixtureExtensions,
nil, stdout,
Options{},
)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if stdout.String() != expectedOut.String() {
t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
}
}
func TestRunList(t *testing.T) {
path, err := renderFixtures("")
if err != nil {
t.Errorf("unexpected error: %s", err)
}
defer os.RemoveAll(path)
var expectedOut bytes.Buffer
for _, fixture := range fixtures {
if !bytes.Equal(fixture.golden, fixture.input) {
expectedOut.WriteString(fmt.Sprintln(filepath.Join(path, fixture.filename)))
}
}
_, stdout := mockIO()
err = Run(
[]string{path},
fixtureExtensions,
nil, stdout,
Options{
List: true,
},
)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if stdout.String() != expectedOut.String() {
t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
}
}
func TestRunWrite(t *testing.T) {
path, err := renderFixtures("")
if err != nil {
t.Errorf("unexpected error: %s", err)
}
defer os.RemoveAll(path)
_, stdout := mockIO()
err = Run(
[]string{path},
fixtureExtensions,
nil, stdout,
Options{
Write: true,
},
)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
for _, fixture := range fixtures {
res, err := ioutil.ReadFile(filepath.Join(path, fixture.filename))
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if !bytes.Equal(res, fixture.golden) {
t.Errorf("file %q contents want:\n%s\ngot:\n%s", fixture.filename, fixture.golden, res)
}
}
}
func TestRunDiff(t *testing.T) {
path, err := renderFixtures("")
if err != nil {
t.Errorf("unexpected error: %s", err)
}
defer os.RemoveAll(path)
var expectedOut bytes.Buffer
for _, fixture := range fixtures {
if len(fixture.diff) > 0 {
expectedOut.WriteString(
regexp.QuoteMeta(
fmt.Sprintf("diff a/%s/%s b/%s/%s\n", path, fixture.filename, path, fixture.filename),
),
)
// Need to use regex to ignore datetimes in diff.
expectedOut.WriteString(`--- .+?\n`)
expectedOut.WriteString(`\+\+\+ .+?\n`)
expectedOut.WriteString(regexp.QuoteMeta(string(fixture.diff)))
}
}
expectedOutString := testhelper.Unix2dos(expectedOut.String())
_, stdout := mockIO()
err = Run(
[]string{path},
fixtureExtensions,
nil, stdout,
Options{
Diff: true,
},
)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if !regexp.MustCompile(expectedOutString).Match(stdout.Bytes()) {
t.Errorf("stdout want match:\n%s\ngot:\n%q", expectedOutString, stdout)
}
}
func mockIO() (stdin, stdout *bytes.Buffer) {
return new(bytes.Buffer), new(bytes.Buffer)
}
type fixture struct {
filename string
input, golden, diff []byte
}
type ByFilename []fixture
func (s ByFilename) Len() int { return len(s) }
func (s ByFilename) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ByFilename) Less(i, j int) bool { return len(s[i].filename) > len(s[j].filename) }
var fixtures = []fixture{
{
"noop.hcl",
[]byte(`resource "aws_security_group" "firewall" {
count = 5
}
`),
[]byte(`resource "aws_security_group" "firewall" {
count = 5
}
`),
[]byte(``),
}, {
"align_equals.hcl",
[]byte(`variable "foo" {
default = "bar"
description = "bar"
}
`),
[]byte(`variable "foo" {
default = "bar"
description = "bar"
}
`),
[]byte(`@@ -1,4 +1,4 @@
variable "foo" {
- default = "bar"
+ default = "bar"
description = "bar"
}
`),
}, {
"indentation.hcl",
[]byte(`provider "aws" {
access_key = "foo"
secret_key = "bar"
}
`),
[]byte(`provider "aws" {
access_key = "foo"
secret_key = "bar"
}
`),
[]byte(`@@ -1,4 +1,4 @@
provider "aws" {
- access_key = "foo"
- secret_key = "bar"
+ access_key = "foo"
+ secret_key = "bar"
}
`),
},
}
// parent can be an empty string, in which case the system's default
// temporary directory will be used.
func renderFixtures(parent string) (path string, err error) {
path, err = ioutil.TempDir(parent, "")
if err != nil {
return "", err
}
for _, fixture := range fixtures {
err = ioutil.WriteFile(filepath.Join(path, fixture.filename), []byte(fixture.input), 0644)
if err != nil {
os.RemoveAll(path)
return "", err
}
}
return path, nil
}

@@ -0,0 +1 @@
invalid

@@ -0,0 +1 @@
invalid

@@ -0,0 +1,9 @@
package parser
import (
"testing"
)
func TestPosError_impl(t *testing.T) {
var _ error = new(PosError)
}

@@ -3,6 +3,7 @@
 package parser
 
 import (
+	"bytes"
 	"errors"
 	"fmt"
 	"strings"
@@ -36,6 +37,11 @@ func newParser(src []byte) *Parser {
 
 // Parse returns the fully parsed source and returns the abstract syntax tree.
 func Parse(src []byte) (*ast.File, error) {
+	// normalize all line endings
+	// since the scanner and output only work with "\n" line endings, we may
+	// end up with dangling "\r" characters in the parsed data.
+	src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+
 	p := newParser(src)
 	return p.Parse()
 }
@@ -50,7 +56,7 @@ func (p *Parser) Parse() (*ast.File, error) {
 		scerr = &PosError{Pos: pos, Err: errors.New(msg)}
 	}
 
-	f.Node, err = p.objectList()
+	f.Node, err = p.objectList(false)
 	if scerr != nil {
 		return nil, scerr
 	}
@@ -62,11 +68,23 @@ func (p *Parser) Parse() (*ast.File, error) {
 	return f, nil
 }
 
-func (p *Parser) objectList() (*ast.ObjectList, error) {
+// objectList parses a list of items within an object (generally k/v pairs).
+// The parameter" obj" tells this whether to we are within an object (braces:
+// '{', '}') or just at the top level. If we're within an object, we end
+// at an RBRACE.
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
 	defer un(trace(p, "ParseObjectList"))
 	node := &ast.ObjectList{}
 
 	for {
+		if obj {
+			tok := p.scan()
+			p.unscan()
+			if tok.Type == token.RBRACE {
+				break
+			}
+		}
+
 		n, err := p.objectItem()
 		if err == errEofToken {
 			break // we are finished
@@ -179,9 +197,12 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
 			keyStr = append(keyStr, k.Token.Text)
 		}
 
-		return nil, fmt.Errorf(
-			"key '%s' expected start of object ('{') or assignment ('=')",
-			strings.Join(keyStr, " "))
+		return nil, &PosError{
+			Pos: p.tok.Pos,
+			Err: fmt.Errorf(
+				"key '%s' expected start of object ('{') or assignment ('=')",
+				strings.Join(keyStr, " ")),
+		}
 	}
 
 	// do a look-ahead for line comment
@@ -244,7 +265,10 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
 			keyCount++
 			keys = append(keys, &ast.ObjectKey{Token: p.tok})
 		case token.ILLEGAL:
-			fmt.Println("illegal")
+			return keys, &PosError{
+				Pos: p.tok.Pos,
+				Err: fmt.Errorf("illegal character"),
+			}
 		default:
 			return keys, &PosError{
 				Pos: p.tok.Pos,
@@ -288,7 +312,7 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
 		Lbrace: p.tok.Pos,
 	}
 
-	l, err := p.objectList()
+	l, err := p.objectList(true)
 
 	// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
 	// not a RBRACE, it's an syntax error and we just return it.
@@ -296,9 +320,12 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
 		return nil, err
 	}
 
-	// If there is no error, we should be at a RBRACE to end the object
-	if p.tok.Type != token.RBRACE {
-		return nil, fmt.Errorf("object expected closing RBRACE got: %s", p.tok.Type)
+	// No error, scan and expect the ending to be a brace
+	if tok := p.scan(); tok.Type != token.RBRACE {
+		return nil, &PosError{
+			Pos: tok.Pos,
+			Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
+		}
 	}
 
 	o.List = l
@@ -331,12 +358,18 @@ func (p *Parser) listType() (*ast.ListType, error) {
 			}
 		}
 		switch tok.Type {
-		case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
+		case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
 			node, err := p.literalType()
 			if err != nil {
 				return nil, err
 			}
 
+			// If there is a lead comment, apply it
+			if p.leadComment != nil {
+				node.LeadComment = p.leadComment
+				p.leadComment = nil
+			}
+
 			l.Add(node)
 			needComma = true
 		case token.COMMA:
@@ -367,12 +400,16 @@ func (p *Parser) listType() (*ast.ListType, error) {
 			}
 			l.Add(node)
 			needComma = true
-		case token.BOOL:
-			// TODO(arslan) should we support? not supported by HCL yet
 		case token.LBRACK:
-			// TODO(arslan) should we support nested lists? Even though it's
-			// written in README of HCL, it's not a part of the grammar
-			// (not defined in parse.y)
+			node, err := p.listType()
+			if err != nil {
+				return nil, &PosError{
+					Pos: tok.Pos,
+					Err: fmt.Errorf(
+						"error while trying to parse list within list: %s", err),
+				}
+			}
+			l.Add(node)
 		case token.RBRACK:
 			// finished
 			l.Rbrack = p.tok.Pos
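Taken together, the parser changes normalize CRLF line endings before scanning, report errors as PosError with a token position, let objectList stop at RBRACE when inside an object, attach lead comments to list items, and accept nested lists. A short sketch of driving the updated parser directly; the input and key names are illustrative only:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// Windows-style line endings are normalized before scanning, and the
	// nested list is accepted because the LBRACK case now recurses.
	src := []byte("foo = \"bar\"\r\nnested = [1, [2, 3]]\r\n")

	f, err := parser.Parse(src)
	if err != nil {
		log.Fatal(err) // parse errors now carry a position via PosError
	}

	// The top-level node is an *ast.ObjectList of key/value items.
	for _, item := range f.Node.(*ast.ObjectList).Items {
		fmt.Println(item.Keys[0].Token.Text)
	}
}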

@@ -0,0 +1,575 @@
package parser
import (
"fmt"
"io/ioutil"
"path/filepath"
"reflect"
"runtime"
"strings"
"testing"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/hcl/token"
)
func TestType(t *testing.T) {
var literals = []struct {
typ token.Type
src string
}{
{token.STRING, `foo = "foo"`},
{token.NUMBER, `foo = 123`},
{token.NUMBER, `foo = -29`},
{token.FLOAT, `foo = 123.12`},
{token.FLOAT, `foo = -123.12`},
{token.BOOL, `foo = true`},
{token.HEREDOC, "foo = <<EOF\nHello\nWorld\nEOF"},
}
for _, l := range literals {
p := newParser([]byte(l.src))
item, err := p.objectItem()
if err != nil {
t.Error(err)
}
lit, ok := item.Val.(*ast.LiteralType)
if !ok {
t.Errorf("node should be of type LiteralType, got: %T", item.Val)
}
if lit.Token.Type != l.typ {
t.Errorf("want: %s, got: %s", l.typ, lit.Token.Type)
}
}
}
func TestListType(t *testing.T) {
var literals = []struct {
src string
tokens []token.Type
}{
{
`foo = ["123", 123]`,
[]token.Type{token.STRING, token.NUMBER},
},
{
`foo = [123, "123",]`,
[]token.Type{token.NUMBER, token.STRING},
},
{
`foo = [false]`,
[]token.Type{token.BOOL},
},
{
`foo = []`,
[]token.Type{},
},
{
`foo = [1,
"string",
<<EOF
heredoc contents
EOF
]`,
[]token.Type{token.NUMBER, token.STRING, token.HEREDOC},
},
}
for _, l := range literals {
p := newParser([]byte(l.src))
item, err := p.objectItem()
if err != nil {
t.Error(err)
}
list, ok := item.Val.(*ast.ListType)
if !ok {
t.Errorf("node should be of type LiteralType, got: %T", item.Val)
}
tokens := []token.Type{}
for _, li := range list.List {
if tp, ok := li.(*ast.LiteralType); ok {
tokens = append(tokens, tp.Token.Type)
}
}
equals(t, l.tokens, tokens)
}
}
func TestListOfMaps(t *testing.T) {
src := `foo = [
{key = "bar"},
{key = "baz", key2 = "qux"},
]`
p := newParser([]byte(src))
file, err := p.Parse()
if err != nil {
t.Fatalf("err: %s", err)
}
// Here we make all sorts of assumptions about the input structure w/ type
// assertions. The intent is only for this to be a "smoke test" ensuring
// parsing actually performed its duty - giving this test something a bit
// more robust than _just_ "no error occurred".
expected := []string{`"bar"`, `"baz"`, `"qux"`}
actual := make([]string, 0, 3)
ol := file.Node.(*ast.ObjectList)
objItem := ol.Items[0]
list := objItem.Val.(*ast.ListType)
for _, node := range list.List {
obj := node.(*ast.ObjectType)
for _, item := range obj.List.Items {
val := item.Val.(*ast.LiteralType)
actual = append(actual, val.Token.Text)
}
}
if !reflect.DeepEqual(expected, actual) {
t.Fatalf("Expected: %#v, got %#v", expected, actual)
}
}
func TestListOfMaps_requiresComma(t *testing.T) {
src := `foo = [
{key = "bar"}
{key = "baz"}
]`
p := newParser([]byte(src))
_, err := p.Parse()
if err == nil {
t.Fatalf("Expected error, got none!")
}
expected := "error parsing list, expected comma or list end"
if !strings.Contains(err.Error(), expected) {
t.Fatalf("Expected err:\n %s\nTo contain:\n %s\n", err, expected)
}
}
func TestListType_leadComment(t *testing.T) {
var literals = []struct {
src string
comment []string
}{
{
`foo = [
1,
# bar
2,
3,
]`,
[]string{"", "# bar", ""},
},
}
for _, l := range literals {
p := newParser([]byte(l.src))
item, err := p.objectItem()
if err != nil {
t.Fatal(err)
}
list, ok := item.Val.(*ast.ListType)
if !ok {
t.Fatalf("node should be of type LiteralType, got: %T", item.Val)
}
if len(list.List) != len(l.comment) {
t.Fatalf("bad: %d", len(list.List))
}
for i, li := range list.List {
lt := li.(*ast.LiteralType)
comment := l.comment[i]
if (lt.LeadComment == nil) != (comment == "") {
t.Fatalf("bad: %#v", lt)
}
if comment == "" {
continue
}
actual := lt.LeadComment.List[0].Text
if actual != comment {
t.Fatalf("bad: %q %q", actual, comment)
}
}
}
}
func TestListType_lineComment(t *testing.T) {
var literals = []struct {
src string
comment []string
}{
{
`foo = [
1,
2, # bar
3,
]`,
[]string{"", "# bar", ""},
},
}
for _, l := range literals {
p := newParser([]byte(l.src))
item, err := p.objectItem()
if err != nil {
t.Fatal(err)
}
list, ok := item.Val.(*ast.ListType)
if !ok {
t.Fatalf("node should be of type LiteralType, got: %T", item.Val)
}
if len(list.List) != len(l.comment) {
t.Fatalf("bad: %d", len(list.List))
}
for i, li := range list.List {
lt := li.(*ast.LiteralType)
comment := l.comment[i]
if (lt.LineComment == nil) != (comment == "") {
t.Fatalf("bad: %s", lt)
}
if comment == "" {
continue
}
actual := lt.LineComment.List[0].Text
if actual != comment {
t.Fatalf("bad: %q %q", actual, comment)
}
}
}
}
func TestObjectType(t *testing.T) {
var literals = []struct {
src string
nodeType []ast.Node
itemLen int
}{
{
`foo = {}`,
nil,
0,
},
{
`foo = {
bar = "fatih"
}`,
[]ast.Node{&ast.LiteralType{}},
1,
},
{
`foo = {
bar = "fatih"
baz = ["arslan"]
}`,
[]ast.Node{
&ast.LiteralType{},
&ast.ListType{},
},
2,
},
{
`foo = {
bar {}
}`,
[]ast.Node{
&ast.ObjectType{},
},
1,
},
{
`foo {
bar {}
foo = true
}`,
[]ast.Node{
&ast.ObjectType{},
&ast.LiteralType{},
},
2,
},
}
for _, l := range literals {
t.Logf("Source: %s", l.src)
p := newParser([]byte(l.src))
// p.enableTrace = true
item, err := p.objectItem()
if err != nil {
t.Error(err)
continue
}
// we know that the ObjectKey name is foo for all cases, what matters
// is the object
obj, ok := item.Val.(*ast.ObjectType)
if !ok {
t.Errorf("node should be of type LiteralType, got: %T", item.Val)
continue
}
// check if the total length of items are correct
equals(t, l.itemLen, len(obj.List.Items))
// check if the types are correct
for i, item := range obj.List.Items {
equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val))
}
}
}
func TestObjectKey(t *testing.T) {
keys := []struct {
exp []token.Type
src string
}{
{[]token.Type{token.IDENT}, `foo {}`},
{[]token.Type{token.IDENT}, `foo = {}`},
{[]token.Type{token.IDENT}, `foo = bar`},
{[]token.Type{token.IDENT}, `foo = 123`},
{[]token.Type{token.IDENT}, `foo = "${var.bar}`},
{[]token.Type{token.STRING}, `"foo" {}`},
{[]token.Type{token.STRING}, `"foo" = {}`},
{[]token.Type{token.STRING}, `"foo" = "${var.bar}`},
{[]token.Type{token.IDENT, token.IDENT}, `foo bar {}`},
{[]token.Type{token.IDENT, token.STRING}, `foo "bar" {}`},
{[]token.Type{token.STRING, token.IDENT}, `"foo" bar {}`},
{[]token.Type{token.IDENT, token.IDENT, token.IDENT}, `foo bar baz {}`},
}
for _, k := range keys {
p := newParser([]byte(k.src))
keys, err := p.objectKey()
if err != nil {
t.Fatal(err)
}
tokens := []token.Type{}
for _, o := range keys {
tokens = append(tokens, o.Token.Type)
}
equals(t, k.exp, tokens)
}
errKeys := []struct {
src string
}{
{`foo 12 {}`},
{`foo bar = {}`},
{`foo []`},
{`12 {}`},
}
for _, k := range errKeys {
p := newParser([]byte(k.src))
_, err := p.objectKey()
if err == nil {
t.Errorf("case '%s' should give an error", k.src)
}
}
}
func TestCommentGroup(t *testing.T) {
var cases = []struct {
src string
groups int
}{
{"# Hello\n# World", 1},
{"# Hello\r\n# Windows", 1},
}
for _, tc := range cases {
t.Run(tc.src, func(t *testing.T) {
p := newParser([]byte(tc.src))
file, err := p.Parse()
if err != nil {
t.Fatalf("parse error: %s", err)
}
if len(file.Comments) != tc.groups {
t.Fatalf("bad: %#v", file.Comments)
}
})
}
}
// Official HCL tests
func TestParse(t *testing.T) {
cases := []struct {
Name string
Err bool
}{
{
"assign_colon.hcl",
true,
},
{
"comment.hcl",
false,
},
{
"comment_crlf.hcl",
false,
},
{
"comment_lastline.hcl",
false,
},
{
"comment_single.hcl",
false,
},
{
"empty.hcl",
false,
},
{
"list_comma.hcl",
false,
},
{
"multiple.hcl",
false,
},
{
"object_list_comma.hcl",
false,
},
{
"structure.hcl",
false,
},
{
"structure_basic.hcl",
false,
},
{
"structure_empty.hcl",
false,
},
{
"complex.hcl",
false,
},
{
"complex_crlf.hcl",
false,
},
{
"types.hcl",
false,
},
{
"array_comment.hcl",
false,
},
{
"array_comment_2.hcl",
true,
},
{
"missing_braces.hcl",
true,
},
{
"unterminated_object.hcl",
true,
},
{
"unterminated_object_2.hcl",
true,
},
{
"key_without_value.hcl",
true,
},
{
"object_key_without_value.hcl",
true,
},
{
"object_key_assign_without_value.hcl",
true,
},
{
"object_key_assign_without_value2.hcl",
true,
},
{
"object_key_assign_without_value3.hcl",
true,
},
{
"git_crypt.hcl",
true,
},
}
const fixtureDir = "./test-fixtures"
for _, tc := range cases {
t.Run(tc.Name, func(t *testing.T) {
d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name))
if err != nil {
t.Fatalf("err: %s", err)
}
v, err := Parse(d)
if (err != nil) != tc.Err {
t.Fatalf("Input: %s\n\nError: %s\n\nAST: %#v", tc.Name, err, v)
}
})
}
}
func TestParse_inline(t *testing.T) {
cases := []struct {
Value string
Err bool
}{
{"t t e{{}}", true},
{"o{{}}", true},
{"t t e d N{{}}", true},
{"t t e d{{}}", true},
{"N{}N{{}}", true},
{"v\nN{{}}", true},
{"v=/\n[,", true},
{"v=10kb", true},
{"v=/foo", true},
}
for _, tc := range cases {
t.Logf("Testing: %q", tc.Value)
ast, err := Parse([]byte(tc.Value))
if (err != nil) != tc.Err {
t.Fatalf("Input: %q\n\nError: %s\n\nAST: %#v", tc.Value, err, ast)
}
}
}
// equals fails the test if exp is not equal to act.
func equals(tb testing.TB, exp, act interface{}) {
if !reflect.DeepEqual(exp, act) {
_, file, line, _ := runtime.Caller(1)
fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
tb.FailNow()
}
}

@@ -0,0 +1,4 @@
foo = [
"1",
"2", # comment
]

@@ -0,0 +1,6 @@
provisioner "remote-exec" {
scripts = [
"${path.module}/scripts/install-consul.sh" // missing comma
"${path.module}/scripts/install-haproxy.sh"
]
}

@@ -0,0 +1,6 @@
resource = [{
"foo": {
"bar": {},
"baz": [1, 2, "foo"],
}
}]

@@ -0,0 +1,5 @@
resource = [{
foo = [{
bar = {}
}]
}]

@@ -0,0 +1,15 @@
// Foo
/* Bar */
/*
/*
Baz
*/
# Another
# Multiple
# Lines
foo = "bar"

@@ -0,0 +1,15 @@
// Foo
/* Bar */
/*
/*
Baz
*/
# Another
# Multiple
# Lines
foo = "bar"

@@ -0,0 +1 @@
#foo

@@ -0,0 +1 @@
# Hello

@@ -0,0 +1,42 @@
variable "foo" {
default = "bar"
description = "bar"
}
variable "groups" { }
provider "aws" {
access_key = "foo"
secret_key = "bar"
}
provider "do" {
api_key = "${var.foo}"
}
resource "aws_security_group" "firewall" {
count = 5
}
resource aws_instance "web" {
ami = "${var.foo}"
security_groups = [
"foo",
"${aws_security_group.firewall.foo}",
"${element(split(\",\", var.groups)}",
]
network_interface = {
device_index = 0
description = "Main network interface"
}
}
resource "aws_instance" "db" {
security_groups = "${aws_security_group.firewall.*.id}"
VPC = "foo"
depends_on = ["aws_instance.web"]
}
output "web_ip" {
value = "${aws_instance.web.private_ip}"
}

@@ -0,0 +1,42 @@
variable "foo" {
default = "bar"
description = "bar"
}
variable "groups" { }
provider "aws" {
access_key = "foo"
secret_key = "bar"
}
provider "do" {
api_key = "${var.foo}"
}
resource "aws_security_group" "firewall" {
count = 5
}
resource aws_instance "web" {
ami = "${var.foo}"
security_groups = [
"foo",
"${aws_security_group.firewall.foo}",
"${element(split(\",\", var.groups)}",
]
network_interface = {
device_index = 0
description = "Main network interface"
}
}
resource "aws_instance" "db" {
security_groups = "${aws_security_group.firewall.*.id}"
VPC = "foo"
depends_on = ["aws_instance.web"]
}
output "web_ip" {
value = "${aws_instance.web.private_ip}"
}

@@ -0,0 +1 @@
foo.bar = "baz"


Binary file not shown.

@@ -0,0 +1 @@
foo

@@ -0,0 +1 @@
foo = [1, 2, "foo"]

@@ -0,0 +1 @@
foo = [1, 2, "foo",]

@@ -0,0 +1,4 @@
# should error, but not crash
resource "template_file" "cloud_config" {
template = "$file("${path.module}/some/path")"
}

@@ -0,0 +1,2 @@
foo = "bar"
key = 7

@@ -0,0 +1,3 @@
foo {
bar =
}

@@ -0,0 +1,4 @@
foo {
baz = 7
bar =
}

@@ -0,0 +1,4 @@
foo {
bar =
baz = 7
}

@@ -0,0 +1,3 @@
foo {
bar
}

@@ -0,0 +1 @@
foo = {one = 1, two = 2}

@@ -0,0 +1,3 @@
default = {
"eu-west-1": "ami-b1cf19c6",
}

@@ -0,0 +1,5 @@
// This is a test structure for the lexer
foo bar "baz" {
key = 7
foo = "bar"
}

@@ -0,0 +1,5 @@
foo {
value = 7
"value" = 8
"complex::value" = 9
}

@@ -0,0 +1 @@
resource "foo" "bar" {}

@@ -0,0 +1,7 @@
foo = "bar"
bar = 7
baz = [1,2,3]
foo = -12
bar = 3.14159
foo = true
bar = false

@@ -0,0 +1,2 @@
foo "baz" {
bar = "baz"

@@ -0,0 +1,6 @@
resource "aws_eip" "EIP1" { a { a { a { a { a {
count = "1"
resource "aws_eip" "EIP2" {
count = "1"
}

779
vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go generated vendored Normal file

@@ -0,0 +1,779 @@
package printer
import (
"bytes"
"fmt"
"sort"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/hcl/token"
)
const (
blank = byte(' ')
newline = byte('\n')
tab = byte('\t')
infinity = 1 << 30 // offset or line
)
var (
unindent = []byte("\uE123") // in the private use space
)
type printer struct {
cfg Config
prev token.Pos
comments []*ast.CommentGroup // may be nil, contains all comments
standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node)
enableTrace bool
indentTrace int
}
type ByPosition []*ast.CommentGroup
func (b ByPosition) Len() int { return len(b) }
func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) }
// collectComments comments all standalone comments which are not lead or line
// comment
func (p *printer) collectComments(node ast.Node) {
// first collect all comments. This is already stored in
// ast.File.(comments)
ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
switch t := nn.(type) {
case *ast.File:
p.comments = t.Comments
return nn, false
}
return nn, true
})
standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0)
for _, c := range p.comments {
standaloneComments[c.Pos()] = c
}
// next remove all lead and line comments from the overall comment map.
// This will give us comments which are standalone, comments which are not
// assigned to any kind of node.
ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
switch t := nn.(type) {
case *ast.LiteralType:
if t.LeadComment != nil {
for _, comment := range t.LeadComment.List {
if _, ok := standaloneComments[comment.Pos()]; ok {
delete(standaloneComments, comment.Pos())
}
}
}
if t.LineComment != nil {
for _, comment := range t.LineComment.List {
if _, ok := standaloneComments[comment.Pos()]; ok {
delete(standaloneComments, comment.Pos())
}
}
}
case *ast.ObjectItem:
if t.LeadComment != nil {
for _, comment := range t.LeadComment.List {
if _, ok := standaloneComments[comment.Pos()]; ok {
delete(standaloneComments, comment.Pos())
}
}
}
if t.LineComment != nil {
for _, comment := range t.LineComment.List {
if _, ok := standaloneComments[comment.Pos()]; ok {
delete(standaloneComments, comment.Pos())
}
}
}
}
return nn, true
})
for _, c := range standaloneComments {
p.standaloneComments = append(p.standaloneComments, c)
}
sort.Sort(ByPosition(p.standaloneComments))
}
// output prints creates b printable HCL output and returns it.
func (p *printer) output(n interface{}) []byte {
var buf bytes.Buffer
switch t := n.(type) {
case *ast.File:
// File doesn't trace so we add the tracing here
defer un(trace(p, "File"))
return p.output(t.Node)
case *ast.ObjectList:
defer un(trace(p, "ObjectList"))
var index int
for {
// Determine the location of the next actual non-comment
// item. If we're at the end, the next item is at "infinity"
var nextItem token.Pos
if index != len(t.Items) {
nextItem = t.Items[index].Pos()
} else {
nextItem = token.Pos{Offset: infinity, Line: infinity}
}
// Go through the standalone comments in the file and print out
// the comments that we should be for this object item.
for _, c := range p.standaloneComments {
// Go through all the comments in the group. The group
// should be printed together, not separated by double newlines.
printed := false
newlinePrinted := false
for _, comment := range c.List {
// We only care about comments after the previous item
// we've printed so that comments are printed in the
// correct locations (between two objects for example).
// And before the next item.
if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
// if we hit the end add newlines so we can print the comment
// we don't do this if prev is invalid which means the
// beginning of the file since the first comment should
// be at the first line.
if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) {
buf.Write([]byte{newline, newline})
newlinePrinted = true
}
// Write the actual comment.
buf.WriteString(comment.Text)
buf.WriteByte(newline)
// Set printed to true to note that we printed something
printed = true
}
}
// If we're not at the last item, write a new line so
// that there is a newline separating this comment from
// the next object.
if printed && index != len(t.Items) {
buf.WriteByte(newline)
}
}
if index == len(t.Items) {
break
}
buf.Write(p.output(t.Items[index]))
if index != len(t.Items)-1 {
// Always write a newline to separate us from the next item
buf.WriteByte(newline)
// Need to determine if we're going to separate the next item
// with a blank line. The logic here is simple, though there
// are a few conditions:
//
// 1. The next object is more than one line away anyways,
// so we need an empty line.
//
// 2. The next object is not a "single line" object, so
// we need an empty line.
//
// 3. This current object is not a single line object,
// so we need an empty line.
current := t.Items[index]
next := t.Items[index+1]
if next.Pos().Line != t.Items[index].Pos().Line+1 ||
!p.isSingleLineObject(next) ||
!p.isSingleLineObject(current) {
buf.WriteByte(newline)
}
}
index++
}
case *ast.ObjectKey:
buf.WriteString(t.Token.Text)
case *ast.ObjectItem:
p.prev = t.Pos()
buf.Write(p.objectItem(t))
case *ast.LiteralType:
buf.Write(p.literalType(t))
case *ast.ListType:
buf.Write(p.list(t))
case *ast.ObjectType:
buf.Write(p.objectType(t))
default:
fmt.Printf(" unknown type: %T\n", n)
}
return buf.Bytes()
}
func (p *printer) literalType(lit *ast.LiteralType) []byte {
result := []byte(lit.Token.Text)
switch lit.Token.Type {
case token.HEREDOC:
// Clear the trailing newline from heredocs
if result[len(result)-1] == '\n' {
result = result[:len(result)-1]
}
// Poison lines 2+ so that we don't indent them
result = p.heredocIndent(result)
case token.STRING:
// If this is a multiline string, poison lines 2+ so we don't
// indent them.
if bytes.IndexRune(result, '\n') >= 0 {
result = p.heredocIndent(result)
}
}
return result
}
// objectItem returns the printable HCL form of an object item. An object type
// starts with one/multiple keys and has a value. The value might be of any
// type.
func (p *printer) objectItem(o *ast.ObjectItem) []byte {
defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text)))
var buf bytes.Buffer
if o.LeadComment != nil {
for _, comment := range o.LeadComment.List {
buf.WriteString(comment.Text)
buf.WriteByte(newline)
}
}
for i, k := range o.Keys {
buf.WriteString(k.Token.Text)
buf.WriteByte(blank)
// reach end of key
if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 {
buf.WriteString("=")
buf.WriteByte(blank)
}
}
buf.Write(p.output(o.Val))
if o.Val.Pos().Line == o.Keys[0].Pos().Line && o.LineComment != nil {
buf.WriteByte(blank)
for _, comment := range o.LineComment.List {
buf.WriteString(comment.Text)
}
}
return buf.Bytes()
}
// objectType returns the printable HCL form of an object type. An object type
// begins with a brace and ends with a brace.
func (p *printer) objectType(o *ast.ObjectType) []byte {
defer un(trace(p, "ObjectType"))
var buf bytes.Buffer
buf.WriteString("{")
var index int
var nextItem token.Pos
var commented, newlinePrinted bool
for {
// Determine the location of the next actual non-comment
// item. If we're at the end, the next item is the closing brace
if index != len(o.List.Items) {
nextItem = o.List.Items[index].Pos()
} else {
nextItem = o.Rbrace
}
// Go through the standalone comments in the file and print out
// the comments that we should be for this object item.
for _, c := range p.standaloneComments {
printed := false
var lastCommentPos token.Pos
for _, comment := range c.List {
// We only care about comments after the previous item
// we've printed so that comments are printed in the
// correct locations (between two objects for example).
// And before the next item.
if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
// If there are standalone comments and the initial newline has not
// been printed yet, do it now.
if !newlinePrinted {
newlinePrinted = true
buf.WriteByte(newline)
}
// add newline if it's between other printed nodes
if index > 0 {
commented = true
buf.WriteByte(newline)
}
// Store this position
lastCommentPos = comment.Pos()
// output the comment itself
buf.Write(p.indent(p.heredocIndent([]byte(comment.Text))))
// Set printed to true to note that we printed something
printed = true
/*
if index != len(o.List.Items) {
buf.WriteByte(newline) // do not print on the end
}
*/
}
}
// Stuff to do if we had comments
if printed {
// Always write a newline
buf.WriteByte(newline)
// If there is another item in the object and our comment
// didn't hug it directly, then make sure there is a blank
// line separating them.
if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 {
buf.WriteByte(newline)
}
}
}
if index == len(o.List.Items) {
p.prev = o.Rbrace
break
}
// At this point we are sure that it's not a totally empty block: print
// the initial newline if it hasn't been printed yet by the previous
// block about standalone comments.
if !newlinePrinted {
buf.WriteByte(newline)
newlinePrinted = true
}
// check if we have adjacent one liner items. If yes we'll going to align
// the comments.
var aligned []*ast.ObjectItem
for _, item := range o.List.Items[index:] {
// we don't group one line lists
if len(o.List.Items) == 1 {
break
}
// one means a oneliner with out any lead comment
// two means a oneliner with lead comment
// anything else might be something else
cur := lines(string(p.objectItem(item)))
if cur > 2 {
break
}
curPos := item.Pos()
nextPos := token.Pos{}
if index != len(o.List.Items)-1 {
nextPos = o.List.Items[index+1].Pos()
}
prevPos := token.Pos{}
if index != 0 {
prevPos = o.List.Items[index-1].Pos()
}
// fmt.Println("DEBUG ----------------")
// fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
// fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
// fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)
if curPos.Line+1 == nextPos.Line {
aligned = append(aligned, item)
index++
continue
}
if curPos.Line-1 == prevPos.Line {
aligned = append(aligned, item)
index++
// finish if we have a new line or comment next. This happens
// if the next item is not adjacent
if curPos.Line+1 != nextPos.Line {
break
}
continue
}
break
}
// put newlines if the items are between other non aligned items.
// newlines are also added if there is a standalone comment already, so
// check it too
if !commented && index != len(aligned) {
buf.WriteByte(newline)
}
if len(aligned) >= 1 {
p.prev = aligned[len(aligned)-1].Pos()
items := p.alignedItems(aligned)
buf.Write(p.indent(items))
} else {
p.prev = o.List.Items[index].Pos()
buf.Write(p.indent(p.objectItem(o.List.Items[index])))
index++
}
buf.WriteByte(newline)
}
buf.WriteString("}")
return buf.Bytes()
}
func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
var buf bytes.Buffer
// find the longest key and value length, needed for alignment
var longestKeyLen int // longest key length
var longestValLen int // longest value length
for _, item := range items {
key := len(item.Keys[0].Token.Text)
val := len(p.output(item.Val))
if key > longestKeyLen {
longestKeyLen = key
}
if val > longestValLen {
longestValLen = val
}
}
for i, item := range items {
if item.LeadComment != nil {
for _, comment := range item.LeadComment.List {
buf.WriteString(comment.Text)
buf.WriteByte(newline)
}
}
for i, k := range item.Keys {
keyLen := len(k.Token.Text)
buf.WriteString(k.Token.Text)
for i := 0; i < longestKeyLen-keyLen+1; i++ {
buf.WriteByte(blank)
}
// reach end of key
if i == len(item.Keys)-1 && len(item.Keys) == 1 {
buf.WriteString("=")
buf.WriteByte(blank)
}
}
val := p.output(item.Val)
valLen := len(val)
buf.Write(val)
if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
for i := 0; i < longestValLen-valLen+1; i++ {
buf.WriteByte(blank)
}
for _, comment := range item.LineComment.List {
buf.WriteString(comment.Text)
}
}
// do not print for the last item
if i != len(items)-1 {
buf.WriteByte(newline)
}
}
return buf.Bytes()
}
// list returns the printable HCL form of an list type.
func (p *printer) list(l *ast.ListType) []byte {
var buf bytes.Buffer
buf.WriteString("[")
var longestLine int
for _, item := range l.List {
// for now we assume that the list only contains literal types
if lit, ok := item.(*ast.LiteralType); ok {
lineLen := len(lit.Token.Text)
if lineLen > longestLine {
longestLine = lineLen
}
}
}
insertSpaceBeforeItem := false
lastHadLeadComment := false
for i, item := range l.List {
// Keep track of whether this item is a heredoc since that has
// unique behavior.
heredoc := false
if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
heredoc = true
}
if item.Pos().Line != l.Lbrack.Line {
// multiline list, add newline before we add each item
buf.WriteByte(newline)
insertSpaceBeforeItem = false
// If we have a lead comment, then we want to write that first
leadComment := false
if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil {
leadComment = true
// If this isn't the first item and the previous element
// didn't have a lead comment, then we need to add an extra
// newline to properly space things out. If it did have a
// lead comment previously then this would be done
// automatically.
if i > 0 && !lastHadLeadComment {
buf.WriteByte(newline)
}
for _, comment := range lit.LeadComment.List {
buf.Write(p.indent([]byte(comment.Text)))
buf.WriteByte(newline)
}
}
// also indent each line
val := p.output(item)
curLen := len(val)
buf.Write(p.indent(val))
// if this item is a heredoc, then we output the comma on
// the next line. This is the only case this happens.
comma := []byte{','}
if heredoc {
buf.WriteByte(newline)
comma = p.indent(comma)
}
buf.Write(comma)
if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
// if the next item doesn't have any comments, do not align
buf.WriteByte(blank) // align one space
for i := 0; i < longestLine-curLen; i++ {
buf.WriteByte(blank)
}
for _, comment := range lit.LineComment.List {
buf.WriteString(comment.Text)
}
}
lastItem := i == len(l.List)-1
if lastItem {
buf.WriteByte(newline)
}
if leadComment && !lastItem {
buf.WriteByte(newline)
}
lastHadLeadComment = leadComment
} else {
if insertSpaceBeforeItem {
buf.WriteByte(blank)
insertSpaceBeforeItem = false
}
// Output the item itself
// also indent each line
val := p.output(item)
curLen := len(val)
buf.Write(val)
// If this is a heredoc item we always have to output a newline
// so that it parses properly.
if heredoc {
buf.WriteByte(newline)
}
// If this isn't the last element, write a comma.
if i != len(l.List)-1 {
buf.WriteString(",")
insertSpaceBeforeItem = true
}
if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
// if the next item doesn't have any comments, do not align
buf.WriteByte(blank) // align one space
for i := 0; i < longestLine-curLen; i++ {
buf.WriteByte(blank)
}
for _, comment := range lit.LineComment.List {
buf.WriteString(comment.Text)
}
}
}
}
buf.WriteString("]")
return buf.Bytes()
}
// indent indents the lines of the given buffer for each non-empty line
func (p *printer) indent(buf []byte) []byte {
var prefix []byte
if p.cfg.SpacesWidth != 0 {
for i := 0; i < p.cfg.SpacesWidth; i++ {
prefix = append(prefix, blank)
}
} else {
prefix = []byte{tab}
}
var res []byte
bol := true
for _, c := range buf {
if bol && c != '\n' {
res = append(res, prefix...)
}
res = append(res, c)
bol = c == '\n'
}
return res
}
// unindent removes all the indentation from the tombstoned lines
func (p *printer) unindent(buf []byte) []byte {
var res []byte
for i := 0; i < len(buf); i++ {
skip := len(buf)-i <= len(unindent)
if !skip {
skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
}
if skip {
res = append(res, buf[i])
continue
}
// We have a marker. we have to backtrace here and clean out
// any whitespace ahead of our tombstone up to a \n
for j := len(res) - 1; j >= 0; j-- {
if res[j] == '\n' {
break
}
res = res[:j]
}
// Skip the entire unindent marker
i += len(unindent) - 1
}
return res
}
// heredocIndent marks all the 2nd and further lines as unindentable
func (p *printer) heredocIndent(buf []byte) []byte {
var res []byte
bol := false
for _, c := range buf {
if bol && c != '\n' {
res = append(res, unindent...)
}
res = append(res, c)
bol = c == '\n'
}
return res
}
// isSingleLineObject tells whether the given object item is a single
// line object such as "obj {}".
//
// A single line object:
//
// * has no lead comments (hence multi-line)
// * has no assignment
// * has no values in the stanza (within {})
//
func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool {
// If there is a lead comment, can't be one line
if val.LeadComment != nil {
return false
}
// If there is assignment, we always break by line
if val.Assign.IsValid() {
return false
}
// If it isn't an object type, then its not a single line object
ot, ok := val.Val.(*ast.ObjectType)
if !ok {
return false
}
// If the object has no items, it is single line!
return len(ot.List.Items) == 0
}
func lines(txt string) int {
endline := 1
for i := 0; i < len(txt); i++ {
if txt[i] == '\n' {
endline++
}
}
return endline
}
// ----------------------------------------------------------------------------
// Tracing support
func (p *printer) printTrace(a ...interface{}) {
if !p.enableTrace {
return
}
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
const n = len(dots)
i := 2 * p.indentTrace
for i > n {
fmt.Print(dots)
i -= n
}
// i <= n
fmt.Print(dots[0:i])
fmt.Println(a...)
}
func trace(p *printer, msg string) *printer {
p.printTrace(msg, "(")
p.indentTrace++
return p
}
// Usage pattern: defer un(trace(p, "..."))
func un(p *printer) {
p.indentTrace--
p.printTrace(")")
}
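nodes.go above does the actual rendering, and its indent helper switches between a tab and cfg.SpacesWidth spaces. A brief sketch of printing a parsed node with a non-default width through Config.Fprint; the input snippet is made up:

package main

import (
	"log"
	"os"

	"github.com/hashicorp/hcl/hcl/parser"
	"github.com/hashicorp/hcl/hcl/printer"
)

func main() {
	node, err := parser.Parse([]byte(`foo = [1, 2, "three"]`))
	if err != nil {
		log.Fatal(err)
	}

	// Four-space indentation instead of the two spaces used by DefaultConfig.
	cfg := &printer.Config{SpacesWidth: 4}
	if err := cfg.Fprint(os.Stdout, node); err != nil {
		log.Fatal(err)
	}
}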

66
vendor/github.com/hashicorp/hcl/hcl/printer/printer.go generated vendored Normal file

@@ -0,0 +1,66 @@
// Package printer implements printing of AST nodes to HCL format.
package printer
import (
"bytes"
"io"
"text/tabwriter"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/hcl/parser"
)
var DefaultConfig = Config{
SpacesWidth: 2,
}
// A Config node controls the output of Fprint.
type Config struct {
SpacesWidth int // if set, it will use spaces instead of tabs for alignment
}
func (c *Config) Fprint(output io.Writer, node ast.Node) error {
p := &printer{
cfg: *c,
comments: make([]*ast.CommentGroup, 0),
standaloneComments: make([]*ast.CommentGroup, 0),
// enableTrace: true,
}
p.collectComments(node)
if _, err := output.Write(p.unindent(p.output(node))); err != nil {
return err
}
// flush tabwriter, if any
var err error
if tw, _ := output.(*tabwriter.Writer); tw != nil {
err = tw.Flush()
}
return err
}
// Fprint "pretty-prints" an HCL node to output
// It calls Config.Fprint with default settings.
func Fprint(output io.Writer, node ast.Node) error {
return DefaultConfig.Fprint(output, node)
}
// Format formats src HCL and returns the result.
func Format(src []byte) ([]byte, error) {
node, err := parser.Parse(src)
if err != nil {
return nil, err
}
var buf bytes.Buffer
if err := DefaultConfig.Fprint(&buf, node); err != nil {
return nil, err
}
// Add trailing newline to result
buf.WriteString("\n")
return buf.Bytes(), nil
}
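printer.Format, defined above, reparses the source and pretty-prints it with DefaultConfig (two-space indent, aligned assignments, trailing newline), which is what fmtcmd and the golden-file tests below depend on. A minimal sketch; the deliberately messy input is invented for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/printer"
)

func main() {
	src := []byte(`variable "foo" {
default="bar"
   description =    "bar"
}`)

	out, err := printer.Format(src)
	if err != nil {
		log.Fatal(err)
	}
	// Keys are re-indented, adjacent one-line assignments are aligned,
	// and Format appends a trailing newline.
	fmt.Print(string(out))
}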

@@ -0,0 +1,149 @@
package printer
import (
"bytes"
"errors"
"flag"
"fmt"
"io/ioutil"
"path/filepath"
"testing"
"github.com/hashicorp/hcl/hcl/parser"
)
var update = flag.Bool("update", false, "update golden files")
const (
dataDir = "testdata"
)
type entry struct {
source, golden string
}
// Use go test -update to create/update the respective golden files.
var data = []entry{
{"complexhcl.input", "complexhcl.golden"},
{"list.input", "list.golden"},
{"list_comment.input", "list_comment.golden"},
{"comment.input", "comment.golden"},
{"comment_crlf.input", "comment.golden"},
{"comment_aligned.input", "comment_aligned.golden"},
{"comment_array.input", "comment_array.golden"},
{"comment_end_file.input", "comment_end_file.golden"},
{"comment_multiline_indent.input", "comment_multiline_indent.golden"},
{"comment_multiline_no_stanza.input", "comment_multiline_no_stanza.golden"},
{"comment_multiline_stanza.input", "comment_multiline_stanza.golden"},
{"comment_newline.input", "comment_newline.golden"},
{"comment_object_multi.input", "comment_object_multi.golden"},
{"comment_standalone.input", "comment_standalone.golden"},
{"empty_block.input", "empty_block.golden"},
{"list_of_objects.input", "list_of_objects.golden"},
{"multiline_string.input", "multiline_string.golden"},
{"object_singleline.input", "object_singleline.golden"},
{"object_with_heredoc.input", "object_with_heredoc.golden"},
}
func TestFiles(t *testing.T) {
for _, e := range data {
source := filepath.Join(dataDir, e.source)
golden := filepath.Join(dataDir, e.golden)
t.Run(e.source, func(t *testing.T) {
check(t, source, golden)
})
}
}
func check(t *testing.T, source, golden string) {
src, err := ioutil.ReadFile(source)
if err != nil {
t.Error(err)
return
}
res, err := format(src)
if err != nil {
t.Error(err)
return
}
// update golden files if necessary
if *update {
if err := ioutil.WriteFile(golden, res, 0644); err != nil {
t.Error(err)
}
return
}
// get golden
gld, err := ioutil.ReadFile(golden)
if err != nil {
t.Error(err)
return
}
// formatted source and golden must be the same
if err := diff(source, golden, res, gld); err != nil {
t.Error(err)
return
}
}
// diff compares a and b.
func diff(aname, bname string, a, b []byte) error {
var buf bytes.Buffer // holding long error message
// compare lengths
if len(a) != len(b) {
fmt.Fprintf(&buf, "\nlength changed: len(%s) = %d, len(%s) = %d", aname, len(a), bname, len(b))
}
// compare contents
line := 1
offs := 1
for i := 0; i < len(a) && i < len(b); i++ {
ch := a[i]
if ch != b[i] {
fmt.Fprintf(&buf, "\n%s:%d:%d: %q", aname, line, i-offs+1, lineAt(a, offs))
fmt.Fprintf(&buf, "\n%s:%d:%d: %q", bname, line, i-offs+1, lineAt(b, offs))
fmt.Fprintf(&buf, "\n\n")
break
}
if ch == '\n' {
line++
offs = i + 1
}
}
if buf.Len() > 0 {
return errors.New(buf.String())
}
return nil
}
// format parses src, prints the corresponding AST, verifies the resulting
// src is syntactically correct, and returns the resulting src or an error
// if any.
func format(src []byte) ([]byte, error) {
formatted, err := Format(src)
if err != nil {
return nil, err
}
// make sure formatted output is syntactically correct
if _, err := parser.Parse(formatted); err != nil {
return nil, fmt.Errorf("parse: %s\n%s", err, formatted)
}
return formatted, nil
}
// lineAt returns the line in text starting at offset offs.
func lineAt(text []byte, offs int) []byte {
i := offs
for i < len(text) && text[i] != '\n' {
i++
}
return text[offs:i]
}

@@ -0,0 +1,36 @@
// A standalone comment is a comment which is not attached to any kind of node
// This comes from Terraform, as a test
variable "foo" {
# Standalone comment should be still here
default = "bar"
description = "bar" # yooo
}
/* This is a multi line standalone
comment*/
// fatih arslan
/* This is a developer test
account and a multine comment */
developer = ["fatih", "arslan"] // fatih arslan
# One line here
numbers = [1, 2] // another line here
# Another comment
variable = {
description = "bar" # another yooo
foo {
# Nested standalone
bar = "fatih"
}
}
// lead comment
foo {
bar = "fatih" // line comment 2
} // line comment 3

@@ -0,0 +1,37 @@
// A standalone comment is a comment which is not attached to any kind of node
// This comes from Terraform, as a test
variable "foo" {
# Standalone comment should be still here
default = "bar"
description = "bar" # yooo
}
/* This is a multi line standalone
comment*/
// fatih arslan
/* This is a developer test
account and a multine comment */
developer = [ "fatih", "arslan"] // fatih arslan
# One line here
numbers = [1,2] // another line here
# Another comment
variable = {
description = "bar" # another yooo
foo {
# Nested standalone
bar = "fatih"
}
}
// lead comment
foo {
bar = "fatih" // line comment 2
} // line comment 3

@@ -0,0 +1,32 @@
aligned {
# We have some aligned items below
foo = "fatih" # yoo1
default = "bar" # yoo2
bar = "bar and foo" # yoo3
default = {
bar = "example"
}
#deneme arslan
fatih = ["fatih"] # yoo4
#fatih arslan
fatiharslan = ["arslan"] // yoo5
default = {
bar = "example"
}
security_groups = [
"foo", # kenya 1
"${aws_security_group.firewall.foo}", # kenya 2
]
security_groups2 = [
"foo", # kenya 1
"bar", # kenya 1.5
"${aws_security_group.firewall.foo}", # kenya 2
"foobar", # kenya 3
]
}

@@ -0,0 +1,28 @@
aligned {
# We have some aligned items below
foo = "fatih" # yoo1
default = "bar" # yoo2
bar = "bar and foo" # yoo3
default = {
bar = "example"
}
#deneme arslan
fatih = ["fatih"] # yoo4
#fatih arslan
fatiharslan = ["arslan"] // yoo5
default = {
bar = "example"
}
security_groups = [
"foo", # kenya 1
"${aws_security_group.firewall.foo}", # kenya 2
]
security_groups2 = [
"foo", # kenya 1
"bar", # kenya 1.5
"${aws_security_group.firewall.foo}", # kenya 2
"foobar", # kenya 3
]
}

@@ -0,0 +1,13 @@
banana = [
# I really want to comment this item in the array.
"a",
# This as well
"b",
"c", # And C
"d",
# And another
"e",
]

@@ -0,0 +1,13 @@
banana = [
# I really want to comment this item in the array.
"a",
# This as well
"b",
"c", # And C
"d",
# And another
"e",
]

@@ -0,0 +1,37 @@
// A standalone comment is a comment which is not attached to any kind of node
// This comes from Terraform, as a test
variable "foo" {
# Standalone comment should be still here
default = "bar"
description = "bar" # yooo
}
/* This is a multi line standalone
comment*/
// fatih arslan
/* This is a developer test
account and a multine comment */
developer = [ "fatih", "arslan"] // fatih arslan
# One line here
numbers = [1,2] // another line here
# Another comment
variable = {
description = "bar" # another yooo
foo {
# Nested standalone
bar = "fatih"
}
}
// lead comment
foo {
bar = "fatih" // line comment 2
} // line comment 3

@@ -0,0 +1,6 @@
resource "blah" "blah" {}
//
//
//

@@ -0,0 +1,5 @@
resource "blah" "blah" {}
//
//
//

Some files were not shown because too many files have changed in this diff