Mirror of https://github.com/Luzifer/duplicity-backup.git (synced 2024-12-20 10:31:20 +00:00)

Commit 7b05eebe6a: Initial version
176 changed files with 25611 additions and 0 deletions

.gitignore (vendored, new file, 1 line)
@@ -0,0 +1 @@
duplicity-backup

.travis.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
language: go

go:
- 1.5
- 1.6
- tip

install: make setup-testenv
script: make test

Godeps/Godeps.json (generated, new file, 168 lines)
@@ -0,0 +1,168 @@
{
    "ImportPath": "github.com/Luzifer/duplicity-backup",
    "GoVersion": "go1.6",
    "GodepVersion": "v62",
    "Deps": [
        {
            "ImportPath": "github.com/Luzifer/go_helpers/which",
            "Comment": "v1.3.0",
            "Rev": "6abbbafaada02b63dd8f9e185921fd8b3c35b6c2"
        },
        {
            "ImportPath": "github.com/Luzifer/rconfig",
            "Comment": "v1.0.3-2-g2677653",
            "Rev": "26776536e61487fdffbd3ce87f827177a5903f98"
        },
        {
            "ImportPath": "github.com/asaskevich/govalidator",
            "Comment": "v4-6-gdf81827",
            "Rev": "df81827fdd59d8b4fb93d8910b286ab7a3919520"
        },
        {
            "ImportPath": "github.com/mitchellh/go-homedir",
            "Rev": "981ab348d865cf048eb7d17e78ac7192632d8415"
        },
        {
            "ImportPath": "github.com/nightlyone/lockfile",
            "Rev": "b30dcbfa86e3a1eaa4e6622de2ce57be2c138c10"
        },
        {
            "ImportPath": "github.com/onsi/ginkgo",
            "Comment": "v1.2.0-14-g1b59c57",
            "Rev": "1b59c57df76ede42c08590546916e6a18685857d"
        },
        {
            "ImportPath": "github.com/onsi/ginkgo/config",
            "Comment": "v1.2.0-14-g1b59c57",
            "Rev": "1b59c57df76ede42c08590546916e6a18685857d"
        },
        {
            "ImportPath": "github.com/onsi/ginkgo/internal/codelocation",
            "Comment": "v1.2.0-14-g1b59c57",
            "Rev": "1b59c57df76ede42c08590546916e6a18685857d"
        },
        {
            "ImportPath": "github.com/onsi/ginkgo/internal/containernode",
            "Comment": "v1.2.0-14-g1b59c57",
            "Rev": "1b59c57df76ede42c08590546916e6a18685857d"
        },
        {
            "ImportPath": "github.com/onsi/ginkgo/internal/failer",
            "Comment": "v1.2.0-14-g1b59c57",
            "Rev": "1b59c57df76ede42c08590546916e6a18685857d"
        },
        {
            "ImportPath": "github.com/onsi/ginkgo/internal/leafnodes",
            "Comment": "v1.2.0-14-g1b59c57",
            "Rev": "1b59c57df76ede42c08590546916e6a18685857d"
        },
        {
            "ImportPath": "github.com/onsi/ginkgo/internal/remote",
            "Comment": "v1.2.0-14-g1b59c57",
            "Rev": "1b59c57df76ede42c08590546916e6a18685857d"
        },
        {
            "ImportPath": "github.com/onsi/ginkgo/internal/spec",
            "Comment": "v1.2.0-14-g1b59c57",
            "Rev": "1b59c57df76ede42c08590546916e6a18685857d"
        },
        {
            "ImportPath": "github.com/onsi/ginkgo/internal/specrunner",
            "Comment": "v1.2.0-14-g1b59c57",
            "Rev": "1b59c57df76ede42c08590546916e6a18685857d"
        },
        {
            "ImportPath": "github.com/onsi/ginkgo/internal/suite",
            "Comment": "v1.2.0-14-g1b59c57",
            "Rev": "1b59c57df76ede42c08590546916e6a18685857d"
        },
        {
            "ImportPath": "github.com/onsi/ginkgo/internal/testingtproxy",
            "Comment": "v1.2.0-14-g1b59c57",
            "Rev": "1b59c57df76ede42c08590546916e6a18685857d"
        },
        {
            "ImportPath": "github.com/onsi/ginkgo/internal/writer",
            "Comment": "v1.2.0-14-g1b59c57",
            "Rev": "1b59c57df76ede42c08590546916e6a18685857d"
        },
        {
            "ImportPath": "github.com/onsi/ginkgo/reporters",
            "Comment": "v1.2.0-14-g1b59c57",
            "Rev": "1b59c57df76ede42c08590546916e6a18685857d"
        },
        {
            "ImportPath": "github.com/onsi/ginkgo/reporters/stenographer",
            "Comment": "v1.2.0-14-g1b59c57",
            "Rev": "1b59c57df76ede42c08590546916e6a18685857d"
        },
        {
            "ImportPath": "github.com/onsi/ginkgo/types",
            "Comment": "v1.2.0-14-g1b59c57",
            "Rev": "1b59c57df76ede42c08590546916e6a18685857d"
        },
        {
            "ImportPath": "github.com/onsi/gomega",
            "Comment": "v1.0-55-g6331bf5",
            "Rev": "6331bf5a5b5e7a832348789eb3cedff7a6917103"
        },
        {
            "ImportPath": "github.com/onsi/gomega/format",
            "Comment": "v1.0-55-g6331bf5",
            "Rev": "6331bf5a5b5e7a832348789eb3cedff7a6917103"
        },
        {
            "ImportPath": "github.com/onsi/gomega/internal/assertion",
            "Comment": "v1.0-55-g6331bf5",
            "Rev": "6331bf5a5b5e7a832348789eb3cedff7a6917103"
        },
        {
            "ImportPath": "github.com/onsi/gomega/internal/asyncassertion",
            "Comment": "v1.0-55-g6331bf5",
            "Rev": "6331bf5a5b5e7a832348789eb3cedff7a6917103"
        },
        {
            "ImportPath": "github.com/onsi/gomega/internal/testingtsupport",
            "Comment": "v1.0-55-g6331bf5",
            "Rev": "6331bf5a5b5e7a832348789eb3cedff7a6917103"
        },
        {
            "ImportPath": "github.com/onsi/gomega/matchers",
            "Comment": "v1.0-55-g6331bf5",
            "Rev": "6331bf5a5b5e7a832348789eb3cedff7a6917103"
        },
        {
            "ImportPath": "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph",
            "Comment": "v1.0-55-g6331bf5",
            "Rev": "6331bf5a5b5e7a832348789eb3cedff7a6917103"
        },
        {
            "ImportPath": "github.com/onsi/gomega/matchers/support/goraph/edge",
            "Comment": "v1.0-55-g6331bf5",
            "Rev": "6331bf5a5b5e7a832348789eb3cedff7a6917103"
        },
        {
            "ImportPath": "github.com/onsi/gomega/matchers/support/goraph/node",
            "Comment": "v1.0-55-g6331bf5",
            "Rev": "6331bf5a5b5e7a832348789eb3cedff7a6917103"
        },
        {
            "ImportPath": "github.com/onsi/gomega/matchers/support/goraph/util",
            "Comment": "v1.0-55-g6331bf5",
            "Rev": "6331bf5a5b5e7a832348789eb3cedff7a6917103"
        },
        {
            "ImportPath": "github.com/onsi/gomega/types",
            "Comment": "v1.0-55-g6331bf5",
            "Rev": "6331bf5a5b5e7a832348789eb3cedff7a6917103"
        },
        {
            "ImportPath": "github.com/spf13/pflag",
            "Rev": "b084184666e02084b8ccb9b704bf0d79c466eb1d"
        },
        {
            "ImportPath": "gopkg.in/yaml.v2",
            "Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77"
        }
    ]
}

Godeps/Readme (generated, new file, 5 lines)
@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.

Makefile (new file, 9 lines)
@@ -0,0 +1,9 @@
bindata:
	go-bindata help.txt

setup-testenv:
	go get github.com/onsi/ginkgo/ginkgo
	go get github.com/onsi/gomega

test:
	$(GOPATH)/bin/ginkgo

bindata.go (new file, 235 lines)
@@ -0,0 +1,235 @@
// Code generated by go-bindata.
// sources:
// help.txt
// DO NOT EDIT!

package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "strings"
    "time"
)

func bindataRead(data []byte, name string) ([]byte, error) {
    gz, err := gzip.NewReader(bytes.NewBuffer(data))
    if err != nil {
        return nil, fmt.Errorf("Read %q: %v", name, err)
    }

    var buf bytes.Buffer
    _, err = io.Copy(&buf, gz)
    clErr := gz.Close()

    if err != nil {
        return nil, fmt.Errorf("Read %q: %v", name, err)
    }
    if clErr != nil {
        return nil, err
    }

    return buf.Bytes(), nil
}

type asset struct {
    bytes []byte
    info  os.FileInfo
}

type bindataFileInfo struct {
    name    string
    size    int64
    mode    os.FileMode
    modTime time.Time
}

func (fi bindataFileInfo) Name() string {
    return fi.name
}
func (fi bindataFileInfo) Size() int64 {
    return fi.size
}
func (fi bindataFileInfo) Mode() os.FileMode {
    return fi.mode
}
func (fi bindataFileInfo) ModTime() time.Time {
    return fi.modTime
}
func (fi bindataFileInfo) IsDir() bool {
    return false
}
func (fi bindataFileInfo) Sys() interface{} {
    return nil
}

var _helpTxt = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xac\x93\x3f\x73\xdb\x30\x0c\xc5\x77\x7d\x0a\x8c\xed\xa0\x64\xcf\xd6\x8b\x2f\x53\x87\x5e\x7b\xbd\x0e\x3e\x0f\x30\x05\x49\xbc\xd0\xa4\x0e\x04\x93\xda\x43\x3f\x7b\xc1\x3f\x76\xe4\xa4\xde\xca\x4d\xe4\xc3\x8f\x4f\x8f\xc0\x2f\xc6\x65\x21\x06\xe4\x90\xfc\x00\x43\x5a\x9c\x35\x56\x8e\x20\x01\x0c\x13\x0a\xc1\x1e\xcd\x73\x5a\xe0\xd5\xca\x0c\x8e\x62\x84\x19\x4f\x27\x47\x5d\xf7\x33\xe2\x44\x0f\x1d\xbc\x55\xf5\x4d\xbb\x35\xe1\x70\x40\x3f\xec\xba\xee\xcb\x0b\x5a\x87\x7b\x47\xf0\x58\xf7\x62\xae\x68\xba\x1b\xeb\xf1\xea\x62\x34\x26\xf0\x60\xfd\x94\x3d\xc9\x7c\xd9\xe7\xa4\x6e\x94\x65\x1c\xa1\xbf\x09\xdb\x90\x23\x65\xe5\x3a\xfa\x2d\x8c\x9e\x42\x8a\xab\xff\x1c\x6d\xa5\x38\x1b\xa5\x37\x89\x99\xbc\xf4\x65\x73\x4d\xf9\xaa\xa7\xb1\x40\xea\x91\x09\x5e\xd0\x7a\x1a\xc0\xfa\x95\x27\xe5\x30\x45\x09\x4c\xb0\xcd\x42\x58\x50\xe6\x1d\x6c\x05\x79\x22\xd9\x01\x7c\xaf\xa7\x11\xa2\xfe\x8f\xab\x34\xb8\x87\xc1\x72\xf9\xb9\x22\xcb\x5f\x64\x54\x76\x5c\xe3\x2e\x88\xf5\xba\xe0\xe8\x85\xf8\x28\xf3\x39\xa4\x8f\x9c\x28\x28\x29\xde\x0a\xfc\x47\xd2\xa7\x61\x7b\xaa\x39\x35\x6d\x18\xaf\xd2\xa6\x25\x44\xdb\x68\x7a\x9b\x1d\x8f\x37\x9f\x2f\x1c\x16\xcc\xae\x5a\x69\x0e\x4b\x53\x8d\x80\x93\x66\x16\x05\x5c\x30\xe8\x5a\xf2\xdd\x93\xc3\xa9\xf4\x44\xdf\xab\x70\xb4\x53\xdf\x42\xe9\xc7\x6b\x68\x3e\x4b\x8c\x62\x83\x87\x31\x68\x60\xb3\x5d\x3f\xe4\x6b\xed\xe4\xee\x96\xab\xf3\xfa\xb4\xa1\x11\x93\x93\x07\xf8\x73\x7f\x57\x6f\xbc\x7f\xdf\xc0\x77\x47\x3c\xb8\xcf\xc5\x93\x7a\x7d\xbe\x38\x72\xd7\xa8\xa7\xbc\xad\x79\xcf\xc1\x0d\x25\xab\x2c\x7e\xf3\xd6\x1c\x69\xdf\x91\x49\xd9\xf6\xff\xf1\x96\x2f\xa9\xde\x06\xda\xa7\x29\xfb\x1a\x3e\xa2\xbe\xb1\xf5\xb2\x8a\xa7\x4d\x64\xcc\x7e\x43\x92\x25\x49\x45\xb0\xf4\x9c\x7c\x86\xf8\xf7\x88\x4d\x00\x04\xd1\x16\x2b\x8a\x3c\xff\x5a\x08\x66\x46\x3f\x95\x91\xe9\x7b\xed\x83\x98\x9f\xe3\x5f\xab\x18\xa8\x33\xd3\xc6\x0a\x16\x0e\x13\xe3\x01\xce\x65\xea\x47\xc3\xb1\x12\xbb\xbf\x01\x00\x00\xff\xff\xd7\x01\xa8\x37\x87\x04\x00\x00")

func helpTxtBytes() ([]byte, error) {
    return bindataRead(
        _helpTxt,
        "help.txt",
    )
}

func helpTxt() (*asset, error) {
    bytes, err := helpTxtBytes()
    if err != nil {
        return nil, err
    }

    info := bindataFileInfo{name: "help.txt", size: 1159, mode: os.FileMode(420), modTime: time.Unix(1463914687, 0)}
    a := &asset{bytes: bytes, info: info}
    return a, nil
}

// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
    cannonicalName := strings.Replace(name, "\\", "/", -1)
    if f, ok := _bindata[cannonicalName]; ok {
        a, err := f()
        if err != nil {
            return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
        }
        return a.bytes, nil
    }
    return nil, fmt.Errorf("Asset %s not found", name)
}

// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
    a, err := Asset(name)
    if err != nil {
        panic("asset: Asset(" + name + "): " + err.Error())
    }

    return a
}

// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
    cannonicalName := strings.Replace(name, "\\", "/", -1)
    if f, ok := _bindata[cannonicalName]; ok {
        a, err := f()
        if err != nil {
            return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
        }
        return a.info, nil
    }
    return nil, fmt.Errorf("AssetInfo %s not found", name)
}

// AssetNames returns the names of the assets.
func AssetNames() []string {
    names := make([]string, 0, len(_bindata))
    for name := range _bindata {
        names = append(names, name)
    }
    return names
}

// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
    "help.txt": helpTxt,
}

// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//     data/
//       foo.txt
//       img/
//         a.png
//         b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
    node := _bintree
    if len(name) != 0 {
        cannonicalName := strings.Replace(name, "\\", "/", -1)
        pathList := strings.Split(cannonicalName, "/")
        for _, p := range pathList {
            node = node.Children[p]
            if node == nil {
                return nil, fmt.Errorf("Asset %s not found", name)
            }
        }
    }
    if node.Func != nil {
        return nil, fmt.Errorf("Asset %s not found", name)
    }
    rv := make([]string, 0, len(node.Children))
    for childName := range node.Children {
        rv = append(rv, childName)
    }
    return rv, nil
}

type bintree struct {
    Func     func() (*asset, error)
    Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
    "help.txt": &bintree{helpTxt, map[string]*bintree{}},
}}

// RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error {
    data, err := Asset(name)
    if err != nil {
        return err
    }
    info, err := AssetInfo(name)
    if err != nil {
        return err
    }
    err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
    if err != nil {
        return err
    }
    err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
    if err != nil {
        return err
    }
    err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
    if err != nil {
        return err
    }
    return nil
}

// RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error {
    children, err := AssetDir(name)
    // File
    if err != nil {
        return RestoreAsset(dir, name)
    }
    // Dir
    for _, child := range children {
        err = RestoreAssets(dir, filepath.Join(name, child))
        if err != nil {
            return err
        }
    }
    return nil
}

func _filePath(dir, name string) string {
    cannonicalName := strings.Replace(name, "\\", "/", -1)
    return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}
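
Editor's note: the sketch below is not part of the commit; it only illustrates how the generated accessors above are consumed (main.go later in this diff reads the embedded help text the same way). It assumes the code lives in the same `main` package as bindata.go.

```go
// Illustrative only: print the embedded help.txt through the generated accessor.
func printEmbeddedHelp() {
    helptext, err := Asset("help.txt") // gunzips and returns the embedded file content
    if err != nil {
        panic(err) // MustAsset("help.txt") would panic on error instead of returning it
    }
    fmt.Println(string(helptext))
}
```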

config.example.yaml (new file, 172 lines)
@@ -0,0 +1,172 @@
###
# Backup source
###
#
# Root of the backup to create. If no explicit excludes or includes are
# defined all files inside this directory will be backed up.
root: /home

# Hostname for notifications (if left out the hostname of the machine
# is used for this).
#hostname: mystation

###
# Backup destination
###
#
# Destination for the backup. Check the duplicity man-page for all
# possible destinations to use.
dest: s3+http://foobar-backup-bucket/backup-folder/

# Some examples of destinations usable for your backup:
#dest: ftp://user[:password]@other.host[:port]/some_dir
#dest: rsync://user@host.com[:port]//absolute_path
#dest: ssh://user[:password]@other.host[:port]/[/]some_dir

# The "ftp_password" is used for several backends despite the option's
# name. You can use this option instead of passing the password in the
# `dest` parameter as that one is visible in the process list while
# the backup is running.
#ftp_password: password

###
# Amazon WebServices S3 configuration
###
#
# Uncomment the lines in this section if you're using Amazon S3
aws:
  # access_key_id: foobar_aws_key_id
  # secret_access_key: foobar_aws_access_key

  # Without setting the storage class the standard storage is used. With
  # this option you can switch to "infrequent access" (--s3-use-ia) or
  # "reduced redundancy" (--s3-use-rrs) storage class.
  # storage_class: --s3-use-ia

###
# Google Cloud Storage configuration
###
#
# Uncomment the lines in this section if you're using GCS
google_cloud:
  # access_key_id: foobar_gcs_key_id
  # secret_access_key: foobar_gcs_secret_id

###
# OpenStack Object Storage (Swift) configuration
###
#
# Uncomment the lines in this section if you're using OpenStack
swift:
  # username: foobar_swift_tenant:foobar_swift_username
  # password: foobar_swift_password
  # auth_url: foobar_swift_authurl
  # auth_version: 2

###
# Include list of directories
###
#
# Specify directories inside your `root` to back up only these ones.
# You can't specify directories outside the root. If you want to back up
# all the data inside the root leave this list empty.
inclist:
  - /home/myuser/projects

###
# Exclude list of directories
###
#
# Specify directories inside your `root` to exclude these ones.
# You can't specify directories outside the root. If you want to back up
# all the data inside the root leave this list empty.
exclist:
  - /home/myuser/projects/testproject

###
# Other file selection options
###
#
# Instead of managing the inclist / exclist parameters in this file you
# can write a text file containing lines with +/- patterns to include
# or exclude files and directories from the backup.
# See http://duplicity.nongnu.org/duplicity.1.html#sect9 for details
#incexcfile: /home/myuser/.config/backup-files.txt

# Exclude all device files. This can be useful for security/permissions
# reasons or if rdiff-backup is not handling device files correctly.
#excdevicefiles: true

###
# Encryption configuration
###
#
encryption:
  # Enable / disable encryption of your backup. If you enable encryption
  # you either need to specify a password or a GPG key.
  enable: true

  # If you're using a `gpg_sign_key` to sign your backup this password is
  # used to unlock the GPG key. If you're not using a GPG key it is used
  # to symmetrically encrypt the backup.
  passphrase: foobar_gpg_passphrase

  # Specify the GPG key(s) to use for encryption / signing the backup.
  # You may use different keys for those tasks. If you specify a signing
  # key you need to specify the password above to unlock the key.
  # gpg_encryption_key: foobar_gpg_key
  # gpg_sign_key: foobar_gpg_key

  # If you want to hide the GPG key IDs for security reasons you can
  # enable this option.
  # hide_key_id: true

  # You can specify the keyring which contains your above specified keys
  # in case they are not present in the default keyring.
  # secret_keyring: /home/myuser/.gnupg/duplicity.gpg

###
# Static backup options
###
#
# Here you can specify other options for duplicity not handled in this
# configuration file. Reference is the manpage of duplicity. Please
# ensure you're specifying the options in command array format.
static_options: ["--full-if-older-than", "14D", "--s3-use-new-style"]

###
# Backup cleanup options
###
#
cleanup:
  # Choose the cleanup type and the configured value for that cleanup type:
  #   remove-older-than <time>
  #   remove-all-but-n-full <count>
  #   remove-all-inc-of-but-n-full <count>
  #   none
  type: remove-all-but-n-full
  value: 4

###
# Logging
###
#
# Set the directory the logs are written to
logdir: /tmp/duplicity/

###
# Notification configuration
###
#
# Uncomment and configure your preferred notification channel
notifications:
  # slack:
  #   hook_url: https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX
  #   channel: "#general"
  #   username: duplicity-backup
  #   emoji: :package:
  # mondash:
  #   board: yourboardurl
  #   token: yoursecrettoken
  #   metric: duplicity backup (%h)
  #   instance: https://mondash.org/
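
Editor's note: this example configuration maps onto the `configFile` struct defined in configfile.go below. The sketch here is not part of the commit and uses a hypothetical path; it only shows how such a file is loaded and validated.

```go
// Illustrative only: parse and validate a config file like the example above.
func loadExampleConfig() (*configFile, error) {
    f, err := os.Open("/home/myuser/.config/duplicity-backup.yaml") // hypothetical path
    if err != nil {
        return nil, err
    }
    defer f.Close()

    // loadConfigFile (configfile.go) unmarshals the YAML and runs validate(),
    // which enforces the required fields root, dest and logdir and checks that
    // credentials are present for s3/gs destinations.
    return loadConfigFile(f)
}
```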

configfile.go (new file, 328 lines)
@@ -0,0 +1,328 @@
package main

import (
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "strconv"

    valid "github.com/asaskevich/govalidator"
    "gopkg.in/yaml.v2"
)

type configFile struct {
    RootPath    string `yaml:"root" valid:"required"`
    Hostname    string `yaml:"hostname"`
    Destination string `yaml:"dest" valid:"required"`
    FTPPassword string `yaml:"ftp_password"`
    AWS         struct {
        AccessKeyID     string `yaml:"access_key_id"`
        SecretAccessKey string `yaml:"secret_access_key"`
        StorageClass    string `yaml:"storage_class"`
    } `yaml:"aws"`
    GoogleCloud struct {
        AccessKeyID     string `yaml:"access_key_id"`
        SecretAccessKey string `yaml:"secret_access_key"`
    } `yaml:"google_cloud"`
    Swift struct {
        Username    string `yaml:"username"`
        Password    string `yaml:"password"`
        AuthURL     string `yaml:"auth_url"`
        AuthVersion int    `yaml:"auth_version"`
    } `yaml:"swift"`
    Include            []string `yaml:"inclist"`
    Exclude            []string `yaml:"exclist"`
    IncExcFile         string   `yaml:"incexcfile"`
    ExcludeDeviceFiles bool     `yaml:"excdevicefiles"`
    Encryption         struct {
        Enable           bool   `yaml:"enable"`
        Passphrase       string `yaml:"passphrase"`
        GPGEncryptionKey string `yaml:"gpg_encryption_key"`
        GPGSignKey       string `yaml:"gpg_sign_key"`
        HideKeyID        bool   `yaml:"hide_key_id"`
        SecretKeyRing    string `yaml:"secret_keyring"`
    } `yaml:"encryption"`
    StaticBackupOptions []string `yaml:"static_options"`
    Cleanup             struct {
        Type  string `yaml:"type"`
        Value string `yaml:"value"`
    } `yaml:"cleanup"`
    LogDirectory  string `yaml:"logdir" valid:"required"`
    Notifications struct {
        Slack struct {
            HookURL  string `yaml:"hook_url"`
            Channel  string `yaml:"channel"`
            Username string `yaml:"username"`
            Emoji    string `yaml:"emoji"`
        } `yaml:"slack"`
        MonDash struct {
            Board    string `yaml:"board"`
            Token    string `yaml:"token"`
            Metric   string `yaml:"metric"`
            Instance string `yaml:"instance"`
        } `yaml:"mondash"`
    } `yaml:"notifications"`
}

func (c *configFile) validate() error {
    result, err := valid.ValidateStruct(c)
    if !result || err != nil {
        return err
    }

    if c.Encryption.Enable && c.Encryption.GPGSignKey != "" && c.Encryption.Passphrase == "" {
        return errors.New("With gpg_sign_key passphrase is required")
    }

    if c.Encryption.Enable && c.Encryption.GPGEncryptionKey == "" && c.Encryption.Passphrase == "" {
        return errors.New("Encryption is enabled but no encryption key or passphrase is specified")
    }

    if c.Destination[0:2] == "s3" && (c.AWS.AccessKeyID == "" || c.AWS.SecretAccessKey == "") {
        return errors.New("Destination is S3 but AWS credentials are not configured")
    }

    if c.Destination[0:2] == "gs" && (c.GoogleCloud.AccessKeyID == "" || c.GoogleCloud.SecretAccessKey == "") {
        return errors.New("Destination is GCS but Google Cloud credentials are not configured")
    }

    if _, err := os.Stat(c.IncExcFile); c.IncExcFile != "" && os.IsNotExist(err) {
        return errors.New("Specified incexcfile does not exist")
    }

    return nil
}

func loadConfigFile(in io.Reader) (*configFile, error) {
    fileContent, err := ioutil.ReadAll(in)
    if err != nil {
        return nil, err
    }

    res := &configFile{}
    if err := yaml.Unmarshal(fileContent, res); err != nil {
        return nil, err
    }

    return res, res.validate()
}

func (c *configFile) GenerateCommand(argv []string, time string) (commandLine []string, env []string, err error) {
    var (
        tmpArg, tmpEnv     []string
        option, root, dest string
        addTime            bool
        command            = argv[0]
    )

    switch command {
    case "backup":
        option = ""
        root = c.RootPath
        dest = c.Destination
        commandLine, env, err = c.generateFullCommand(option, time, root, dest, addTime, "")
    case "cleanup":
        option = "cleanup"
        commandLine, env, err = c.generateLiteCommand(option, time, addTime)
    case "list-current-files":
        option = "list-current-files"
        commandLine, env, err = c.generateLiteCommand(option, time, addTime)
    case "restore":
        addTime = true
        option = "restore"
        root = c.Destination
        restoreFile := ""

        if len(argv) == 3 {
            restoreFile = argv[1]
            dest = argv[2]
        } else if len(argv) == 2 {
            dest = argv[1]
        } else {
            err = errors.New("You need to specify one or more parameters. See help message.")
            return
        }

        commandLine, env, err = c.generateFullCommand(option, time, root, dest, addTime, restoreFile)
    case "status":
        option = "collection-status"
        commandLine, env, err = c.generateLiteCommand(option, time, addTime)
    case "verify":
        option = "verify"
        root = c.Destination
        dest = c.RootPath
        commandLine, env, err = c.generateFullCommand(option, time, root, dest, addTime, "")
    case "__remove_old":
        commandLine, env, err = c.generateRemoveCommand()
    default:
        err = fmt.Errorf("Did not understand command '%s', please see 'help' for details on what to do.", command)
        return
    }

    // Add destination credentials
    tmpEnv = c.generateCredentialExport()
    env = append(env, tmpEnv...)

    // Clean empty entries from the list
    tmpArg = []string{}
    tmpEnv = []string{}

    for _, i := range commandLine {
        if i != "" {
            tmpArg = append(tmpArg, i)
        }
    }

    for _, i := range env {
        if i != "" {
            tmpEnv = append(tmpEnv, i)
        }
    }

    commandLine = tmpArg
    env = tmpEnv

    return
}

func (c *configFile) generateCredentialExport() (env []string) {
    if c.AWS.AccessKeyID != "" {
        env = append(env, "AWS_ACCESS_KEY_ID="+c.AWS.AccessKeyID)
        env = append(env, "AWS_SECRET_ACCESS_KEY="+c.AWS.SecretAccessKey)
    }
    if c.GoogleCloud.AccessKeyID != "" {
        env = append(env, "GS_ACCESS_KEY_ID="+c.GoogleCloud.AccessKeyID)
        env = append(env, "GS_SECRET_ACCESS_KEY="+c.GoogleCloud.SecretAccessKey)
    }
    if c.Swift.Username != "" {
        env = append(env, "SWIFT_USERNAME="+c.Swift.Username)
        env = append(env, "SWIFT_PASSWORD="+c.Swift.Password)
        env = append(env, "SWIFT_AUTHURL="+c.Swift.AuthURL)
        env = append(env, "SWIFT_AUTHVERSION="+strconv.FormatInt(int64(c.Swift.AuthVersion), 10))
    }
    if c.FTPPassword != "" {
        env = append(env, "FTP_PASSWORD="+c.FTPPassword)
    }

    return
}

func (c *configFile) generateRemoveCommand() (commandLine []string, env []string, err error) {
    var tmpArg, tmpEnv []string
    // Assemble command
    commandLine = append(commandLine, c.Cleanup.Type, c.Cleanup.Value)
    // Static Options
    commandLine = append(commandLine, c.StaticBackupOptions...)
    // Encryption options
    tmpArg, tmpEnv = c.generateEncryption(c.Cleanup.Type)
    commandLine = append(commandLine, tmpArg...)
    env = append(env, tmpEnv...)
    // Enforce cleanup
    commandLine = append(commandLine, "--force")
    // Remote repo
    commandLine = append(commandLine, c.Destination)

    return
}

func (c *configFile) generateLiteCommand(option, time string, addTime bool) (commandLine []string, env []string, err error) {
    var tmpArg, tmpEnv []string
    // Assemble command
    commandLine = append(commandLine, option)
    // Static Options
    commandLine = append(commandLine, c.StaticBackupOptions...)
    if addTime && time != "" {
        commandLine = append(commandLine, "--time", time)
    }
    // Encryption options
    tmpArg, tmpEnv = c.generateEncryption(option)
    commandLine = append(commandLine, tmpArg...)
    env = append(env, tmpEnv...)
    // Remote repo
    commandLine = append(commandLine, c.Destination)

    return
}

func (c *configFile) generateFullCommand(option, time, root, dest string, addTime bool, restoreFile string) (commandLine []string, env []string, err error) {
    var tmpArg, tmpEnv []string
    // Assemble command
    commandLine = append(commandLine, option)
    // Static Options
    commandLine = append(commandLine, c.StaticBackupOptions...)
    if addTime && time != "" {
        commandLine = append(commandLine, "--time", time)
    }
    if restoreFile != "" {
        commandLine = append(commandLine, "--file-to-restore", restoreFile)
    }
    // AWS Storage Class (empty if not used, will get stripped)
    commandLine = append(commandLine, c.AWS.StorageClass)
    // Encryption options
    tmpArg, tmpEnv = c.generateEncryption(option)
    commandLine = append(commandLine, tmpArg...)
    env = append(env, tmpEnv...)
    // Includes / Excludes
    tmpArg, tmpEnv = c.generateIncludeExclude()
    commandLine = append(commandLine, tmpArg...)
    env = append(env, tmpEnv...)
    // Source / Destination
    commandLine = append(commandLine, root, dest)

    return
}

func (c *configFile) generateIncludeExclude() (arguments []string, env []string) {
    if c.ExcludeDeviceFiles {
        arguments = append(arguments, "--exclude-device-files")
    }

    for _, exc := range c.Exclude {
        arguments = append(arguments, "--exclude="+exc)
    }

    for _, inc := range c.Include {
        arguments = append(arguments, "--include="+inc)
    }

    if c.IncExcFile != "" {
        arguments = append(arguments, "--include-globbing-filelist", c.IncExcFile)
    }

    if len(c.Include) > 0 || c.IncExcFile != "" {
        arguments = append(arguments, "--exclude=**")
    }

    return
}

func (c *configFile) generateEncryption(command string) (arguments []string, env []string) {
    if !c.Encryption.Enable {
        arguments = append(arguments, "--no-encryption")
        return
    }

    if c.Encryption.Passphrase != "" {
        env = append(env, "PASSPHRASE="+c.Encryption.Passphrase)
    }

    if c.Encryption.GPGEncryptionKey != "" {
        if c.Encryption.HideKeyID {
            arguments = append(arguments, "--hidden-encrypt-key="+c.Encryption.GPGEncryptionKey)
        } else {
            arguments = append(arguments, "--encrypt-key="+c.Encryption.GPGEncryptionKey)
        }
    }

    if c.Encryption.GPGSignKey != "" && command != "restore" {
        arguments = append(arguments, "--sign-key="+c.Encryption.GPGSignKey)
    }

    if c.Encryption.GPGEncryptionKey != "" && c.Encryption.SecretKeyRing != "" {
        arguments = append(arguments, "--encrypt-secret-keyring="+c.Encryption.SecretKeyRing)
    }

    return
}

configfile_test.go (new file, 245 lines)
@@ -0,0 +1,245 @@
package main

import (
    "bytes"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

var _ = Describe("Configfile", func() {
    var config = `---
root: /
hostname: testing
dest: s3+http://my-backup/myhost/
aws:
  access_key_id: AKIAJKCC13246798732A
  secret_access_key: Oosdkfjadgiuagbiajbgaliurtbjsbfgaldfbgdf
inclist:
  - /data
encryption:
  enable: true
  passphrase: 5pJZqnzrmFSi1wqZtcUh
static_options: ["--full-if-older-than", "7D", "--s3-use-new-style"]
cleanup:
  type: remove-all-but-n-full
  value: 2
logdir: /var/log/duplicity/
`

    var (
        commandLine, env, argv []string
        loadErr, err           error
        t                      string
        cf                     *configFile
    )

    JustBeforeEach(func() {
        cfg := bytes.NewBuffer([]byte(config))
        cf, loadErr = loadConfigFile(cfg)
        if loadErr != nil {
            panic(loadErr)
        }
        commandLine, env, err = cf.GenerateCommand(argv, t)
    })

    Context("Backup with given config", func() {
        BeforeEach(func() {
            argv = []string{"backup"}
        })

        It("should not have errored", func() {
            Expect(err).NotTo(HaveOccurred())
        })

        It("should have exported secrets in ENV variables", func() {
            Expect(env).To(Equal([]string{
                "PASSPHRASE=5pJZqnzrmFSi1wqZtcUh",
                "AWS_ACCESS_KEY_ID=AKIAJKCC13246798732A",
                "AWS_SECRET_ACCESS_KEY=Oosdkfjadgiuagbiajbgaliurtbjsbfgaldfbgdf",
            }))
        })

        It("should have generated the expected commandLine", func() {
            Expect(commandLine).To(Equal([]string{
                "--full-if-older-than", "7D",
                "--s3-use-new-style",
                "--include=/data",
                "--exclude=**",
                "/", "s3+http://my-backup/myhost/",
            }))
        })
    })

    Context("auto-removal with given config", func() {
        BeforeEach(func() {
            argv = []string{"__remove_old"}
        })

        It("should not have errored", func() {
            Expect(err).NotTo(HaveOccurred())
        })

        It("should have exported secrets in ENV variables", func() {
            Expect(env).To(Equal([]string{
                "PASSPHRASE=5pJZqnzrmFSi1wqZtcUh",
                "AWS_ACCESS_KEY_ID=AKIAJKCC13246798732A",
                "AWS_SECRET_ACCESS_KEY=Oosdkfjadgiuagbiajbgaliurtbjsbfgaldfbgdf",
            }))
        })

        It("should have generated the expected commandLine", func() {
            Expect(commandLine).To(Equal([]string{
                "remove-all-but-n-full", "2",
                "--full-if-older-than", "7D",
                "--s3-use-new-style", "--force",
                "s3+http://my-backup/myhost/",
            }))
        })
    })

    Context("verify with given config", func() {
        BeforeEach(func() {
            argv = []string{"verify"}
        })

        It("should not have errored", func() {
            Expect(err).NotTo(HaveOccurred())
        })

        It("should have exported secrets in ENV variables", func() {
            Expect(env).To(Equal([]string{
                "PASSPHRASE=5pJZqnzrmFSi1wqZtcUh",
                "AWS_ACCESS_KEY_ID=AKIAJKCC13246798732A",
                "AWS_SECRET_ACCESS_KEY=Oosdkfjadgiuagbiajbgaliurtbjsbfgaldfbgdf",
            }))
        })

        It("should have generated the expected commandLine", func() {
            Expect(commandLine).To(Equal([]string{
                "verify",
                "--full-if-older-than", "7D",
                "--s3-use-new-style",
                "--include=/data",
                "--exclude=**",
                "s3+http://my-backup/myhost/", "/",
            }))
        })
    })

    Context("list-current-files with given config", func() {
        BeforeEach(func() {
            argv = []string{"list-current-files"}
        })

        It("should not have errored", func() {
            Expect(err).NotTo(HaveOccurred())
        })

        It("should have exported secrets in ENV variables", func() {
            Expect(env).To(Equal([]string{
                "PASSPHRASE=5pJZqnzrmFSi1wqZtcUh",
                "AWS_ACCESS_KEY_ID=AKIAJKCC13246798732A",
                "AWS_SECRET_ACCESS_KEY=Oosdkfjadgiuagbiajbgaliurtbjsbfgaldfbgdf",
            }))
        })

        It("should have generated the expected commandLine", func() {
            Expect(commandLine).To(Equal([]string{
                "list-current-files",
                "--full-if-older-than", "7D",
                "--s3-use-new-style",
                "s3+http://my-backup/myhost/",
            }))
        })
    })

    Context("status with given config", func() {
        BeforeEach(func() {
            argv = []string{"status"}
        })

        It("should not have errored", func() {
            Expect(err).NotTo(HaveOccurred())
        })

        It("should have exported secrets in ENV variables", func() {
            Expect(env).To(Equal([]string{
                "PASSPHRASE=5pJZqnzrmFSi1wqZtcUh",
                "AWS_ACCESS_KEY_ID=AKIAJKCC13246798732A",
                "AWS_SECRET_ACCESS_KEY=Oosdkfjadgiuagbiajbgaliurtbjsbfgaldfbgdf",
            }))
        })

        It("should have generated the expected commandLine", func() {
            Expect(commandLine).To(Equal([]string{
                "collection-status",
                "--full-if-older-than", "7D",
                "--s3-use-new-style",
                "s3+http://my-backup/myhost/",
            }))
        })
    })

    Context("restoring a single file with given config", func() {
        BeforeEach(func() {
            argv = []string{"restore", "data/myapp/config.yml", "/home/myuser/config.yml"}
        })

        It("should not have errored", func() {
            Expect(err).NotTo(HaveOccurred())
        })

        It("should have exported secrets in ENV variables", func() {
            Expect(env).To(Equal([]string{
                "PASSPHRASE=5pJZqnzrmFSi1wqZtcUh",
                "AWS_ACCESS_KEY_ID=AKIAJKCC13246798732A",
                "AWS_SECRET_ACCESS_KEY=Oosdkfjadgiuagbiajbgaliurtbjsbfgaldfbgdf",
            }))
        })

        It("should have generated the expected commandLine", func() {
            Expect(commandLine).To(Equal([]string{
                "restore",
                "--full-if-older-than", "7D",
                "--s3-use-new-style",
                "--file-to-restore", "data/myapp/config.yml",
                "--include=/data",
                "--exclude=**",
                "s3+http://my-backup/myhost/",
                "/home/myuser/config.yml",
            }))
        })
    })

    Context("restoring everything with given config", func() {
        BeforeEach(func() {
            argv = []string{"restore", "/home/myuser/mybackup/"}
        })

        It("should not have errored", func() {
            Expect(err).NotTo(HaveOccurred())
        })

        It("should have exported secrets in ENV variables", func() {
            Expect(env).To(Equal([]string{
                "PASSPHRASE=5pJZqnzrmFSi1wqZtcUh",
                "AWS_ACCESS_KEY_ID=AKIAJKCC13246798732A",
                "AWS_SECRET_ACCESS_KEY=Oosdkfjadgiuagbiajbgaliurtbjsbfgaldfbgdf",
            }))
        })

        It("should have generated the expected commandLine", func() {
            Expect(commandLine).To(Equal([]string{
                "restore",
                "--full-if-older-than", "7D",
                "--s3-use-new-style",
                "--include=/data",
                "--exclude=**",
                "s3+http://my-backup/myhost/",
                "/home/myuser/mybackup/",
            }))
        })
    })

})

duplicity_backup_suite_test.go (new file, 13 lines)
@@ -0,0 +1,13 @@
package main_test

import (
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"

    "testing"
)

func TestDuplicityBackup(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "DuplicityBackup Suite")
}

help.txt (new file, 22 lines)
@@ -0,0 +1,22 @@
Wrapper around duplicity to create backups with less hassle

Usage:
  duplicity-backup [command]

Available Commands:
  backup                         Create backup according to the backup rules
  cleanup                        Delete the extraneous duplicity files
  list-current-files             Lists the files contained in the backup
  restore [file path] [target]   Restores single file / dir to target directory
  restore [target]               Restores everything to target directory
  status                         Summarize the status of the backup repository
  verify                         Compares backup contents against local files

Flags:
  --config-file / -f   Configuration for this duplicity wrapper
                       (Default: ~/.config/duplicity-backup.yaml)
  --lock-file / -l     File to hold the lock for this wrapper execution
                       (Default: ~/.config/duplicity-backup.lock)
  --debug / -d         Print duplicity commands to output
  --dry-run / -n       Do a test-run without changes
  --version            Prints the current program version and exits

main.go (new file, 168 lines)
@@ -0,0 +1,168 @@
package main

import (
    "bytes"
    "fmt"
    "log"
    "os"
    "os/exec"
    "path"
    "strings"
    "time"

    "github.com/Luzifer/go_helpers/which"
    "github.com/Luzifer/rconfig"
    "github.com/mitchellh/go-homedir"
    "github.com/nightlyone/lockfile"
)

var (
    cfg = struct {
        ConfigFile string `flag:"config-file,f" default:"~/.config/duplicity-backup.yaml" description:"Configuration for this duplicity wrapper"`
        LockFile   string `flag:"lock-file,l" default:"~/.config/duplicity-backup.lock" description:"File to hold the lock for this wrapper execution"`

        RestoreTime string `flag:"time,t" description:"The time from which to restore or list files"`

        DryRun         bool `flag:"dry-run,n" default:"false" description:"Do a test-run without changes"`
        Debug          bool `flag:"debug,d" default:"false" description:"Print duplicity commands to output"`
        VersionAndExit bool `flag:"version" default:"false" description:"Print version and exit"`
    }{}

    duplicityBinary string
    logFile         *os.File

    version = "dev"
)

func initCFG() {
    var err error
    if err = rconfig.Parse(&cfg); err != nil {
        log.Fatalf("Error while parsing arguments: %s", err)
    }

    if cfg.VersionAndExit {
        fmt.Printf("duplicity-backup %s\n", version)
        os.Exit(0)
    }

    if cfg.ConfigFile, err = homedir.Expand(cfg.ConfigFile); err != nil {
        log.Fatalf("Unable to expand config-file: %s", err)
    }

    if cfg.LockFile, err = homedir.Expand(cfg.LockFile); err != nil {
        log.Fatalf("Unable to expand lock: %s", err)
    }

    if duplicityBinary, err = which.FindInPath("duplicity"); err != nil {
        log.Fatalf("Did not find duplicity binary in $PATH, please install it")
    }
}

func logf(pattern string, fields ...interface{}) {
    t := time.Now().Format("2006-01-02 15:04:05")
    pattern = fmt.Sprintf("(%s) ", t) + pattern + "\n"
    fmt.Fprintf(logFile, pattern, fields...)
    fmt.Printf(pattern, fields...)
}

func main() {
    initCFG()

    var (
        err    error
        config *configFile
    )

    lock, err := lockfile.New(cfg.LockFile)
    if err != nil {
        log.Fatalf("Could not initialize lockfile: %s", err)
    }

    // If no command is passed assume we're requesting "help"
    argv := rconfig.Args()
    if len(argv) == 1 || argv[1] == "help" {
        helptext, _ := Asset("help.txt")
        fmt.Println(string(helptext))
        return
    }

    // Get configuration
    configSource, err := os.Open(cfg.ConfigFile)
    if err != nil {
        log.Fatalf("Unable to open configuration file %s: %s", cfg.ConfigFile, err)
    }
    defer configSource.Close()
    config, err = loadConfigFile(configSource)
    if err != nil {
        log.Fatalf("Unable to read configuration file: %s", err)
    }

    // Initialize logfile
    os.MkdirAll(config.LogDirectory, 0755)
    logFilePath := path.Join(config.LogDirectory, time.Now().Format("duplicity-backup_2006-01-02_15-04-05.txt"))
    if logFile, err = os.Create(logFilePath); err != nil {
        log.Fatalf("Unable to open logfile %s: %s", logFilePath, err)
    }
    defer logFile.Close()

    logf("\n++++ duplicity-backup %s started with command '%s'", version, argv[1])

    if err := lock.TryLock(); err != nil {
        logf("Could not acquire lock: %s", err)
        return
    }
    defer lock.Unlock()

    if err := execute(config, argv[1:]); err != nil {
        return
    }

    if config.Cleanup.Type != "none" {
        logf("++++ Starting removal of old backups")

        if err := execute(config, []string{"__remove_old"}); err != nil {
            return
        }
    }

    logf("++++ Backup finished successfully")
}

func execute(config *configFile, argv []string) error {
    var (
        err              error
        commandLine, env []string
    )
    commandLine, env, err = config.GenerateCommand(argv, cfg.RestoreTime)
    if err != nil {
        logf("[ERR] %s", err)
        return err
    }

    // Ensure duplicity is talking to us
    commandLine = append([]string{"-v3"}, commandLine...)

    if cfg.DryRun {
        commandLine = append([]string{"--dry-run"}, commandLine...)
    }

    if cfg.Debug {
        logf("[DBG] Command: %s %s", duplicityBinary, strings.Join(commandLine, " "))
    }

    output := bytes.NewBuffer([]byte{})
    cmd := exec.Command(duplicityBinary, commandLine...)
    cmd.Stdout = output
    cmd.Stderr = output
    cmd.Env = env
    err = cmd.Run()

    logf("%s", output.String())
    if err != nil {
        logf("[ERR] Execution of duplicity command was unsuccessful! (exit-code was non-zero)")
    } else {
        logf("[INF] Execution of duplicity command was successful.")
    }

    return err
}

vendor/github.com/Luzifer/go_helpers/which/which.go (generated, vendored, new file, 54 lines)
@@ -0,0 +1,54 @@
package which

import (
    "errors"
    "os"
    "path"
    "strings"
)

// Common named errors to match in programs using this library
var (
    ErrBinaryNotFound    = errors.New("Requested binary was not found")
    ErrNoSearchSpecified = errors.New("You need to specify a binary to search")
)

// FindInPath searches the specified binary in directories listed in $PATH and returns first match
func FindInPath(binary string) (string, error) {
    pathEnv := os.Getenv("PATH")
    if len(pathEnv) == 0 {
        return "", errors.New("Found empty $PATH, not able to search $PATH")
    }

    for _, part := range strings.Split(pathEnv, ":") {
        if len(part) == 0 {
            continue
        }

        if found, err := FindInDirectory(binary, part); err != nil {
            return "", err
        } else if found {
            return path.Join(part, binary), nil
        }
    }

    return "", ErrBinaryNotFound
}

// FindInDirectory checks whether the specified file is present in the directory
func FindInDirectory(binary, directory string) (bool, error) {
    if len(binary) == 0 {
        return false, ErrNoSearchSpecified
    }

    _, err := os.Stat(path.Join(directory, binary))

    switch {
    case err == nil:
        return true, nil
    case os.IsNotExist(err):
        return false, nil
    default:
        return false, err
    }
}
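
Editor's note: a usage sketch, not part of the commit; main.go earlier in this diff calls the helper exactly like this to locate the duplicity binary. It assumes the `which` package above and the standard `log` package are imported.

```go
// Illustrative only: resolve the duplicity binary from $PATH.
func locateDuplicity() string {
    binary, err := which.FindInPath("duplicity") // first match on $PATH, or ErrBinaryNotFound
    if err != nil {
        log.Fatalf("Did not find duplicity binary in $PATH, please install it")
    }
    return binary
}
```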

vendor/github.com/Luzifer/rconfig/.travis.yml (generated, vendored, new file, 8 lines)
@@ -0,0 +1,8 @@
language: go

go:
- 1.4
- 1.5
- tip

script: go test -v -race -cover ./...

vendor/github.com/Luzifer/rconfig/LICENSE (generated, vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
Copyright 2015 Knut Ahlers <knut@ahlers.me>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/Luzifer/rconfig/README.md (generated, vendored, new file, 94 lines)
@@ -0,0 +1,94 @@
[![Build Status](https://travis-ci.org/Luzifer/rconfig.svg?branch=master)](https://travis-ci.org/Luzifer/rconfig)
[![License: Apache v2.0](https://badge.luzifer.io/v1/badge?color=5d79b5&title=license&text=Apache+v2.0)](http://www.apache.org/licenses/LICENSE-2.0)
[![Documentation](https://badge.luzifer.io/v1/badge?title=godoc&text=reference)](https://godoc.org/github.com/Luzifer/rconfig)
[![Go Report](http://goreportcard.com/badge/Luzifer/rconfig)](http://goreportcard.com/report/Luzifer/rconfig)

## Description

> Package rconfig implements a CLI configuration reader with struct-embedded defaults, environment variables and posix compatible flag parsing using the [pflag](https://github.com/spf13/pflag) library.

## Installation

Install by running:

```
go get -u github.com/Luzifer/rconfig
```

OR fetch a specific version:

```
go get -u gopkg.in/luzifer/rconfig.v1
```

Run tests by running:

```
go test -v -race -cover github.com/Luzifer/rconfig
```

## Usage

As a first step define a struct holding your configuration:

```go
type config struct {
    Username string `default:"unknown" flag:"user" description:"Your name"`
    Details  struct {
        Age int `default:"25" flag:"age" env:"age" description:"Your age"`
    }
}
```

Next create an instance of that struct and let `rconfig` fill that config:

```go
var cfg config
func init() {
    cfg = config{}
    rconfig.Parse(&cfg)
}
```

You're ready to access your configuration:

```go
func main() {
    fmt.Printf("Hello %s, happy birthday for your %dth birthday.",
        cfg.Username,
        cfg.Details.Age)
}
```

### Provide variable defaults by using a file

Given you have a file `~/.myapp.yml` containing some secrets or usernames (for the example below username is assumed to be "luzifer") as a default configuration for your application you can use this source code to load the defaults from that file using the `vardefault` tag in your configuration struct.

The order of the directives (lower number = higher precedence):

1. Flags provided in command line
1. Environment variables
1. Variable defaults (`vardefault` tag in the struct)
1. `default` tag in the struct

```go
type config struct {
    Username string `vardefault:"username" flag:"username" description:"Your username"`
}

var cfg = config{}

func init() {
    rconfig.SetVariableDefaults(rconfig.VarDefaultsFromYAMLFile("~/.myapp.yml"))
    rconfig.Parse(&cfg)
}

func main() {
    fmt.Printf("Username = %s", cfg.Username)
    // Output: Username = luzifer
}
```

## More info

You can see the full reference documentation of the rconfig package [at godoc.org](https://godoc.org/github.com/Luzifer/rconfig), or through go's standard documentation system by running `godoc -http=:6060` and browsing to [http://localhost:6060/pkg/github.com/Luzifer/rconfig](http://localhost:6060/pkg/github.com/Luzifer/rconfig) after installation.

vendor/github.com/Luzifer/rconfig/config.go (generated, vendored, new file, 314 lines)
@@ -0,0 +1,314 @@
// Package rconfig implements a CLI configuration reader with struct-embedded
// defaults, environment variables and posix compatible flag parsing using
// the pflag library.
package rconfig

import (
    "errors"
    "fmt"
    "os"
    "reflect"
    "strconv"
    "strings"

    "github.com/spf13/pflag"
)

var (
    fs               *pflag.FlagSet
    variableDefaults map[string]string
)

func init() {
    variableDefaults = make(map[string]string)
}

// Parse takes the pointer to a struct filled with variables which should be read
// from ENV, default or flag. The precedence in this is flag > ENV > default. So
// if a flag is specified on the CLI it will overwrite the ENV and otherwise ENV
// overwrites the default specified.
//
// For your configuration struct you can use the following struct-tags to control
// the behavior of rconfig:
//
//     default: Set a default value
//     vardefault: Read the default value from the variable defaults
//     env: Read the value from this environment variable
//     flag: Flag to read in format "long,short" (for example "listen,l")
//     description: A help text for Usage output to guide your users
//
// The format you need to specify those values you can see in the example to this
// function.
//
func Parse(config interface{}) error {
    return parse(config, nil)
}

// Args returns the non-flag command-line arguments.
func Args() []string {
    return fs.Args()
}

// Usage prints a basic usage with the corresponding defaults for the flags to
// os.Stdout. The defaults are derived from the `default` struct-tag and the ENV.
func Usage() {
    if fs != nil && fs.Parsed() {
        fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
        fs.PrintDefaults()
    }
}

// SetVariableDefaults presets the parser with a map of default values to be used
// when specifying the vardefault tag
func SetVariableDefaults(defaults map[string]string) {
    variableDefaults = defaults
}

func parse(in interface{}, args []string) error {
    if args == nil {
        args = os.Args
    }

    fs = pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)
    if err := execTags(in, fs); err != nil {
        return err
    }

    return fs.Parse(args)
}

func execTags(in interface{}, fs *pflag.FlagSet) error {
    if reflect.TypeOf(in).Kind() != reflect.Ptr {
        return errors.New("Calling parser with non-pointer")
    }

    if reflect.ValueOf(in).Elem().Kind() != reflect.Struct {
        return errors.New("Calling parser with pointer to non-struct")
    }

    st := reflect.ValueOf(in).Elem()
    for i := 0; i < st.NumField(); i++ {
        valField := st.Field(i)
        typeField := st.Type().Field(i)

        if typeField.Tag.Get("default") == "" && typeField.Tag.Get("env") == "" && typeField.Tag.Get("flag") == "" && typeField.Type.Kind() != reflect.Struct {
            // None of our supported tags is present and it's not a sub-struct
            continue
        }

        value := varDefault(typeField.Tag.Get("vardefault"), typeField.Tag.Get("default"))
        value = envDefault(typeField.Tag.Get("env"), value)
        parts := strings.Split(typeField.Tag.Get("flag"), ",")

        switch typeField.Type.Kind() {
        case reflect.String:
            if typeField.Tag.Get("flag") != "" {
                if len(parts) == 1 {
                    fs.StringVar(valField.Addr().Interface().(*string), parts[0], value, typeField.Tag.Get("description"))
                } else {
                    fs.StringVarP(valField.Addr().Interface().(*string), parts[0], parts[1], value, typeField.Tag.Get("description"))
                }
            } else {
                valField.SetString(value)
            }

        case reflect.Bool:
            v := value == "true"
            if typeField.Tag.Get("flag") != "" {
                if len(parts) == 1 {
                    fs.BoolVar(valField.Addr().Interface().(*bool), parts[0], v, typeField.Tag.Get("description"))
                } else {
                    fs.BoolVarP(valField.Addr().Interface().(*bool), parts[0], parts[1], v, typeField.Tag.Get("description"))
                }
            } else {
                valField.SetBool(v)
            }

        case reflect.Int, reflect.Int8, reflect.Int32, reflect.Int64:
            vt, err := strconv.ParseInt(value, 10, 64)
            if err != nil {
                if value == "" {
                    vt = 0
                } else {
                    return err
                }
            }
            if typeField.Tag.Get("flag") != "" {
                registerFlagInt(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description"))
            } else {
                valField.SetInt(vt)
            }

        case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
            vt, err := strconv.ParseUint(value, 10, 64)
            if err != nil {
                if value == "" {
                    vt = 0
                } else {
                    return err
                }
            }
            if typeField.Tag.Get("flag") != "" {
                registerFlagUint(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description"))
            } else {
                valField.SetUint(vt)
            }

        case reflect.Float32, reflect.Float64:
            vt, err := strconv.ParseFloat(value, 64)
            if err != nil {
                if value == "" {
                    vt = 0.0
                } else {
                    return err
                }
            }
            if typeField.Tag.Get("flag") != "" {
                registerFlagFloat(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description"))
            } else {
                valField.SetFloat(vt)
            }

        case reflect.Struct:
            if err := execTags(valField.Addr().Interface(), fs); err != nil {
                return err
            }

        case reflect.Slice:
            switch typeField.Type.Elem().Kind() {
            case reflect.Int:
|
||||
def := []int{}
|
||||
for _, v := range strings.Split(value, ",") {
|
||||
it, err := strconv.ParseInt(strings.TrimSpace(v), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
def = append(def, int(it))
|
||||
}
|
||||
if len(parts) == 1 {
|
||||
fs.IntSliceVar(valField.Addr().Interface().(*[]int), parts[0], def, typeField.Tag.Get("description"))
|
||||
} else {
|
||||
fs.IntSliceVarP(valField.Addr().Interface().(*[]int), parts[0], parts[1], def, typeField.Tag.Get("description"))
|
||||
}
|
||||
case reflect.String:
|
||||
del := typeField.Tag.Get("delimiter")
|
||||
if len(del) == 0 {
|
||||
del = ","
|
||||
}
|
||||
def := strings.Split(value, del)
|
||||
if len(parts) == 1 {
|
||||
fs.StringSliceVar(valField.Addr().Interface().(*[]string), parts[0], def, typeField.Tag.Get("description"))
|
||||
} else {
|
||||
fs.StringSliceVarP(valField.Addr().Interface().(*[]string), parts[0], parts[1], def, typeField.Tag.Get("description"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func registerFlagFloat(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt float64, desc string) {
|
||||
switch t {
|
||||
case reflect.Float32:
|
||||
if len(parts) == 1 {
|
||||
fs.Float32Var(field.(*float32), parts[0], float32(vt), desc)
|
||||
} else {
|
||||
fs.Float32VarP(field.(*float32), parts[0], parts[1], float32(vt), desc)
|
||||
}
|
||||
case reflect.Float64:
|
||||
if len(parts) == 1 {
|
||||
fs.Float64Var(field.(*float64), parts[0], float64(vt), desc)
|
||||
} else {
|
||||
fs.Float64VarP(field.(*float64), parts[0], parts[1], float64(vt), desc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func registerFlagInt(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt int64, desc string) {
|
||||
switch t {
|
||||
case reflect.Int:
|
||||
if len(parts) == 1 {
|
||||
fs.IntVar(field.(*int), parts[0], int(vt), desc)
|
||||
} else {
|
||||
fs.IntVarP(field.(*int), parts[0], parts[1], int(vt), desc)
|
||||
}
|
||||
case reflect.Int8:
|
||||
if len(parts) == 1 {
|
||||
fs.Int8Var(field.(*int8), parts[0], int8(vt), desc)
|
||||
} else {
|
||||
fs.Int8VarP(field.(*int8), parts[0], parts[1], int8(vt), desc)
|
||||
}
|
||||
case reflect.Int32:
|
||||
if len(parts) == 1 {
|
||||
fs.Int32Var(field.(*int32), parts[0], int32(vt), desc)
|
||||
} else {
|
||||
fs.Int32VarP(field.(*int32), parts[0], parts[1], int32(vt), desc)
|
||||
}
|
||||
case reflect.Int64:
|
||||
if len(parts) == 1 {
|
||||
fs.Int64Var(field.(*int64), parts[0], int64(vt), desc)
|
||||
} else {
|
||||
fs.Int64VarP(field.(*int64), parts[0], parts[1], int64(vt), desc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func registerFlagUint(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt uint64, desc string) {
|
||||
switch t {
|
||||
case reflect.Uint:
|
||||
if len(parts) == 1 {
|
||||
fs.UintVar(field.(*uint), parts[0], uint(vt), desc)
|
||||
} else {
|
||||
fs.UintVarP(field.(*uint), parts[0], parts[1], uint(vt), desc)
|
||||
}
|
||||
case reflect.Uint8:
|
||||
if len(parts) == 1 {
|
||||
fs.Uint8Var(field.(*uint8), parts[0], uint8(vt), desc)
|
||||
} else {
|
||||
fs.Uint8VarP(field.(*uint8), parts[0], parts[1], uint8(vt), desc)
|
||||
}
|
||||
case reflect.Uint16:
|
||||
if len(parts) == 1 {
|
||||
fs.Uint16Var(field.(*uint16), parts[0], uint16(vt), desc)
|
||||
} else {
|
||||
fs.Uint16VarP(field.(*uint16), parts[0], parts[1], uint16(vt), desc)
|
||||
}
|
||||
case reflect.Uint32:
|
||||
if len(parts) == 1 {
|
||||
fs.Uint32Var(field.(*uint32), parts[0], uint32(vt), desc)
|
||||
} else {
|
||||
fs.Uint32VarP(field.(*uint32), parts[0], parts[1], uint32(vt), desc)
|
||||
}
|
||||
case reflect.Uint64:
|
||||
if len(parts) == 1 {
|
||||
fs.Uint64Var(field.(*uint64), parts[0], uint64(vt), desc)
|
||||
} else {
|
||||
fs.Uint64VarP(field.(*uint64), parts[0], parts[1], uint64(vt), desc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func envDefault(env, def string) string {
|
||||
value := def
|
||||
|
||||
if env != "" {
|
||||
if e := os.Getenv(env); e != "" {
|
||||
value = e
|
||||
}
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
func varDefault(name, def string) string {
|
||||
value := def
|
||||
|
||||
if name != "" {
|
||||
if v, ok := variableDefaults[name]; ok {
|
||||
value = v
|
||||
}
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
27
vendor/github.com/Luzifer/rconfig/vardefault_providers.go
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
package rconfig
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// VarDefaultsFromYAMLFile reads contents of a file and calls VarDefaultsFromYAML
|
||||
func VarDefaultsFromYAMLFile(filename string) map[string]string {
|
||||
data, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return make(map[string]string)
|
||||
}
|
||||
|
||||
return VarDefaultsFromYAML(data)
|
||||
}
|
||||
|
||||
// VarDefaultsFromYAML creates a vardefaults map from YAML raw data
|
||||
func VarDefaultsFromYAML(in []byte) map[string]string {
|
||||
out := make(map[string]string)
|
||||
err := yaml.Unmarshal(in, &out)
|
||||
if err != nil {
|
||||
return make(map[string]string)
|
||||
}
|
||||
return out
|
||||
}
|
14
vendor/github.com/asaskevich/govalidator/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,14 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.1
|
||||
- 1.2
|
||||
- 1.3
|
||||
- 1.4
|
||||
- 1.5
|
||||
- 1.6
|
||||
- tip
|
||||
|
||||
notifications:
|
||||
email:
|
||||
- bwatas@gmail.com
|
21
vendor/github.com/asaskevich/govalidator/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Alex Saskevich
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
398
vendor/github.com/asaskevich/govalidator/README.md
generated
vendored
Normal file
|
@ -0,0 +1,398 @@
|
|||
govalidator
|
||||
===========
|
||||
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) [![Coverage Status](https://img.shields.io/coveralls/asaskevich/govalidator.svg)](https://coveralls.io/r/asaskevich/govalidator?branch=master) [![wercker status](https://app.wercker.com/status/1ec990b09ea86c910d5f08b0e02c6043/s "wercker status")](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043)
|
||||
[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator)
|
||||
|
||||
A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).
|
||||
|
||||
#### Installation
|
||||
Make sure that Go is installed on your computer.
|
||||
Type the following command in your terminal:
|
||||
|
||||
go get github.com/asaskevich/govalidator
|
||||
|
||||
or you can get a specific release of the package via `gopkg.in`:
|
||||
|
||||
go get gopkg.in/asaskevich/govalidator.v4
|
||||
|
||||
After that the package is ready to use.
|
||||
|
||||
|
||||
#### Import package in your project
|
||||
Add the following line to your `*.go` file:
|
||||
```go
|
||||
import "github.com/asaskevich/govalidator"
|
||||
```
|
||||
If you would rather not use the long `govalidator` name, you can alias the import like this:
|
||||
```go
|
||||
import (
|
||||
valid "github.com/asaskevich/govalidator"
|
||||
)
|
||||
```
|
||||
|
||||
#### Activate behavior to require all fields have a validation tag by default
|
||||
`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
|
||||
|
||||
```go
|
||||
import "github.com/asaskevich/govalidator"
|
||||
|
||||
func init() {
|
||||
govalidator.SetFieldsRequiredByDefault(true)
|
||||
}
|
||||
```
|
||||
|
||||
Here's some code to explain it:
|
||||
```go
|
||||
// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
|
||||
type exampleStruct struct {
|
||||
Name string ``
|
||||
Email string `valid:"email"`
}
|
||||
|
||||
// this, however, will only fail when Email is empty or an invalid email address:
|
||||
type exampleStruct2 struct {
|
||||
Name string `valid:"-"`
|
||||
Email string `valid:"email"`
}
|
||||
|
||||
// lastly, this will only fail when Email is an invalid email address but not when it's empty:
|
||||
type exampleStruct3 struct {
|
||||
Name string `valid:"-"`
|
||||
Email string `valid:"email,optional"`
}
|
||||
```
|
||||
|
||||
#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
|
||||
##### Custom validator function signature
|
||||
A context was added as the second parameter; for structs this is the object being validated, which makes dependent validation possible.
|
||||
```go
|
||||
import "github.com/asaskevich/govalidator"
|
||||
|
||||
// old signature
|
||||
func(i interface{}) bool
|
||||
|
||||
// new signature
|
||||
func(i interface{}, o interface{}) bool
|
||||
```
|
||||
|
||||
##### Adding a custom validator
|
||||
This was changed to prevent data races when accessing custom validators.
|
||||
```go
|
||||
import "github.com/asaskevich/govalidator"
|
||||
|
||||
// before
|
||||
govalidator.CustomTypeTagMap["customByteArrayValidator"] = CustomTypeValidator(func(i interface{}, o interface{}) bool {
|
||||
// ...
|
||||
})
|
||||
|
||||
// after
|
||||
govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool {
|
||||
// ...
|
||||
}))
|
||||
```
|
||||
|
||||
#### List of functions:
|
||||
```go
|
||||
func Abs(value float64) float64
|
||||
func BlackList(str, chars string) string
|
||||
func ByteLength(str string, params ...string) bool
|
||||
func StringLength(str string, params ...string) bool
|
||||
func StringMatches(s string, params ...string) bool
|
||||
func CamelCaseToUnderscore(str string) string
|
||||
func Contains(str, substring string) bool
|
||||
func Count(array []interface{}, iterator ConditionIterator) int
|
||||
func Each(array []interface{}, iterator Iterator)
|
||||
func ErrorByField(e error, field string) string
|
||||
func Filter(array []interface{}, iterator ConditionIterator) []interface{}
|
||||
func Find(array []interface{}, iterator ConditionIterator) interface{}
|
||||
func GetLine(s string, index int) (string, error)
|
||||
func GetLines(s string) []string
|
||||
func IsHost(s string) bool
|
||||
func InRange(value, left, right float64) bool
|
||||
func IsASCII(str string) bool
|
||||
func IsAlpha(str string) bool
|
||||
func IsAlphanumeric(str string) bool
|
||||
func IsBase64(str string) bool
|
||||
func IsByteLength(str string, min, max int) bool
|
||||
func IsCreditCard(str string) bool
|
||||
func IsDataURI(str string) bool
|
||||
func IsDialString(str string) bool
|
||||
func IsDNSName(str string) bool
|
||||
func IsDivisibleBy(str, num string) bool
|
||||
func IsEmail(str string) bool
|
||||
func IsFilePath(str string) (bool, int)
|
||||
func IsFloat(str string) bool
|
||||
func IsFullWidth(str string) bool
|
||||
func IsHalfWidth(str string) bool
|
||||
func IsHexadecimal(str string) bool
|
||||
func IsHexcolor(str string) bool
|
||||
func IsIP(str string) bool
|
||||
func IsIPv4(str string) bool
|
||||
func IsIPv6(str string) bool
|
||||
func IsISBN(str string, version int) bool
|
||||
func IsISBN10(str string) bool
|
||||
func IsISBN13(str string) bool
|
||||
func IsISO3166Alpha2(str string) bool
|
||||
func IsISO3166Alpha3(str string) bool
|
||||
func IsInt(str string) bool
|
||||
func IsJSON(str string) bool
|
||||
func IsLatitude(str string) bool
|
||||
func IsLongitude(str string) bool
|
||||
func IsLowerCase(str string) bool
|
||||
func IsMAC(str string) bool
|
||||
func IsMongoID(str string) bool
|
||||
func IsMultibyte(str string) bool
|
||||
func IsNatural(value float64) bool
|
||||
func IsNegative(value float64) bool
|
||||
func IsNonNegative(value float64) bool
|
||||
func IsNonPositive(value float64) bool
|
||||
func IsNull(str string) bool
|
||||
func IsNumeric(str string) bool
|
||||
func IsPort(str string) bool
|
||||
func IsPositive(value float64) bool
|
||||
func IsPrintableASCII(str string) bool
|
||||
func IsRGBcolor(str string) bool
|
||||
func IsRequestURI(rawurl string) bool
|
||||
func IsRequestURL(rawurl string) bool
|
||||
func IsSSN(str string) bool
|
||||
func IsSemver(str string) bool
|
||||
func IsURL(str string) bool
|
||||
func IsUTFDigit(str string) bool
|
||||
func IsUTFLetter(str string) bool
|
||||
func IsUTFLetterNumeric(str string) bool
|
||||
func IsUTFNumeric(str string) bool
|
||||
func IsUUID(str string) bool
|
||||
func IsUUIDv3(str string) bool
|
||||
func IsUUIDv4(str string) bool
|
||||
func IsUUIDv5(str string) bool
|
||||
func IsUpperCase(str string) bool
|
||||
func IsVariableWidth(str string) bool
|
||||
func IsWhole(value float64) bool
|
||||
func LeftTrim(str, chars string) string
|
||||
func Map(array []interface{}, iterator ResultIterator) []interface{}
|
||||
func Matches(str, pattern string) bool
|
||||
func NormalizeEmail(str string) (string, error)
|
||||
func RemoveTags(s string) string
|
||||
func ReplacePattern(str, pattern, replace string) string
|
||||
func Reverse(s string) string
|
||||
func RightTrim(str, chars string) string
|
||||
func SafeFileName(str string) string
|
||||
func Sign(value float64) float64
|
||||
func StripLow(str string, keepNewLines bool) string
|
||||
func ToBoolean(str string) (bool, error)
|
||||
func ToFloat(str string) (float64, error)
|
||||
func ToInt(str string) (int64, error)
|
||||
func ToJSON(obj interface{}) (string, error)
|
||||
func ToString(obj interface{}) string
|
||||
func Trim(str, chars string) string
|
||||
func Truncate(str string, length int, ending string) string
|
||||
func UnderscoreToCamelCase(s string) string
|
||||
func ValidateStruct(s interface{}) (bool, error)
|
||||
func WhiteList(str, chars string) string
|
||||
type ConditionIterator
|
||||
type Error
|
||||
func (e Error) Error() string
|
||||
type Errors
|
||||
func (es Errors) Error() string
|
||||
type ISO3166Entry
|
||||
type Iterator
|
||||
type ParamValidator
|
||||
type ResultIterator
|
||||
type UnsupportedTypeError
|
||||
func (e *UnsupportedTypeError) Error() string
|
||||
type Validator
|
||||
```
|
||||
|
||||
#### Examples
|
||||
###### IsURL
|
||||
```go
|
||||
println(govalidator.IsURL(`http://user@pass:domain.com/path/page`))
|
||||
```
|
||||
###### ToString
|
||||
```go
|
||||
type User struct {
|
||||
FirstName string
|
||||
LastName string
|
||||
}
|
||||
|
||||
str := govalidator.ToString(&User{"John", "Juan"})
|
||||
println(str)
|
||||
```
|
||||
###### Each, Map, Filter, Count for slices
|
||||
Each iterates over the slice/array and calls Iterator for every item.
|
||||
```go
|
||||
data := []interface{}{1, 2, 3, 4, 5}
|
||||
var fn govalidator.Iterator = func(value interface{}, index int) {
|
||||
println(value.(int))
|
||||
}
|
||||
govalidator.Each(data, fn)
|
||||
```
|
||||
```go
|
||||
data := []interface{}{1, 2, 3, 4, 5}
|
||||
var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} {
|
||||
return value.(int) * 3
|
||||
}
|
||||
_ = govalidator.Map(data, fn) // result = []interface{}{3, 6, 9, 12, 15}
|
||||
```
|
||||
```go
|
||||
data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
|
||||
var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
|
||||
return value.(int)%2 == 0
|
||||
}
|
||||
_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
|
||||
_ = govalidator.Count(data, fn) // result = 5
|
||||
```
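`Find` (see `arrays.go` further down) follows the same pattern and returns the first item matching the `ConditionIterator`, or nil; a minimal sketch:
```go
data := []interface{}{1, 2, 3, 4, 5}
var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
	return value.(int) > 3
}
_ = govalidator.Find(data, fn) // result = 4
```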
|
||||
###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)
|
||||
If you want to validate structs, you can use the `valid` tag on any field of your structure. Multiple validators for one field are separated by commas inside the tag. If you want to skip validation, place `-` in the tag. If you need a validator that is not on the list below, you can add it like this:
|
||||
```go
|
||||
govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
|
||||
return str == "duck"
|
||||
})
|
||||
```
|
||||
For completely custom validators (interface-based), see below.
|
||||
|
||||
Here is a list of available validators for struct fields (validator tag - function used):
|
||||
```go
|
||||
"alpha": IsAlpha,
|
||||
"alphanum": IsAlphanumeric,
|
||||
"ascii": IsASCII,
|
||||
"base64": IsBase64,
|
||||
"creditcard": IsCreditCard,
|
||||
"datauri": IsDataURI,
|
||||
"dialstring": IsDialString,
|
||||
"dns": IsDNSName,
|
||||
"email": IsEmail,
|
||||
"float": IsFloat,
|
||||
"fullwidth": IsFullWidth,
|
||||
"halfwidth": IsHalfWidth,
|
||||
"hexadecimal": IsHexadecimal,
|
||||
"hexcolor": IsHexcolor,
|
||||
"host": IsHost,
|
||||
"int": IsInt,
|
||||
"ip": IsIP,
|
||||
"ipv4": IsIPv4,
|
||||
"ipv6": IsIPv6,
|
||||
"isbn10": IsISBN10,
|
||||
"isbn13": IsISBN13,
|
||||
"json": IsJSON,
|
||||
"latitude": IsLatitude,
|
||||
"longitude": IsLongitude,
|
||||
"lowercase": IsLowerCase,
|
||||
"mac": IsMAC,
|
||||
"multibyte": IsMultibyte,
|
||||
"null": IsNull,
|
||||
"numeric": IsNumeric,
|
||||
"port": IsPort,
|
||||
"printableascii": IsPrintableASCII,
|
||||
"requri": IsRequestURI,
|
||||
"requrl": IsRequestURL,
|
||||
"rgbcolor": IsRGBcolor,
|
||||
"ssn": IsSSN,
|
||||
"semver": IsSemver,
|
||||
"uppercase": IsUpperCase,
|
||||
"url": IsURL,
|
||||
"utfdigit": IsUTFDigit,
|
||||
"utfletter": IsUTFLetter,
|
||||
"utfletternum": IsUTFLetterNumeric,
|
||||
"utfnumeric": IsUTFNumeric,
|
||||
"uuid": IsUUID,
|
||||
"uuidv3": IsUUIDv3,
|
||||
"uuidv4": IsUUIDv4,
|
||||
"uuidv5": IsUUIDv5,
|
||||
"variablewidth": IsVariableWidth,
|
||||
```
|
||||
Validators with parameters
|
||||
|
||||
```go
|
||||
"length(min|max)": ByteLength,
|
||||
"matches(pattern)": StringMatches,
|
||||
```
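These are used inside the `valid` tag like any other validator; the struct and field names in this sketch are purely illustrative, not part of the package:
```go
// Hypothetical struct, for illustration only.
type Comment struct {
	Body string `valid:"length(1|280)"`         // byte length between 1 and 280
	Slug string `valid:"matches(^[a-z0-9-]+$)"` // must match the given pattern
}
```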
|
||||
|
||||
And here is a small example of usage:
|
||||
```go
|
||||
type Post struct {
|
||||
Title string `valid:"alphanum,required"`
|
||||
Message string `valid:"duck,ascii"`
|
||||
AuthorIP string `valid:"ipv4"`
|
||||
Date string `valid:"-"`
|
||||
}
|
||||
post := &Post{
|
||||
Title: "My Example Post",
|
||||
Message: "duck",
|
||||
AuthorIP: "123.234.54.3",
|
||||
}
|
||||
|
||||
// Add your own struct validation tags
|
||||
govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
|
||||
return str == "duck"
|
||||
})
|
||||
|
||||
result, err := govalidator.ValidateStruct(post)
|
||||
if err != nil {
|
||||
println("error: " + err.Error())
|
||||
}
|
||||
println(result)
|
||||
```
|
||||
###### WhiteList
|
||||
```go
|
||||
// Remove all characters from the string except those in the range "a" to "z"
|
||||
println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
|
||||
```
|
||||
|
||||
###### Custom validation functions
|
||||
Custom validation using your own domain-specific validators is also available; here's an example of how to use it:
|
||||
```go
|
||||
import "github.com/asaskevich/govalidator"
|
||||
|
||||
type CustomByteArray [6]byte // custom types are supported and can be validated
|
||||
|
||||
type StructWithCustomByteArray struct {
|
||||
ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
|
||||
Email string `valid:"email"`
|
||||
CustomMinLength int `valid:"-"`
|
||||
}
|
||||
|
||||
govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
|
||||
switch v := context.(type) { // you can type switch on the context interface being validated
|
||||
case StructWithCustomByteArray:
|
||||
// you can check and validate against some other field in the context,
|
||||
// return early or not validate against the context at all – your choice
|
||||
case SomeOtherType:
|
||||
// ...
|
||||
default:
|
||||
// expecting some other type? Throw/panic here or continue
|
||||
}
|
||||
|
||||
switch v := i.(type) { // type switch on the struct field being validated
|
||||
case CustomByteArray:
|
||||
for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
|
||||
if e != 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}))
|
||||
govalidator.CustomTypeTagMap.Set("customMinLengthValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
|
||||
switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
|
||||
case StructWithCustomByteArray:
|
||||
return len(v.ID) >= v.CustomMinLength
|
||||
}
|
||||
return false
|
||||
}))
|
||||
```
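With both custom validators registered, a value of `StructWithCustomByteArray` is then checked through `ValidateStruct` as usual; a minimal sketch with illustrative field values:
```go
s := StructWithCustomByteArray{
	ID:              CustomByteArray{1, 2, 3, 4, 5, 6}, // non-empty, so customByteArrayValidator passes
	Email:           "someone@example.com",
	CustomMinLength: 4, // len(ID) == 6 >= 4, so customMinLengthValidator passes
}
result, err := govalidator.ValidateStruct(s)
if err != nil {
	println("error: " + err.Error())
}
println(result)
```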
|
||||
|
||||
#### Notes
|
||||
Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
|
||||
Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
|
||||
|
||||
#### Support
|
||||
If you have a contribution for the package, feel free to open a Pull Request or an Issue.
|
||||
|
||||
#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
|
||||
* [Daniel Lohse](https://github.com/annismckenzie)
|
||||
* [Attila Oláh](https://github.com/attilaolah)
|
||||
* [Daniel Korner](https://github.com/Dadie)
|
||||
* [Steven Wilkin](https://github.com/stevenwilkin)
|
||||
* [Deiwin Sarjas](https://github.com/deiwin)
|
||||
* [Noah Shibley](https://github.com/slugmobile)
|
||||
* [Nathan Davies](https://github.com/nathj07)
|
||||
* [Matt Sanford](https://github.com/mzsanford)
|
||||
* [Simon ccl1115](https://github.com/ccl1115)
|
58
vendor/github.com/asaskevich/govalidator/arrays.go
generated
vendored
Normal file
|
@ -0,0 +1,58 @@
|
|||
package govalidator
|
||||
|
||||
// Iterator is the function that accepts element of slice/array and its index
|
||||
type Iterator func(interface{}, int)
|
||||
|
||||
// ResultIterator is the function that accepts element of slice/array and its index and returns any result
|
||||
type ResultIterator func(interface{}, int) interface{}
|
||||
|
||||
// ConditionIterator is the function that accepts element of slice/array and its index and returns boolean
|
||||
type ConditionIterator func(interface{}, int) bool
|
||||
|
||||
// Each iterates over the slice and applies Iterator to every item
|
||||
func Each(array []interface{}, iterator Iterator) {
|
||||
for index, data := range array {
|
||||
iterator(data, index)
|
||||
}
|
||||
}
|
||||
|
||||
// Map iterates over the slice and applies ResultIterator to every item. Returns a new slice as a result.
|
||||
func Map(array []interface{}, iterator ResultIterator) []interface{} {
|
||||
var result = make([]interface{}, len(array))
|
||||
for index, data := range array {
|
||||
result[index] = iterator(data, index)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Find iterates over the slice and applies ConditionIterator to every item. Returns the first item that meets ConditionIterator, or nil otherwise.
|
||||
func Find(array []interface{}, iterator ConditionIterator) interface{} {
|
||||
for index, data := range array {
|
||||
if iterator(data, index) {
|
||||
return data
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Filter iterates over the slice and applies ConditionIterator to every item. Returns a new slice.
|
||||
func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
|
||||
var result = make([]interface{}, 0)
|
||||
for index, data := range array {
|
||||
if iterator(data, index) {
|
||||
result = append(result, data)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Count iterates over the slice and applies ConditionIterator to every item. Returns the count of items that meet ConditionIterator.
|
||||
func Count(array []interface{}, iterator ConditionIterator) int {
|
||||
count := 0
|
||||
for index, data := range array {
|
||||
if iterator(data, index) {
|
||||
count = count + 1
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
49
vendor/github.com/asaskevich/govalidator/converter.go
generated
vendored
Normal file
|
@ -0,0 +1,49 @@
|
|||
package govalidator
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// ToString converts the input to a string.
|
||||
func ToString(obj interface{}) string {
|
||||
res := fmt.Sprintf("%v", obj)
|
||||
return string(res)
|
||||
}
|
||||
|
||||
// ToJSON converts the input to a valid JSON string
|
||||
func ToJSON(obj interface{}) (string, error) {
|
||||
res, err := json.Marshal(obj)
|
||||
if err != nil {
|
||||
res = []byte("")
|
||||
}
|
||||
return string(res), err
|
||||
}
|
||||
|
||||
// ToFloat converts the input string to a float, or 0.0 if the input is not a float.
|
||||
func ToFloat(str string) (float64, error) {
|
||||
res, err := strconv.ParseFloat(str, 64)
|
||||
if err != nil {
|
||||
res = 0.0
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
|
||||
// ToInt converts the input string to an integer, or 0 if the input is not an integer.
|
||||
func ToInt(str string) (int64, error) {
|
||||
res, err := strconv.ParseInt(str, 0, 64)
|
||||
if err != nil {
|
||||
res = 0
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
|
||||
// ToBoolean converts the input string to a boolean.
|
||||
func ToBoolean(str string) (bool, error) {
|
||||
res, err := strconv.ParseBool(str)
|
||||
if err != nil {
|
||||
res = false
|
||||
}
|
||||
return res, err
|
||||
}
|
31
vendor/github.com/asaskevich/govalidator/error.go
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
|||
package govalidator
|
||||
|
||||
// Errors is an array of multiple errors and conforms to the error interface.
|
||||
type Errors []error
|
||||
|
||||
// Errors returns itself.
|
||||
func (es Errors) Errors() []error {
|
||||
return es
|
||||
}
|
||||
|
||||
func (es Errors) Error() string {
|
||||
var err string
|
||||
for _, e := range es {
|
||||
err += e.Error() + ";"
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Error encapsulates a name, an error and whether there's a custom error message or not.
|
||||
type Error struct {
|
||||
Name string
|
||||
Err error
|
||||
CustomErrorMessageExists bool
|
||||
}
|
||||
|
||||
func (e Error) Error() string {
|
||||
if e.CustomErrorMessageExists {
|
||||
return e.Err.Error()
|
||||
}
|
||||
return e.Name + ": " + e.Err.Error()
|
||||
}
|
57
vendor/github.com/asaskevich/govalidator/numerics.go
generated
vendored
Normal file
|
@ -0,0 +1,57 @@
|
|||
package govalidator
|
||||
|
||||
import "math"
|
||||
|
||||
// Abs returns the absolute value of a number
|
||||
func Abs(value float64) float64 {
|
||||
return value * Sign(value)
|
||||
}
|
||||
|
||||
// Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise
|
||||
func Sign(value float64) float64 {
|
||||
if value > 0 {
|
||||
return 1
|
||||
} else if value < 0 {
|
||||
return -1
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// IsNegative returns true if value < 0
|
||||
func IsNegative(value float64) bool {
|
||||
return value < 0
|
||||
}
|
||||
|
||||
// IsPositive returns true if value > 0
|
||||
func IsPositive(value float64) bool {
|
||||
return value > 0
|
||||
}
|
||||
|
||||
// IsNonNegative returns true if value >= 0
|
||||
func IsNonNegative(value float64) bool {
|
||||
return value >= 0
|
||||
}
|
||||
|
||||
// IsNonPositive returns true if value <= 0
|
||||
func IsNonPositive(value float64) bool {
|
||||
return value <= 0
|
||||
}
|
||||
|
||||
// InRange returns true if value lies between left and right border
|
||||
func InRange(value, left, right float64) bool {
|
||||
if left > right {
|
||||
left, right = right, left
|
||||
}
|
||||
return value >= left && value <= right
|
||||
}
|
||||
|
||||
// IsWhole returns true if value is a whole number
|
||||
func IsWhole(value float64) bool {
|
||||
return Abs(math.Remainder(value, 1)) == 0
|
||||
}
|
||||
|
||||
// IsNatural returns true if value is a natural number (positive and whole)
|
||||
func IsNatural(value float64) bool {
|
||||
return IsWhole(value) && IsPositive(value)
|
||||
}
|
83
vendor/github.com/asaskevich/govalidator/patterns.go
generated
vendored
Normal file
|
@ -0,0 +1,83 @@
|
|||
package govalidator
|
||||
|
||||
import "regexp"
|
||||
|
||||
// Basic regular expressions for validating strings
|
||||
const (
|
||||
Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
|
||||
CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$"
|
||||
ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$"
|
||||
ISBN13 string = "^(?:[0-9]{13})$"
|
||||
UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
|
||||
UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
|
||||
UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
|
||||
UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
|
||||
Alpha string = "^[a-zA-Z]+$"
|
||||
Alphanumeric string = "^[a-zA-Z0-9]+$"
|
||||
Numeric string = "^[-+]?[0-9]+$"
|
||||
Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
|
||||
Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$"
|
||||
Hexadecimal string = "^[0-9a-fA-F]+$"
|
||||
Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
|
||||
RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
|
||||
ASCII string = "^[\x00-\x7F]+$"
|
||||
Multibyte string = "[^\x00-\x7F]"
|
||||
FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
|
||||
HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
|
||||
Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
|
||||
PrintableASCII string = "^[\x20-\x7E]+$"
|
||||
DataURI string = "^data:.+\\/(.+);base64$"
|
||||
Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
|
||||
Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
|
||||
DNSName string = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{1,62}){1}(\.[a-zA-Z0-9]{1}[a-zA-Z0-9_-]{1,62})*$`
|
||||
URL string = `^((ftp|https?):\/\/)?(\S+(:\S*)?@)?((([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(([a-zA-Z0-9]+([-\.][a-zA-Z0-9]+)*)|((www\.)?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))(:(\d{1,5}))?((\/|\?|#)[^\s]*)?$`
|
||||
SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
|
||||
WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
|
||||
UnixPath string = `^((?:\/[a-zA-Z0-9\.\:]+(?:_[a-zA-Z0-9\:\.]+)*(?:\-[\:a-zA-Z0-9\.]+)*)+\/?)$`
|
||||
Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
|
||||
tagName string = "valid"
|
||||
)
|
||||
|
||||
// Used by IsFilePath func
|
||||
const (
|
||||
// Unknown is unresolved OS type
|
||||
Unknown = iota
|
||||
// Win is Windows type
|
||||
Win
|
||||
// Unix is *nix OS types
|
||||
Unix
|
||||
)
|
||||
|
||||
var (
|
||||
rxEmail = regexp.MustCompile(Email)
|
||||
rxCreditCard = regexp.MustCompile(CreditCard)
|
||||
rxISBN10 = regexp.MustCompile(ISBN10)
|
||||
rxISBN13 = regexp.MustCompile(ISBN13)
|
||||
rxUUID3 = regexp.MustCompile(UUID3)
|
||||
rxUUID4 = regexp.MustCompile(UUID4)
|
||||
rxUUID5 = regexp.MustCompile(UUID5)
|
||||
rxUUID = regexp.MustCompile(UUID)
|
||||
rxAlpha = regexp.MustCompile(Alpha)
|
||||
rxAlphanumeric = regexp.MustCompile(Alphanumeric)
|
||||
rxNumeric = regexp.MustCompile(Numeric)
|
||||
rxInt = regexp.MustCompile(Int)
|
||||
rxFloat = regexp.MustCompile(Float)
|
||||
rxHexadecimal = regexp.MustCompile(Hexadecimal)
|
||||
rxHexcolor = regexp.MustCompile(Hexcolor)
|
||||
rxRGBcolor = regexp.MustCompile(RGBcolor)
|
||||
rxASCII = regexp.MustCompile(ASCII)
|
||||
rxPrintableASCII = regexp.MustCompile(PrintableASCII)
|
||||
rxMultibyte = regexp.MustCompile(Multibyte)
|
||||
rxFullWidth = regexp.MustCompile(FullWidth)
|
||||
rxHalfWidth = regexp.MustCompile(HalfWidth)
|
||||
rxBase64 = regexp.MustCompile(Base64)
|
||||
rxDataURI = regexp.MustCompile(DataURI)
|
||||
rxLatitude = regexp.MustCompile(Latitude)
|
||||
rxLongitude = regexp.MustCompile(Longitude)
|
||||
rxDNSName = regexp.MustCompile(DNSName)
|
||||
rxURL = regexp.MustCompile(URL)
|
||||
rxSSN = regexp.MustCompile(SSN)
|
||||
rxWinPath = regexp.MustCompile(WinPath)
|
||||
rxUnixPath = regexp.MustCompile(UnixPath)
|
||||
rxSemver = regexp.MustCompile(Semver)
|
||||
)
|
378
vendor/github.com/asaskevich/govalidator/types.go
generated
vendored
Normal file
|
@ -0,0 +1,378 @@
|
|||
package govalidator
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Validator is a wrapper for a validator function that returns bool and accepts string.
|
||||
type Validator func(str string) bool
|
||||
|
||||
// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type.
|
||||
// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
|
||||
type CustomTypeValidator func(i interface{}, o interface{}) bool
|
||||
|
||||
// ParamValidator is a wrapper for validator functions that accepts additional parameters.
|
||||
type ParamValidator func(str string, params ...string) bool
|
||||
type tagOptionsMap map[string]string
|
||||
|
||||
// UnsupportedTypeError is a wrapper for reflect.Type
|
||||
type UnsupportedTypeError struct {
|
||||
Type reflect.Type
|
||||
}
|
||||
|
||||
// stringValues is a slice of reflect.Value holding *reflect.StringValue.
|
||||
// It implements the methods to sort by string.
|
||||
type stringValues []reflect.Value
|
||||
|
||||
// ParamTagMap is a map of functions that accept variadic parameters
|
||||
var ParamTagMap = map[string]ParamValidator{
|
||||
"length": ByteLength,
|
||||
"stringlength": StringLength,
|
||||
"matches": StringMatches,
|
||||
}
|
||||
|
||||
// ParamTagRegexMap maps param tags to their respective regexes.
|
||||
var ParamTagRegexMap = map[string]*regexp.Regexp{
|
||||
"length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
|
||||
"stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
|
||||
"matches": regexp.MustCompile(`matches\(([^)]+)\)`),
|
||||
}
|
||||
|
||||
type customTypeTagMap struct {
|
||||
validators map[string]CustomTypeValidator
|
||||
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
|
||||
tm.RLock()
|
||||
defer tm.RUnlock()
|
||||
v, ok := tm.validators[name]
|
||||
return v, ok
|
||||
}
|
||||
|
||||
func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
|
||||
tm.Lock()
|
||||
defer tm.Unlock()
|
||||
tm.validators[name] = ctv
|
||||
}
|
||||
|
||||
// CustomTypeTagMap is a map of functions that can be used as tags for the ValidateStruct function.
|
||||
// Use this to validate compound or custom types that need to be handled as a whole, e.g.
|
||||
// `type UUID [16]byte` (this would be handled as an array of bytes).
|
||||
var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}
|
||||
|
||||
// TagMap is a map of functions that can be used as tags for the ValidateStruct function.
|
||||
var TagMap = map[string]Validator{
|
||||
"email": IsEmail,
|
||||
"url": IsURL,
|
||||
"dialstring": IsDialString,
|
||||
"requrl": IsRequestURL,
|
||||
"requri": IsRequestURI,
|
||||
"alpha": IsAlpha,
|
||||
"utfletter": IsUTFLetter,
|
||||
"alphanum": IsAlphanumeric,
|
||||
"utfletternum": IsUTFLetterNumeric,
|
||||
"numeric": IsNumeric,
|
||||
"utfnumeric": IsUTFNumeric,
|
||||
"utfdigit": IsUTFDigit,
|
||||
"hexadecimal": IsHexadecimal,
|
||||
"hexcolor": IsHexcolor,
|
||||
"rgbcolor": IsRGBcolor,
|
||||
"lowercase": IsLowerCase,
|
||||
"uppercase": IsUpperCase,
|
||||
"int": IsInt,
|
||||
"float": IsFloat,
|
||||
"null": IsNull,
|
||||
"uuid": IsUUID,
|
||||
"uuidv3": IsUUIDv3,
|
||||
"uuidv4": IsUUIDv4,
|
||||
"uuidv5": IsUUIDv5,
|
||||
"creditcard": IsCreditCard,
|
||||
"isbn10": IsISBN10,
|
||||
"isbn13": IsISBN13,
|
||||
"json": IsJSON,
|
||||
"multibyte": IsMultibyte,
|
||||
"ascii": IsASCII,
|
||||
"printableascii": IsPrintableASCII,
|
||||
"fullwidth": IsFullWidth,
|
||||
"halfwidth": IsHalfWidth,
|
||||
"variablewidth": IsVariableWidth,
|
||||
"base64": IsBase64,
|
||||
"datauri": IsDataURI,
|
||||
"ip": IsIP,
|
||||
"port": IsPort,
|
||||
"ipv4": IsIPv4,
|
||||
"ipv6": IsIPv6,
|
||||
"dns": IsDNSName,
|
||||
"host": IsHost,
|
||||
"mac": IsMAC,
|
||||
"latitude": IsLatitude,
|
||||
"longitude": IsLongitude,
|
||||
"ssn": IsSSN,
|
||||
"semver": IsSemver,
|
||||
}
|
||||
|
||||
// ISO3166Entry stores country codes
|
||||
type ISO3166Entry struct {
|
||||
EnglishShortName string
|
||||
FrenchShortName string
|
||||
Alpha2Code string
|
||||
Alpha3Code string
|
||||
Numeric string
|
||||
}
|
||||
|
||||
// ISO3166List is based on https://www.iso.org/obp/ui/#search/code/ with Code Type "Officially Assigned Codes"
|
||||
var ISO3166List = []ISO3166Entry{
|
||||
{"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"},
|
||||
{"Albania", "Albanie (l')", "AL", "ALB", "008"},
|
||||
{"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"},
|
||||
{"Algeria", "Algérie (l')", "DZ", "DZA", "012"},
|
||||
{"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"},
|
||||
{"Andorra", "Andorre (l')", "AD", "AND", "020"},
|
||||
{"Angola", "Angola (l')", "AO", "AGO", "024"},
|
||||
{"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"},
|
||||
{"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"},
|
||||
{"Argentina", "Argentine (l')", "AR", "ARG", "032"},
|
||||
{"Australia", "Australie (l')", "AU", "AUS", "036"},
|
||||
{"Austria", "Autriche (l')", "AT", "AUT", "040"},
|
||||
{"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"},
|
||||
{"Bahrain", "Bahreïn", "BH", "BHR", "048"},
|
||||
{"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"},
|
||||
{"Armenia", "Arménie (l')", "AM", "ARM", "051"},
|
||||
{"Barbados", "Barbade (la)", "BB", "BRB", "052"},
|
||||
{"Belgium", "Belgique (la)", "BE", "BEL", "056"},
|
||||
{"Bermuda", "Bermudes (les)", "BM", "BMU", "060"},
|
||||
{"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"},
|
||||
{"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"},
|
||||
{"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"},
|
||||
{"Botswana", "Botswana (le)", "BW", "BWA", "072"},
|
||||
{"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"},
|
||||
{"Brazil", "Brésil (le)", "BR", "BRA", "076"},
|
||||
{"Belize", "Belize (le)", "BZ", "BLZ", "084"},
|
||||
{"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"},
|
||||
{"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"},
|
||||
{"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"},
|
||||
{"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"},
|
||||
{"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"},
|
||||
{"Myanmar", "Myanmar (le)", "MM", "MMR", "104"},
|
||||
{"Burundi", "Burundi (le)", "BI", "BDI", "108"},
|
||||
{"Belarus", "Bélarus (le)", "BY", "BLR", "112"},
|
||||
{"Cambodia", "Cambodge (le)", "KH", "KHM", "116"},
|
||||
{"Cameroon", "Cameroun (le)", "CM", "CMR", "120"},
|
||||
{"Canada", "Canada (le)", "CA", "CAN", "124"},
|
||||
{"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"},
|
||||
{"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"},
|
||||
{"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"},
|
||||
{"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"},
|
||||
{"Chad", "Tchad (le)", "TD", "TCD", "148"},
|
||||
{"Chile", "Chili (le)", "CL", "CHL", "152"},
|
||||
{"China", "Chine (la)", "CN", "CHN", "156"},
|
||||
{"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"},
|
||||
{"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"},
|
||||
{"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"},
|
||||
{"Colombia", "Colombie (la)", "CO", "COL", "170"},
|
||||
{"Comoros (the)", "Comores (les)", "KM", "COM", "174"},
|
||||
{"Mayotte", "Mayotte", "YT", "MYT", "175"},
|
||||
{"Congo (the)", "Congo (le)", "CG", "COG", "178"},
|
||||
{"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"},
|
||||
{"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"},
|
||||
{"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"},
|
||||
{"Croatia", "Croatie (la)", "HR", "HRV", "191"},
|
||||
{"Cuba", "Cuba", "CU", "CUB", "192"},
|
||||
{"Cyprus", "Chypre", "CY", "CYP", "196"},
|
||||
{"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"},
|
||||
{"Benin", "Bénin (le)", "BJ", "BEN", "204"},
|
||||
{"Denmark", "Danemark (le)", "DK", "DNK", "208"},
|
||||
{"Dominica", "Dominique (la)", "DM", "DMA", "212"},
|
||||
{"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"},
|
||||
{"Ecuador", "Équateur (l')", "EC", "ECU", "218"},
|
||||
{"El Salvador", "El Salvador", "SV", "SLV", "222"},
|
||||
{"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"},
|
||||
{"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"},
|
||||
{"Eritrea", "Érythrée (l')", "ER", "ERI", "232"},
|
||||
{"Estonia", "Estonie (l')", "EE", "EST", "233"},
|
||||
{"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"},
|
||||
{"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"},
|
||||
{"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"},
|
||||
{"Fiji", "Fidji (les)", "FJ", "FJI", "242"},
|
||||
{"Finland", "Finlande (la)", "FI", "FIN", "246"},
|
||||
{"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"},
|
||||
{"France", "France (la)", "FR", "FRA", "250"},
|
||||
{"French Guiana", "Guyane française (la )", "GF", "GUF", "254"},
|
||||
{"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"},
|
||||
{"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"},
|
||||
{"Djibouti", "Djibouti", "DJ", "DJI", "262"},
|
||||
{"Gabon", "Gabon (le)", "GA", "GAB", "266"},
|
||||
{"Georgia", "Géorgie (la)", "GE", "GEO", "268"},
|
||||
{"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"},
|
||||
{"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"},
|
||||
{"Germany", "Allemagne (l')", "DE", "DEU", "276"},
|
||||
{"Ghana", "Ghana (le)", "GH", "GHA", "288"},
|
||||
{"Gibraltar", "Gibraltar", "GI", "GIB", "292"},
|
||||
{"Kiribati", "Kiribati", "KI", "KIR", "296"},
|
||||
{"Greece", "Grèce (la)", "GR", "GRC", "300"},
|
||||
{"Greenland", "Groenland (le)", "GL", "GRL", "304"},
|
||||
{"Grenada", "Grenade (la)", "GD", "GRD", "308"},
|
||||
{"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"},
|
||||
{"Guam", "Guam", "GU", "GUM", "316"},
|
||||
{"Guatemala", "Guatemala (le)", "GT", "GTM", "320"},
|
||||
{"Guinea", "Guinée (la)", "GN", "GIN", "324"},
|
||||
{"Guyana", "Guyana (le)", "GY", "GUY", "328"},
|
||||
{"Haiti", "Haïti", "HT", "HTI", "332"},
|
||||
{"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"},
|
||||
{"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"},
|
||||
{"Honduras", "Honduras (le)", "HN", "HND", "340"},
|
||||
{"Hong Kong", "Hong Kong", "HK", "HKG", "344"},
|
||||
{"Hungary", "Hongrie (la)", "HU", "HUN", "348"},
|
||||
{"Iceland", "Islande (l')", "IS", "ISL", "352"},
|
||||
{"India", "Inde (l')", "IN", "IND", "356"},
|
||||
{"Indonesia", "Indonésie (l')", "ID", "IDN", "360"},
|
||||
{"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"},
|
||||
{"Iraq", "Iraq (l')", "IQ", "IRQ", "368"},
|
||||
{"Ireland", "Irlande (l')", "IE", "IRL", "372"},
|
||||
{"Israel", "Israël", "IL", "ISR", "376"},
|
||||
{"Italy", "Italie (l')", "IT", "ITA", "380"},
|
||||
{"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"},
|
||||
{"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"},
|
||||
{"Japan", "Japon (le)", "JP", "JPN", "392"},
|
||||
{"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"},
|
||||
{"Jordan", "Jordanie (la)", "JO", "JOR", "400"},
|
||||
{"Kenya", "Kenya (le)", "KE", "KEN", "404"},
|
||||
{"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"},
|
||||
{"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"},
|
||||
{"Kuwait", "Koweït (le)", "KW", "KWT", "414"},
|
||||
{"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"},
|
||||
{"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"},
|
||||
{"Lebanon", "Liban (le)", "LB", "LBN", "422"},
|
||||
{"Lesotho", "Lesotho (le)", "LS", "LSO", "426"},
|
||||
{"Latvia", "Lettonie (la)", "LV", "LVA", "428"},
|
||||
{"Liberia", "Libéria (le)", "LR", "LBR", "430"},
|
||||
{"Libya", "Libye (la)", "LY", "LBY", "434"},
|
||||
{"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"},
|
||||
{"Lithuania", "Lituanie (la)", "LT", "LTU", "440"},
|
||||
{"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"},
|
||||
{"Macao", "Macao", "MO", "MAC", "446"},
|
||||
{"Madagascar", "Madagascar", "MG", "MDG", "450"},
|
||||
{"Malawi", "Malawi (le)", "MW", "MWI", "454"},
|
||||
{"Malaysia", "Malaisie (la)", "MY", "MYS", "458"},
|
||||
{"Maldives", "Maldives (les)", "MV", "MDV", "462"},
|
||||
{"Mali", "Mali (le)", "ML", "MLI", "466"},
|
||||
{"Malta", "Malte", "MT", "MLT", "470"},
|
||||
{"Martinique", "Martinique (la)", "MQ", "MTQ", "474"},
|
||||
{"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"},
|
||||
{"Mauritius", "Maurice", "MU", "MUS", "480"},
|
||||
{"Mexico", "Mexique (le)", "MX", "MEX", "484"},
|
||||
{"Monaco", "Monaco", "MC", "MCO", "492"},
|
||||
{"Mongolia", "Mongolie (la)", "MN", "MNG", "496"},
|
||||
{"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"},
|
||||
{"Montenegro", "Monténégro (le)", "ME", "MNE", "499"},
|
||||
{"Montserrat", "Montserrat", "MS", "MSR", "500"},
|
||||
{"Morocco", "Maroc (le)", "MA", "MAR", "504"},
|
||||
{"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"},
|
||||
{"Oman", "Oman", "OM", "OMN", "512"},
|
||||
{"Namibia", "Namibie (la)", "NA", "NAM", "516"},
|
||||
{"Nauru", "Nauru", "NR", "NRU", "520"},
|
||||
{"Nepal", "Népal (le)", "NP", "NPL", "524"},
|
||||
{"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"},
|
||||
{"Curaçao", "Curaçao", "CW", "CUW", "531"},
|
||||
{"Aruba", "Aruba", "AW", "ABW", "533"},
|
||||
{"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"},
|
||||
{"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"},
|
||||
{"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"},
|
||||
{"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"},
|
||||
{"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"},
|
||||
{"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"},
|
||||
{"Niger (the)", "Niger (le)", "NE", "NER", "562"},
|
||||
{"Nigeria", "Nigéria (le)", "NG", "NGA", "566"},
|
||||
{"Niue", "Niue", "NU", "NIU", "570"},
|
||||
{"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"},
|
||||
{"Norway", "Norvège (la)", "NO", "NOR", "578"},
|
||||
{"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"},
|
||||
{"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"},
|
||||
{"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"},
|
||||
{"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"},
|
||||
{"Palau", "Palaos (les)", "PW", "PLW", "585"},
|
||||
{"Pakistan", "Pakistan (le)", "PK", "PAK", "586"},
|
||||
{"Panama", "Panama (le)", "PA", "PAN", "591"},
|
||||
{"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"},
|
||||
{"Paraguay", "Paraguay (le)", "PY", "PRY", "600"},
|
||||
{"Peru", "Pérou (le)", "PE", "PER", "604"},
|
||||
{"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"},
|
||||
{"Pitcairn", "Pitcairn", "PN", "PCN", "612"},
|
||||
{"Poland", "Pologne (la)", "PL", "POL", "616"},
|
||||
{"Portugal", "Portugal (le)", "PT", "PRT", "620"},
|
||||
{"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"},
|
||||
{"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"},
|
||||
{"Puerto Rico", "Porto Rico", "PR", "PRI", "630"},
|
||||
{"Qatar", "Qatar (le)", "QA", "QAT", "634"},
|
||||
{"Réunion", "Réunion (La)", "RE", "REU", "638"},
|
||||
{"Romania", "Roumanie (la)", "RO", "ROU", "642"},
|
||||
{"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"},
|
||||
{"Rwanda", "Rwanda (le)", "RW", "RWA", "646"},
|
||||
{"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"},
|
||||
{"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"},
|
||||
{"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"},
|
||||
{"Anguilla", "Anguilla", "AI", "AIA", "660"},
|
||||
{"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"},
|
||||
{"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"},
|
||||
{"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"},
|
||||
{"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"},
|
||||
{"San Marino", "Saint-Marin", "SM", "SMR", "674"},
|
||||
{"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"},
|
||||
{"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"},
|
||||
{"Senegal", "Sénégal (le)", "SN", "SEN", "686"},
|
||||
{"Serbia", "Serbie (la)", "RS", "SRB", "688"},
|
||||
{"Seychelles", "Seychelles (les)", "SC", "SYC", "690"},
|
||||
{"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"},
|
||||
{"Singapore", "Singapour", "SG", "SGP", "702"},
|
||||
{"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"},
|
||||
{"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"},
|
||||
{"Slovenia", "Slovénie (la)", "SI", "SVN", "705"},
|
||||
{"Somalia", "Somalie (la)", "SO", "SOM", "706"},
|
||||
{"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"},
|
||||
{"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"},
|
||||
{"Spain", "Espagne (l')", "ES", "ESP", "724"},
|
||||
{"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"},
|
||||
{"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"},
|
||||
{"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"},
|
||||
{"Suriname", "Suriname (le)", "SR", "SUR", "740"},
|
||||
{"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"},
|
||||
{"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"},
|
||||
{"Sweden", "Suède (la)", "SE", "SWE", "752"},
|
||||
{"Switzerland", "Suisse (la)", "CH", "CHE", "756"},
|
||||
{"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"},
|
||||
{"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"},
|
||||
{"Thailand", "Thaïlande (la)", "TH", "THA", "764"},
|
||||
{"Togo", "Togo (le)", "TG", "TGO", "768"},
|
||||
{"Tokelau", "Tokelau (les)", "TK", "TKL", "772"},
|
||||
{"Tonga", "Tonga (les)", "TO", "TON", "776"},
|
||||
{"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"},
|
||||
{"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"},
|
||||
{"Tunisia", "Tunisie (la)", "TN", "TUN", "788"},
|
||||
{"Turkey", "Turquie (la)", "TR", "TUR", "792"},
|
||||
{"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"},
|
||||
{"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"},
|
||||
{"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"},
|
||||
{"Uganda", "Ouganda (l')", "UG", "UGA", "800"},
|
||||
{"Ukraine", "Ukraine (l')", "UA", "UKR", "804"},
|
||||
{"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"},
|
||||
{"Egypt", "Égypte (l')", "EG", "EGY", "818"},
|
||||
{"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"},
|
||||
{"Guernsey", "Guernesey", "GG", "GGY", "831"},
|
||||
{"Jersey", "Jersey", "JE", "JEY", "832"},
|
||||
{"Isle of Man", "Île de Man", "IM", "IMN", "833"},
|
||||
{"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"},
|
||||
{"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"},
|
||||
{"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"},
|
||||
{"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"},
|
||||
{"Uruguay", "Uruguay (l')", "UY", "URY", "858"},
|
||||
{"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"},
|
||||
{"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"},
|
||||
{"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"},
|
||||
{"Samoa", "Samoa (le)", "WS", "WSM", "882"},
|
||||
{"Yemen", "Yémen (le)", "YE", "YEM", "887"},
|
||||
{"Zambia", "Zambie (la)", "ZM", "ZMB", "894"},
|
||||
}
|
213
vendor/github.com/asaskevich/govalidator/utils.go
generated
vendored
Normal file
|
@ -0,0 +1,213 @@
|
|||
package govalidator
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"html"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// Contains check if the string contains the substring.
|
||||
func Contains(str, substring string) bool {
|
||||
return strings.Contains(str, substring)
|
||||
}
|
||||
|
||||
// Matches check if string matches the pattern (pattern is regular expression)
|
||||
// In case of error return false
|
||||
func Matches(str, pattern string) bool {
|
||||
match, _ := regexp.MatchString(pattern, str)
|
||||
return match
|
||||
}
|
||||
|
||||
// LeftTrim trims characters from the left side of the input.
|
||||
// If the second argument is empty, leading whitespace is removed.
|
||||
func LeftTrim(str, chars string) string {
|
||||
pattern := ""
|
||||
if chars == "" {
|
||||
pattern = "^\\s+"
|
||||
} else {
|
||||
pattern = "^[" + chars + "]+"
|
||||
}
|
||||
r, _ := regexp.Compile(pattern)
|
||||
return string(r.ReplaceAll([]byte(str), []byte("")))
|
||||
}
|
||||
|
||||
// RightTrim trims characters from the right side of the input.
|
||||
// If the second argument is empty, trailing whitespace is removed.
|
||||
func RightTrim(str, chars string) string {
|
||||
pattern := ""
|
||||
if chars == "" {
|
||||
pattern = "\\s+$"
|
||||
} else {
|
||||
pattern = "[" + chars + "]+$"
|
||||
}
|
||||
r, _ := regexp.Compile(pattern)
|
||||
return string(r.ReplaceAll([]byte(str), []byte("")))
|
||||
}
|
||||
|
||||
// Trim trims characters from both sides of the input.
|
||||
// If the second argument is empty, surrounding whitespace is removed.
|
||||
func Trim(str, chars string) string {
|
||||
return LeftTrim(RightTrim(str, chars), chars)
|
||||
}
|
||||
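The three trim helpers above build a regular expression from the supplied character set (or fall back to whitespace). A minimal usage sketch, assuming the package is imported under its vendor path `github.com/asaskevich/govalidator`:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Empty chars argument: trim whitespace only.
	fmt.Println(govalidator.Trim("  hello  ", "")) // "hello"
	// Non-empty chars argument: strip that character set from the given side.
	fmt.Println(govalidator.LeftTrim("xxhelloxx", "x"))  // "helloxx"
	fmt.Println(govalidator.RightTrim("xxhelloxx", "x")) // "xxhello"
}
```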
|
||||
// WhiteList remove characters that do not appear in the whitelist.
|
||||
func WhiteList(str, chars string) string {
|
||||
pattern := "[^" + chars + "]+"
|
||||
r, _ := regexp.Compile(pattern)
|
||||
return string(r.ReplaceAll([]byte(str), []byte("")))
|
||||
}
|
||||
|
||||
// BlackList remove characters that appear in the blacklist.
|
||||
func BlackList(str, chars string) string {
|
||||
pattern := "[" + chars + "]+"
|
||||
r, _ := regexp.Compile(pattern)
|
||||
return string(r.ReplaceAll([]byte(str), []byte("")))
|
||||
}
|
||||
|
||||
// StripLow removes characters with a numerical value < 32 or equal to 127, mostly control characters.
|
||||
// If keepNewLines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
|
||||
func StripLow(str string, keepNewLines bool) string {
|
||||
chars := ""
|
||||
if keepNewLines {
|
||||
chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
|
||||
} else {
|
||||
chars = "\x00-\x1F\x7F"
|
||||
}
|
||||
return BlackList(str, chars)
|
||||
}
|
||||
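As a quick illustration of the three character-set filters above (a usage sketch, not part of the vendored file):

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.WhiteList("abc123", "0-9")) // "123": keep only digits
	fmt.Println(govalidator.BlackList("abc123", "0-9")) // "abc": drop all digits
	// Strip control characters but keep the newline (keepNewLines = true).
	fmt.Println(govalidator.StripLow("a\x00b\nc", true)) // "ab\nc"
}
```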
|
||||
// ReplacePattern replace regular expression pattern in string
|
||||
func ReplacePattern(str, pattern, replace string) string {
|
||||
r, _ := regexp.Compile(pattern)
|
||||
return string(r.ReplaceAll([]byte(str), []byte(replace)))
|
||||
}
|
||||
|
||||
// Escape replace <, >, & and " with HTML entities.
|
||||
var Escape = html.EscapeString
|
||||
|
||||
func addSegment(inrune, segment []rune) []rune {
|
||||
if len(segment) == 0 {
|
||||
return inrune
|
||||
}
|
||||
if len(inrune) != 0 {
|
||||
inrune = append(inrune, '_')
|
||||
}
|
||||
inrune = append(inrune, segment...)
|
||||
return inrune
|
||||
}
|
||||
|
||||
// UnderscoreToCamelCase converts from underscore separated form to camel case form.
|
||||
// Ex.: my_func => MyFunc
|
||||
func UnderscoreToCamelCase(s string) string {
|
||||
return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
|
||||
}
|
||||
|
||||
// CamelCaseToUnderscore converts from camel case form to underscore separated form.
|
||||
// Ex.: MyFunc => my_func
|
||||
func CamelCaseToUnderscore(str string) string {
|
||||
var output []rune
|
||||
var segment []rune
|
||||
for _, r := range str {
|
||||
if !unicode.IsLower(r) {
|
||||
output = addSegment(output, segment)
|
||||
segment = nil
|
||||
}
|
||||
segment = append(segment, unicode.ToLower(r))
|
||||
}
|
||||
output = addSegment(output, segment)
|
||||
return string(output)
|
||||
}
|
||||
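The two case-conversion helpers above are inverses for simple identifiers; note that CamelCaseToUnderscore starts a new segment at every upper-case rune. A small sketch of what they produce:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.UnderscoreToCamelCase("my_func")) // "MyFunc"
	fmt.Println(govalidator.CamelCaseToUnderscore("MyFunc"))  // "my_func"
	// Runs of capitals are split rune by rune.
	fmt.Println(govalidator.CamelCaseToUnderscore("HTTPServer")) // "h_t_t_p_server"
}
```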
|
||||
// Reverse return reversed string
|
||||
func Reverse(s string) string {
|
||||
r := []rune(s)
|
||||
for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
|
||||
r[i], r[j] = r[j], r[i]
|
||||
}
|
||||
return string(r)
|
||||
}
|
||||
|
||||
// GetLines split string by "\n" and return array of lines
|
||||
func GetLines(s string) []string {
|
||||
return strings.Split(s, "\n")
|
||||
}
|
||||
|
||||
// GetLine return specified line of multiline string
|
||||
func GetLine(s string, index int) (string, error) {
|
||||
lines := GetLines(s)
|
||||
if index < 0 || index >= len(lines) {
|
||||
return "", errors.New("line index out of bounds")
|
||||
}
|
||||
return lines[index], nil
|
||||
}
|
||||
|
||||
// RemoveTags remove all tags from HTML string
|
||||
func RemoveTags(s string) string {
|
||||
return ReplacePattern(s, "<[^>]*>", "")
|
||||
}
|
||||
|
||||
// SafeFileName return safe string that can be used in file names
|
||||
func SafeFileName(str string) string {
|
||||
name := strings.ToLower(str)
|
||||
name = path.Clean(path.Base(name))
|
||||
name = strings.Trim(name, " ")
|
||||
separators, err := regexp.Compile(`[ &_=+:]`)
|
||||
if err == nil {
|
||||
name = separators.ReplaceAllString(name, "-")
|
||||
}
|
||||
legal, err := regexp.Compile(`[^[:alnum:]-.]`)
|
||||
if err == nil {
|
||||
name = legal.ReplaceAllString(name, "")
|
||||
}
|
||||
for strings.Contains(name, "--") {
|
||||
name = strings.Replace(name, "--", "-", -1)
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
// NormalizeEmail canonicalize an email address.
|
||||
// The local part of the email address is lowercased for all domains; the hostname is always lowercased and
|
||||
// the local part of the email address is always lowercased for hosts that are known to be case-insensitive (currently only GMail).
|
||||
// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and
|
||||
// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are
|
||||
// normalized to @gmail.com.
|
||||
func NormalizeEmail(str string) (string, error) {
|
||||
if !IsEmail(str) {
|
||||
return "", fmt.Errorf("%s is not an email", str)
|
||||
}
|
||||
parts := strings.Split(str, "@")
|
||||
parts[0] = strings.ToLower(parts[0])
|
||||
parts[1] = strings.ToLower(parts[1])
|
||||
if parts[1] == "gmail.com" || parts[1] == "googlemail.com" {
|
||||
parts[1] = "gmail.com"
|
||||
parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0]
|
||||
}
|
||||
return strings.Join(parts, "@"), nil
|
||||
}
|
||||
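A sketch of the GMail-specific normalization described in the comment above (the address itself is made up; note that IsEmail currently rejects upper-case letters, so the input is already lower-case):

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	normalized, err := govalidator.NormalizeEmail("some.one+newsletter@googlemail.com")
	if err != nil {
		fmt.Println("not an email:", err)
		return
	}
	// Dots and the "+tag" suffix are stripped from the local part and
	// googlemail.com is rewritten, yielding "someone@gmail.com".
	fmt.Println(normalized)
}
```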
|
||||
// Truncate a string to the closest length without breaking words.
|
||||
func Truncate(str string, length int, ending string) string {
|
||||
var aftstr, befstr string
|
||||
if len(str) > length {
|
||||
words := strings.Fields(str)
|
||||
before, present := 0, 0
|
||||
for i := range words {
|
||||
befstr = aftstr
|
||||
before = present
|
||||
aftstr = aftstr + words[i] + " "
|
||||
present = len(aftstr)
|
||||
if present > length && i != 0 {
|
||||
if (length - before) < (present - length) {
|
||||
return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
|
||||
}
|
||||
return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return str
|
||||
}
|
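Truncate cuts at whichever word boundary lies closest to the requested length and then appends the ending, so the result may be slightly shorter or longer than the limit. A usage sketch:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	s := "The quick brown fox jumps over the lazy dog"
	// Cut near 25 characters without breaking a word, then append the ending.
	fmt.Println(govalidator.Truncate(s, 25, "...")) // "The quick brown fox jumps..."
	// Strings not exceeding the requested length are returned unchanged.
	fmt.Println(govalidator.Truncate("short", 25, "..."))
}
```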
921
vendor/github.com/asaskevich/govalidator/validator.go
generated
vendored
Normal file
|
@ -0,0 +1,921 @@
|
|||
// Package govalidator is package of validators and sanitizers for strings, structs and collections.
|
||||
package govalidator
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var fieldsRequiredByDefault bool
|
||||
|
||||
// SetFieldsRequiredByDefault causes validation to fail when struct fields
|
||||
// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`).
|
||||
// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
|
||||
// type exampleStruct struct {
|
||||
// Name string ``
|
||||
// Email string `valid:"email"`
|
||||
// This, however, will only fail when Email is empty or an invalid email address:
|
||||
// type exampleStruct2 struct {
|
||||
// Name string `valid:"-"`
|
||||
// Email string `valid:"email"`
|
||||
// Lastly, this will only fail when Email is an invalid email address but not when it's empty:
|
||||
// type exampleStruct2 struct {
|
||||
// Name string `valid:"-"`
|
||||
// Email string `valid:"email,optional"`
|
||||
func SetFieldsRequiredByDefault(value bool) {
|
||||
fieldsRequiredByDefault = value
|
||||
}
|
||||
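The tag semantics documented above translate into struct definitions like the following sketch; the struct and field names are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

// user is a hypothetical struct used only to illustrate the `valid` tags.
type user struct {
	Name  string `valid:"-"`              // exempt from validation
	Email string `valid:"email,optional"` // validated only when non-empty
}

func main() {
	ok, err := govalidator.ValidateStruct(user{Name: "x", Email: "not-an-email"})
	// ok is false and err reports that Email does not validate as email.
	fmt.Println(ok, err)
}
```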
|
||||
// IsEmail check if the string is an email.
|
||||
func IsEmail(str string) bool {
|
||||
// TODO uppercase letters are not supported
|
||||
return rxEmail.MatchString(str)
|
||||
}
|
||||
|
||||
// IsURL check if the string is an URL.
|
||||
func IsURL(str string) bool {
|
||||
if str == "" || len(str) >= 2083 || len(str) <= 3 || strings.HasPrefix(str, ".") {
|
||||
return false
|
||||
}
|
||||
u, err := url.Parse(str)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if strings.HasPrefix(u.Host, ".") {
|
||||
return false
|
||||
}
|
||||
if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
|
||||
return false
|
||||
}
|
||||
return rxURL.MatchString(str)
|
||||
|
||||
}
|
||||
|
||||
// IsRequestURL check if the string rawurl, assuming
|
||||
// it was received in an HTTP request, is a valid
|
||||
// URL conforming to RFC 3986
|
||||
func IsRequestURL(rawurl string) bool {
|
||||
url, err := url.ParseRequestURI(rawurl)
|
||||
if err != nil {
|
||||
return false //Couldn't even parse the rawurl
|
||||
}
|
||||
if len(url.Scheme) == 0 {
|
||||
return false //No Scheme found
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// IsRequestURI check if the string rawurl, assuming
|
||||
// it was received in an HTTP request, is an
|
||||
// absolute URI or an absolute path.
|
||||
func IsRequestURI(rawurl string) bool {
|
||||
_, err := url.ParseRequestURI(rawurl)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// IsAlpha check if the string contains only letters (a-zA-Z). Empty string is valid.
|
||||
func IsAlpha(str string) bool {
|
||||
if IsNull(str) {
|
||||
return true
|
||||
}
|
||||
return rxAlpha.MatchString(str)
|
||||
}
|
||||
|
||||
//IsUTFLetter check if the string contains only unicode letter characters.
|
||||
//Similar to IsAlpha but for all languages. Empty string is valid.
|
||||
func IsUTFLetter(str string) bool {
|
||||
if IsNull(str) {
|
||||
return true
|
||||
}
|
||||
|
||||
for _, c := range str {
|
||||
if !unicode.IsLetter(c) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
|
||||
}
|
||||
|
||||
// IsAlphanumeric check if the string contains only letters and numbers. Empty string is valid.
|
||||
func IsAlphanumeric(str string) bool {
|
||||
if IsNull(str) {
|
||||
return true
|
||||
}
|
||||
return rxAlphanumeric.MatchString(str)
|
||||
}
|
||||
|
||||
// IsUTFLetterNumeric check if the string contains only unicode letters and numbers. Empty string is valid.
|
||||
func IsUTFLetterNumeric(str string) bool {
|
||||
if IsNull(str) {
|
||||
return true
|
||||
}
|
||||
for _, c := range str {
|
||||
if !unicode.IsLetter(c) && !unicode.IsNumber(c) { //letters && numbers are ok
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
|
||||
}
|
||||
|
||||
// IsNumeric check if the string contains only numbers. Empty string is valid.
|
||||
func IsNumeric(str string) bool {
|
||||
if IsNull(str) {
|
||||
return true
|
||||
}
|
||||
return rxNumeric.MatchString(str)
|
||||
}
|
||||
|
||||
// IsUTFNumeric check if the string contains only unicode numbers of any kind.
|
||||
// Numbers can be 0-9 but also Fractions ¾,Roman Ⅸ and Hangzhou 〩. Empty string is valid.
|
||||
func IsUTFNumeric(str string) bool {
|
||||
if IsNull(str) {
|
||||
return true
|
||||
}
|
||||
if strings.IndexAny(str, "+-") > 0 {
|
||||
return false
|
||||
}
|
||||
if len(str) > 1 {
|
||||
str = strings.TrimPrefix(str, "-")
|
||||
str = strings.TrimPrefix(str, "+")
|
||||
}
|
||||
for _, c := range str {
|
||||
if unicode.IsNumber(c) == false { //numbers && minus sign are ok
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
|
||||
}
|
||||
|
||||
// IsUTFDigit check if the string contains only unicode radix-10 decimal digits. Empty string is valid.
|
||||
func IsUTFDigit(str string) bool {
|
||||
if IsNull(str) {
|
||||
return true
|
||||
}
|
||||
if strings.IndexAny(str, "+-") > 0 {
|
||||
return false
|
||||
}
|
||||
if len(str) > 1 {
|
||||
str = strings.TrimPrefix(str, "-")
|
||||
str = strings.TrimPrefix(str, "+")
|
||||
}
|
||||
for _, c := range str {
|
||||
if !unicode.IsDigit(c) { //digits && minus sign are ok
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
|
||||
}
|
||||
|
||||
// IsHexadecimal check if the string is a hexadecimal number.
|
||||
func IsHexadecimal(str string) bool {
|
||||
return rxHexadecimal.MatchString(str)
|
||||
}
|
||||
|
||||
// IsHexcolor check if the string is a hexadecimal color.
|
||||
func IsHexcolor(str string) bool {
|
||||
return rxHexcolor.MatchString(str)
|
||||
}
|
||||
|
||||
// IsRGBcolor check if the string is a valid RGB color in form rgb(RRR, GGG, BBB).
|
||||
func IsRGBcolor(str string) bool {
|
||||
return rxRGBcolor.MatchString(str)
|
||||
}
|
||||
|
||||
// IsLowerCase check if the string is lowercase. Empty string is valid.
|
||||
func IsLowerCase(str string) bool {
|
||||
if IsNull(str) {
|
||||
return true
|
||||
}
|
||||
return str == strings.ToLower(str)
|
||||
}
|
||||
|
||||
// IsUpperCase check if the string is uppercase. Empty string is valid.
|
||||
func IsUpperCase(str string) bool {
|
||||
if IsNull(str) {
|
||||
return true
|
||||
}
|
||||
return str == strings.ToUpper(str)
|
||||
}
|
||||
|
||||
// IsInt check if the string is an integer. Empty string is valid.
|
||||
func IsInt(str string) bool {
|
||||
if IsNull(str) {
|
||||
return true
|
||||
}
|
||||
return rxInt.MatchString(str)
|
||||
}
|
||||
|
||||
// IsFloat check if the string is a float.
|
||||
func IsFloat(str string) bool {
|
||||
return str != "" && rxFloat.MatchString(str)
|
||||
}
|
||||
|
||||
// IsDivisibleBy check if the string is a number that's divisible by another.
|
||||
// If the second argument is not a valid integer or is zero, it returns false.
|
||||
// Otherwise, if the first argument is not a valid integer or is zero, it returns true (an invalid string converts to zero).
|
||||
func IsDivisibleBy(str, num string) bool {
|
||||
f, _ := ToFloat(str)
|
||||
p := int64(f)
|
||||
q, _ := ToInt(num)
|
||||
if q == 0 {
|
||||
return false
|
||||
}
|
||||
return (p == 0) || (p%q == 0)
|
||||
}
|
||||
|
||||
// IsNull check if the string is null.
|
||||
func IsNull(str string) bool {
|
||||
return len(str) == 0
|
||||
}
|
||||
|
||||
// IsByteLength check if the string's length (in bytes) falls in a range.
|
||||
func IsByteLength(str string, min, max int) bool {
|
||||
return len(str) >= min && len(str) <= max
|
||||
}
|
||||
|
||||
// IsUUIDv3 check if the string is a UUID version 3.
|
||||
func IsUUIDv3(str string) bool {
|
||||
return rxUUID3.MatchString(str)
|
||||
}
|
||||
|
||||
// IsUUIDv4 check if the string is a UUID version 4.
|
||||
func IsUUIDv4(str string) bool {
|
||||
return rxUUID4.MatchString(str)
|
||||
}
|
||||
|
||||
// IsUUIDv5 check if the string is a UUID version 5.
|
||||
func IsUUIDv5(str string) bool {
|
||||
return rxUUID5.MatchString(str)
|
||||
}
|
||||
|
||||
// IsUUID check if the string is a UUID (version 3, 4 or 5).
|
||||
func IsUUID(str string) bool {
|
||||
return rxUUID.MatchString(str)
|
||||
}
|
||||
|
||||
// IsCreditCard check if the string is a credit card.
|
||||
func IsCreditCard(str string) bool {
|
||||
r, _ := regexp.Compile("[^0-9]+")
|
||||
sanitized := r.ReplaceAll([]byte(str), []byte(""))
|
||||
if !rxCreditCard.MatchString(string(sanitized)) {
|
||||
return false
|
||||
}
|
||||
var sum int64
|
||||
var digit string
|
||||
var tmpNum int64
|
||||
var shouldDouble bool
|
||||
for i := len(sanitized) - 1; i >= 0; i-- {
|
||||
digit = string(sanitized[i:(i + 1)])
|
||||
tmpNum, _ = ToInt(digit)
|
||||
if shouldDouble {
|
||||
tmpNum *= 2
|
||||
if tmpNum >= 10 {
|
||||
sum += ((tmpNum % 10) + 1)
|
||||
} else {
|
||||
sum += tmpNum
|
||||
}
|
||||
} else {
|
||||
sum += tmpNum
|
||||
}
|
||||
shouldDouble = !shouldDouble
|
||||
}
|
||||
|
||||
if sum%10 == 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
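IsCreditCard therefore strips non-digits and applies the standard Luhn checksum: double every second digit from the right, fold results above 9 back into a single digit, and require the total to be divisible by 10. A sketch using the well-known Visa test number:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// 4111 1111 1111 1111 is the standard Visa test number and passes the Luhn check.
	fmt.Println(govalidator.IsCreditCard("4111 1111 1111 1111")) // true
	// Changing any single digit breaks the checksum.
	fmt.Println(govalidator.IsCreditCard("4111 1111 1111 1112")) // false
}
```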
|
||||
// IsISBN10 check if the string is an ISBN version 10.
|
||||
func IsISBN10(str string) bool {
|
||||
return IsISBN(str, 10)
|
||||
}
|
||||
|
||||
// IsISBN13 check if the string is an ISBN version 13.
|
||||
func IsISBN13(str string) bool {
|
||||
return IsISBN(str, 13)
|
||||
}
|
||||
|
||||
// IsISBN check if the string is an ISBN (version 10 or 13).
|
||||
// If the version value is not equal to 10 or 13, both variants will be checked.
|
||||
func IsISBN(str string, version int) bool {
|
||||
r, _ := regexp.Compile("[\\s-]+")
|
||||
sanitized := r.ReplaceAll([]byte(str), []byte(""))
|
||||
var checksum int32
|
||||
var i int32
|
||||
if version == 10 {
|
||||
if !rxISBN10.MatchString(string(sanitized)) {
|
||||
return false
|
||||
}
|
||||
for i = 0; i < 9; i++ {
|
||||
checksum += (i + 1) * int32(sanitized[i]-'0')
|
||||
}
|
||||
if sanitized[9] == 'X' {
|
||||
checksum += 10 * 10
|
||||
} else {
|
||||
checksum += 10 * int32(sanitized[9]-'0')
|
||||
}
|
||||
if checksum%11 == 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
} else if version == 13 {
|
||||
if !rxISBN13.MatchString(string(sanitized)) {
|
||||
return false
|
||||
}
|
||||
factor := []int32{1, 3}
|
||||
for i = 0; i < 12; i++ {
|
||||
checksum += factor[i%2] * int32(sanitized[i]-'0')
|
||||
}
|
||||
if (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
return IsISBN(str, 10) || IsISBN(str, 13)
|
||||
}
|
||||
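The two branches implement the classic weighted checksums: ISBN-10 weights the first nine digits by 1…9 (with 'X' standing for 10 in the check position) and requires the sum to be divisible by 11, while ISBN-13 alternates weights 1 and 3 and compares against the final digit. A sketch using the canonical example ISBNs:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsISBN10("0-306-40615-2"))     // true
	fmt.Println(govalidator.IsISBN13("978-3-16-148410-0")) // true
	// Any version value other than 10 or 13 checks both variants.
	fmt.Println(govalidator.IsISBN("0-306-40615-2", 0)) // true
}
```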
|
||||
// IsJSON check if the string is valid JSON (note: uses json.Unmarshal).
|
||||
func IsJSON(str string) bool {
|
||||
var js json.RawMessage
|
||||
return json.Unmarshal([]byte(str), &js) == nil
|
||||
}
|
||||
|
||||
// IsMultibyte check if the string contains one or more multibyte chars. Empty string is valid.
|
||||
func IsMultibyte(str string) bool {
|
||||
if IsNull(str) {
|
||||
return true
|
||||
}
|
||||
return rxMultibyte.MatchString(str)
|
||||
}
|
||||
|
||||
// IsASCII check if the string contains ASCII chars only. Empty string is valid.
|
||||
func IsASCII(str string) bool {
|
||||
if IsNull(str) {
|
||||
return true
|
||||
}
|
||||
return rxASCII.MatchString(str)
|
||||
}
|
||||
|
||||
// IsPrintableASCII check if the string contains printable ASCII chars only. Empty string is valid.
|
||||
func IsPrintableASCII(str string) bool {
|
||||
if IsNull(str) {
|
||||
return true
|
||||
}
|
||||
return rxPrintableASCII.MatchString(str)
|
||||
}
|
||||
|
||||
// IsFullWidth check if the string contains any full-width chars. Empty string is valid.
|
||||
func IsFullWidth(str string) bool {
|
||||
if IsNull(str) {
|
||||
return true
|
||||
}
|
||||
return rxFullWidth.MatchString(str)
|
||||
}
|
||||
|
||||
// IsHalfWidth check if the string contains any half-width chars. Empty string is valid.
|
||||
func IsHalfWidth(str string) bool {
|
||||
if IsNull(str) {
|
||||
return true
|
||||
}
|
||||
return rxHalfWidth.MatchString(str)
|
||||
}
|
||||
|
||||
// IsVariableWidth check if the string contains a mixture of full and half-width chars. Empty string is valid.
|
||||
func IsVariableWidth(str string) bool {
|
||||
if IsNull(str) {
|
||||
return true
|
||||
}
|
||||
return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str)
|
||||
}
|
||||
|
||||
// IsBase64 check if a string is base64 encoded.
|
||||
func IsBase64(str string) bool {
|
||||
return rxBase64.MatchString(str)
|
||||
}
|
||||
|
||||
// IsFilePath checks if a string is a Windows or Unix file path and returns its type.
|
||||
func IsFilePath(str string) (bool, int) {
|
||||
if rxWinPath.MatchString(str) {
|
||||
//check windows path limit see:
|
||||
// http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
|
||||
if len(str[3:]) > 32767 {
|
||||
return false, Win
|
||||
}
|
||||
return true, Win
|
||||
} else if rxUnixPath.MatchString(str) {
|
||||
return true, Unix
|
||||
}
|
||||
return false, Unknown
|
||||
}
|
||||
|
||||
// IsDataURI checks if a string is base64 encoded data URI such as an image
|
||||
func IsDataURI(str string) bool {
|
||||
dataURI := strings.Split(str, ",")
|
||||
if !rxDataURI.MatchString(dataURI[0]) {
|
||||
return false
|
||||
}
|
||||
return IsBase64(dataURI[1])
|
||||
}
|
||||
|
||||
// IsISO3166Alpha2 checks if a string is valid two-letter country code
|
||||
func IsISO3166Alpha2(str string) bool {
|
||||
for _, entry := range ISO3166List {
|
||||
if str == entry.Alpha2Code {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsISO3166Alpha3 checks if a string is valid three-letter country code
|
||||
func IsISO3166Alpha3(str string) bool {
|
||||
for _, entry := range ISO3166List {
|
||||
if str == entry.Alpha3Code {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsDNSName will validate the given string as a DNS name
|
||||
func IsDNSName(str string) bool {
|
||||
if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 {
|
||||
// constraints already violated
|
||||
return false
|
||||
}
|
||||
return rxDNSName.MatchString(str)
|
||||
}
|
||||
|
||||
// IsDialString validates the given string for usage with the various Dial() functions
|
||||
func IsDialString(str string) bool {
|
||||
|
||||
if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// IsIP checks if a string is either IP version 4 or 6.
|
||||
func IsIP(str string) bool {
|
||||
return net.ParseIP(str) != nil
|
||||
}
|
||||
|
||||
// IsPort checks if a string represents a valid port
|
||||
func IsPort(str string) bool {
|
||||
if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsIPv4 check if the string is an IP version 4.
|
||||
func IsIPv4(str string) bool {
|
||||
ip := net.ParseIP(str)
|
||||
return ip != nil && strings.Contains(str, ".")
|
||||
}
|
||||
|
||||
// IsIPv6 check if the string is an IP version 6.
|
||||
func IsIPv6(str string) bool {
|
||||
ip := net.ParseIP(str)
|
||||
return ip != nil && strings.Contains(str, ":")
|
||||
}
|
||||
|
||||
// IsMAC check if a string is valid MAC address.
|
||||
// Possible MAC formats:
|
||||
// 01:23:45:67:89:ab
|
||||
// 01:23:45:67:89:ab:cd:ef
|
||||
// 01-23-45-67-89-ab
|
||||
// 01-23-45-67-89-ab-cd-ef
|
||||
// 0123.4567.89ab
|
||||
// 0123.4567.89ab.cdef
|
||||
func IsMAC(str string) bool {
|
||||
_, err := net.ParseMAC(str)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name
|
||||
func IsHost(str string) bool {
|
||||
return IsIP(str) || IsDNSName(str)
|
||||
}
|
||||
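Taken together, the network helpers above let a caller validate a host, a port, or a complete dial string in one call each. A small usage sketch:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsIP("2001:db8::1"))   // true (IPv6)
	fmt.Println(govalidator.IsIPv4("192.0.2.1"))   // true
	fmt.Println(govalidator.IsPort("8080"))        // true
	fmt.Println(govalidator.IsHost("example.com")) // true (DNS name)
	// IsDialString combines host and port validation.
	fmt.Println(govalidator.IsDialString("example.com:443")) // true
}
```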
|
||||
// IsMongoID check if the string is a valid hex-encoded representation of a MongoDB ObjectId.
|
||||
func IsMongoID(str string) bool {
|
||||
return rxHexadecimal.MatchString(str) && (len(str) == 24)
|
||||
}
|
||||
|
||||
// IsLatitude check if a string is valid latitude.
|
||||
func IsLatitude(str string) bool {
|
||||
return rxLatitude.MatchString(str)
|
||||
}
|
||||
|
||||
// IsLongitude check if a string is valid longitude.
|
||||
func IsLongitude(str string) bool {
|
||||
return rxLongitude.MatchString(str)
|
||||
}
|
||||
|
||||
// ValidateStruct use tags for fields.
|
||||
// result will be equal to `false` if there are any errors.
|
||||
func ValidateStruct(s interface{}) (bool, error) {
|
||||
if s == nil {
|
||||
return true, nil
|
||||
}
|
||||
result := true
|
||||
var err error
|
||||
val := reflect.ValueOf(s)
|
||||
if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {
|
||||
val = val.Elem()
|
||||
}
|
||||
// we only accept structs
|
||||
if val.Kind() != reflect.Struct {
|
||||
return false, fmt.Errorf("function only accepts structs; got %s", val.Kind())
|
||||
}
|
||||
var errs Errors
|
||||
for i := 0; i < val.NumField(); i++ {
|
||||
valueField := val.Field(i)
|
||||
typeField := val.Type().Field(i)
|
||||
if typeField.PkgPath != "" {
|
||||
continue // Private field
|
||||
}
|
||||
resultField, err2 := typeCheck(valueField, typeField, val)
|
||||
if err2 != nil {
|
||||
errs = append(errs, err2)
|
||||
}
|
||||
result = result && resultField
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
err = errs
|
||||
}
|
||||
return result, err
|
||||
}
|
||||
|
||||
// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""}
|
||||
func parseTagIntoMap(tag string) tagOptionsMap {
|
||||
optionsMap := make(tagOptionsMap)
|
||||
options := strings.SplitN(tag, ",", -1)
|
||||
for _, option := range options {
|
||||
validationOptions := strings.Split(option, "~")
|
||||
if !isValidTag(validationOptions[0]) {
|
||||
continue
|
||||
}
|
||||
if len(validationOptions) == 2 {
|
||||
optionsMap[validationOptions[0]] = validationOptions[1]
|
||||
} else {
|
||||
optionsMap[validationOptions[0]] = ""
|
||||
}
|
||||
}
|
||||
return optionsMap
|
||||
}
|
||||
|
||||
func isValidTag(s string) bool {
|
||||
if s == "" {
|
||||
return false
|
||||
}
|
||||
for _, c := range s {
|
||||
switch {
|
||||
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
|
||||
// Backslash and quote chars are reserved, but
|
||||
// otherwise any punctuation chars are allowed
|
||||
// in a tag name.
|
||||
default:
|
||||
if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// IsSSN will validate the given string as a U.S. Social Security Number
|
||||
func IsSSN(str string) bool {
|
||||
if str == "" || len(str) != 11 {
|
||||
return false
|
||||
}
|
||||
return rxSSN.MatchString(str)
|
||||
}
|
||||
|
||||
// IsSemver check if string is valid semantic version
|
||||
func IsSemver(str string) bool {
|
||||
return rxSemver.MatchString(str)
|
||||
}
|
||||
|
||||
// ByteLength check string's length
|
||||
func ByteLength(str string, params ...string) bool {
|
||||
if len(params) == 2 {
|
||||
min, _ := ToInt(params[0])
|
||||
max, _ := ToInt(params[1])
|
||||
return len(str) >= int(min) && len(str) <= int(max)
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// StringMatches checks if a string matches a given pattern.
|
||||
func StringMatches(s string, params ...string) bool {
|
||||
if len(params) == 1 {
|
||||
pattern := params[0]
|
||||
return Matches(s, pattern)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// StringLength check string's length (including multi byte strings)
|
||||
func StringLength(str string, params ...string) bool {
|
||||
|
||||
if len(params) == 2 {
|
||||
strLength := utf8.RuneCountInString(str)
|
||||
min, _ := ToInt(params[0])
|
||||
max, _ := ToInt(params[1])
|
||||
return strLength >= int(min) && strLength <= int(max)
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) {
|
||||
if requiredOption, isRequired := options["required"]; isRequired {
|
||||
if len(requiredOption) > 0 {
|
||||
return false, Error{t.Name, fmt.Errorf(requiredOption), true}
|
||||
}
|
||||
return false, Error{t.Name, fmt.Errorf("non zero value required"), false}
|
||||
} else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional {
|
||||
return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false}
|
||||
}
|
||||
// not required and empty is valid
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value) (bool, error) {
|
||||
if !v.IsValid() {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
tag := t.Tag.Get(tagName)
|
||||
|
||||
// Check if the field should be ignored
|
||||
switch tag {
|
||||
case "":
|
||||
if !fieldsRequiredByDefault {
|
||||
return true, nil
|
||||
}
|
||||
return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false}
|
||||
case "-":
|
||||
return true, nil
|
||||
}
|
||||
|
||||
options := parseTagIntoMap(tag)
|
||||
var customTypeErrors Errors
|
||||
var customTypeValidatorsExist bool
|
||||
for validatorName, customErrorMessage := range options {
|
||||
if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok {
|
||||
customTypeValidatorsExist = true
|
||||
if result := validatefunc(v.Interface(), o.Interface()); !result {
|
||||
if len(customErrorMessage) > 0 {
|
||||
customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf(customErrorMessage), CustomErrorMessageExists: true})
|
||||
continue
|
||||
}
|
||||
customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false})
|
||||
}
|
||||
}
|
||||
}
|
||||
if customTypeValidatorsExist {
|
||||
if len(customTypeErrors.Errors()) > 0 {
|
||||
return false, customTypeErrors
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if isEmptyValue(v) {
|
||||
// an empty value is not validated, check only required
|
||||
return checkRequired(v, t, options)
|
||||
}
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Bool,
|
||||
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
|
||||
reflect.Float32, reflect.Float64,
|
||||
reflect.String:
|
||||
// for each tag option check the map of validator functions
|
||||
for validator, customErrorMessage := range options {
|
||||
var negate bool
|
||||
customMsgExists := (len(customErrorMessage) > 0)
|
||||
// Check whether the tag looks like '!something' or 'something'
|
||||
if validator[0] == '!' {
|
||||
validator = string(validator[1:])
|
||||
negate = true
|
||||
}
|
||||
|
||||
// Check for param validators
|
||||
for key, value := range ParamTagRegexMap {
|
||||
ps := value.FindStringSubmatch(validator)
|
||||
if len(ps) > 0 {
|
||||
if validatefunc, ok := ParamTagMap[key]; ok {
|
||||
switch v.Kind() {
|
||||
case reflect.String:
|
||||
field := fmt.Sprint(v) // make value into string, then validate with regex
|
||||
if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) {
|
||||
var err error
|
||||
if !negate {
|
||||
if customMsgExists {
|
||||
err = fmt.Errorf(customErrorMessage)
|
||||
} else {
|
||||
err = fmt.Errorf("%s does not validate as %s", field, validator)
|
||||
}
|
||||
|
||||
} else {
|
||||
if customMsgExists {
|
||||
err = fmt.Errorf(customErrorMessage)
|
||||
} else {
|
||||
err = fmt.Errorf("%s does validate as %s", field, validator)
|
||||
}
|
||||
}
|
||||
return false, Error{t.Name, err, customMsgExists}
|
||||
}
|
||||
default:
|
||||
// type not yet supported, fail
|
||||
return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if validatefunc, ok := TagMap[validator]; ok {
|
||||
switch v.Kind() {
|
||||
case reflect.String:
|
||||
field := fmt.Sprint(v) // make value into string, then validate with regex
|
||||
if result := validatefunc(field); !result && !negate || result && negate {
|
||||
var err error
|
||||
|
||||
if !negate {
|
||||
if customMsgExists {
|
||||
err = fmt.Errorf(customErrorMessage)
|
||||
} else {
|
||||
err = fmt.Errorf("%s does not validate as %s", field, validator)
|
||||
}
|
||||
} else {
|
||||
if customMsgExists {
|
||||
err = fmt.Errorf(customErrorMessage)
|
||||
} else {
|
||||
err = fmt.Errorf("%s does validate as %s", field, validator)
|
||||
}
|
||||
}
|
||||
return false, Error{t.Name, err, customMsgExists}
|
||||
}
|
||||
default:
|
||||
//Not Yet Supported Types (Fail here!)
|
||||
err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v)
|
||||
return false, Error{t.Name, err, false}
|
||||
}
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
case reflect.Map:
|
||||
if v.Type().Key().Kind() != reflect.String {
|
||||
return false, &UnsupportedTypeError{v.Type()}
|
||||
}
|
||||
var sv stringValues
|
||||
sv = v.MapKeys()
|
||||
sort.Sort(sv)
|
||||
result := true
|
||||
for _, k := range sv {
|
||||
resultItem, err := ValidateStruct(v.MapIndex(k).Interface())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
result = result && resultItem
|
||||
}
|
||||
return result, nil
|
||||
case reflect.Slice:
|
||||
result := true
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
var resultItem bool
|
||||
var err error
|
||||
if v.Index(i).Kind() != reflect.Struct {
|
||||
resultItem, err = typeCheck(v.Index(i), t, o)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
} else {
|
||||
resultItem, err = ValidateStruct(v.Index(i).Interface())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
result = result && resultItem
|
||||
}
|
||||
return result, nil
|
||||
case reflect.Array:
|
||||
result := true
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
var resultItem bool
|
||||
var err error
|
||||
if v.Index(i).Kind() != reflect.Struct {
|
||||
resultItem, err = typeCheck(v.Index(i), t, o)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
} else {
|
||||
resultItem, err = ValidateStruct(v.Index(i).Interface())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
result = result && resultItem
|
||||
}
|
||||
return result, nil
|
||||
case reflect.Interface:
|
||||
// If the value is an interface then encode its element
|
||||
if v.IsNil() {
|
||||
return true, nil
|
||||
}
|
||||
return ValidateStruct(v.Interface())
|
||||
case reflect.Ptr:
|
||||
// If the value is a pointer then check its element
|
||||
if v.IsNil() {
|
||||
return true, nil
|
||||
}
|
||||
return typeCheck(v.Elem(), t, o)
|
||||
case reflect.Struct:
|
||||
return ValidateStruct(v.Interface())
|
||||
default:
|
||||
return false, &UnsupportedTypeError{v.Type()}
|
||||
}
|
||||
}
|
||||
|
||||
func isEmptyValue(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.String, reflect.Array:
|
||||
return v.Len() == 0
|
||||
case reflect.Map, reflect.Slice:
|
||||
return v.Len() == 0 || v.IsNil()
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
}
|
||||
|
||||
return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface())
|
||||
}
|
||||
|
||||
// ErrorByField returns error for specified field of the struct
|
||||
// validated by ValidateStruct or empty string if there are no errors
|
||||
// or this field doesn't exist or doesn't have any errors.
|
||||
func ErrorByField(e error, field string) string {
|
||||
if e == nil {
|
||||
return ""
|
||||
}
|
||||
return ErrorsByField(e)[field]
|
||||
}
|
||||
|
||||
// ErrorsByField returns map of errors of the struct validated
|
||||
// by ValidateStruct or empty map if there are no errors.
|
||||
func ErrorsByField(e error) map[string]string {
|
||||
m := make(map[string]string)
|
||||
if e == nil {
|
||||
return m
|
||||
}
|
||||
// prototype for ValidateStruct
|
||||
|
||||
switch e.(type) {
|
||||
case Error:
|
||||
m[e.(Error).Name] = e.(Error).Err.Error()
|
||||
case Errors:
|
||||
for _, item := range e.(Errors).Errors() {
|
||||
m[item.(Error).Name] = item.(Error).Err.Error()
|
||||
}
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
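A sketch of how the per-field error accessors are typically combined with ValidateStruct; the struct and field names are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

// signupForm is a hypothetical struct used only for illustration.
type signupForm struct {
	Email string `valid:"email"`
}

func main() {
	_, err := govalidator.ValidateStruct(signupForm{Email: "nope"})
	// Map of field name -> error message for every failing field.
	fmt.Println(govalidator.ErrorsByField(err))
	// Or fetch a single field's message (empty string if the field has no error).
	fmt.Println(govalidator.ErrorByField(err, "Email"))
}
```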
|
||||
// Error returns string equivalent for reflect.Type
|
||||
func (e *UnsupportedTypeError) Error() string {
|
||||
return "validator: unsupported type: " + e.Type.String()
|
||||
}
|
||||
|
||||
func (sv stringValues) Len() int { return len(sv) }
|
||||
func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
|
||||
func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
|
||||
func (sv stringValues) get(i int) string { return sv[i].String() }
|
15
vendor/github.com/asaskevich/govalidator/wercker.yml
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
box: wercker/golang
|
||||
build:
|
||||
steps:
|
||||
- setup-go-workspace
|
||||
|
||||
- script:
|
||||
name: go get
|
||||
code: |
|
||||
go version
|
||||
go get -t ./...
|
||||
|
||||
- script:
|
||||
name: go test
|
||||
code: |
|
||||
go test -race ./...
|
21
vendor/github.com/mitchellh/go-homedir/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 Mitchell Hashimoto
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
14
vendor/github.com/mitchellh/go-homedir/README.md
generated
vendored
Normal file
|
@ -0,0 +1,14 @@
|
|||
# go-homedir
|
||||
|
||||
This is a Go library for detecting the user's home directory without
|
||||
the use of cgo, so the library can be used in cross-compilation environments.
|
||||
|
||||
Usage is incredibly simple, just call `homedir.Dir()` to get the home directory
|
||||
for a user, and `homedir.Expand()` to expand the `~` in a path to the home
|
||||
directory.
|
||||
|
||||
**Why not just use `os/user`?** The built-in `os/user` package requires
|
||||
cgo on Darwin systems. This means that any Go code that uses that package
|
||||
cannot cross compile. But 99% of the time the use for `os/user` is just to
|
||||
retrieve the home directory, which we can do for the current user without
|
||||
cgo. This library does that, enabling cross-compilation.
|
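A minimal usage sketch of the two calls described above, importing the package under its vendor path (the expanded path is only an example):

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/go-homedir"
)

func main() {
	home, err := homedir.Dir()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("home directory:", home)

	// Expand turns "~/..." into an absolute path below the home directory.
	cfg, err := homedir.Expand("~/.config/example.yml")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("expanded path:", cfg)
}
```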
132
vendor/github.com/mitchellh/go-homedir/homedir.go
generated
vendored
Normal file
|
@ -0,0 +1,132 @@
|
|||
package homedir
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// DisableCache will disable caching of the home directory. Caching is enabled
|
||||
// by default.
|
||||
var DisableCache bool
|
||||
|
||||
var homedirCache string
|
||||
var cacheLock sync.RWMutex
|
||||
|
||||
// Dir returns the home directory for the executing user.
|
||||
//
|
||||
// This uses an OS-specific method for discovering the home directory.
|
||||
// An error is returned if a home directory cannot be detected.
|
||||
func Dir() (string, error) {
|
||||
if !DisableCache {
|
||||
cacheLock.RLock()
|
||||
cached := homedirCache
|
||||
cacheLock.RUnlock()
|
||||
if cached != "" {
|
||||
return cached, nil
|
||||
}
|
||||
}
|
||||
|
||||
cacheLock.Lock()
|
||||
defer cacheLock.Unlock()
|
||||
|
||||
var result string
|
||||
var err error
|
||||
if runtime.GOOS == "windows" {
|
||||
result, err = dirWindows()
|
||||
} else {
|
||||
// Unix-like system, so just assume Unix
|
||||
result, err = dirUnix()
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
homedirCache = result
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Expand expands the path to include the home directory if the path
|
||||
// is prefixed with `~`. If it isn't prefixed with `~`, the path is
|
||||
// returned as-is.
|
||||
func Expand(path string) (string, error) {
|
||||
if len(path) == 0 {
|
||||
return path, nil
|
||||
}
|
||||
|
||||
if path[0] != '~' {
|
||||
return path, nil
|
||||
}
|
||||
|
||||
if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
|
||||
return "", errors.New("cannot expand user-specific home dir")
|
||||
}
|
||||
|
||||
dir, err := Dir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return filepath.Join(dir, path[1:]), nil
|
||||
}
|
||||
|
||||
func dirUnix() (string, error) {
|
||||
// First prefer the HOME environmental variable
|
||||
if home := os.Getenv("HOME"); home != "" {
|
||||
return home, nil
|
||||
}
|
||||
|
||||
// If that fails, try getent
|
||||
var stdout bytes.Buffer
|
||||
cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
|
||||
cmd.Stdout = &stdout
|
||||
if err := cmd.Run(); err != nil {
|
||||
// If "getent" is missing, ignore it
|
||||
if err == exec.ErrNotFound {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
|
||||
// username:password:uid:gid:gecos:home:shell
|
||||
passwdParts := strings.SplitN(passwd, ":", 7)
|
||||
if len(passwdParts) > 5 {
|
||||
return passwdParts[5], nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If all else fails, try the shell
|
||||
stdout.Reset()
|
||||
cmd = exec.Command("sh", "-c", "cd && pwd")
|
||||
cmd.Stdout = &stdout
|
||||
if err := cmd.Run(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
result := strings.TrimSpace(stdout.String())
|
||||
if result == "" {
|
||||
return "", errors.New("blank output when reading home directory")
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func dirWindows() (string, error) {
|
||||
drive := os.Getenv("HOMEDRIVE")
|
||||
path := os.Getenv("HOMEPATH")
|
||||
home := drive + path
|
||||
if drive == "" || path == "" {
|
||||
home = os.Getenv("USERPROFILE")
|
||||
}
|
||||
if home == "" {
|
||||
return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
|
||||
}
|
||||
|
||||
return home, nil
|
||||
}
|
27
vendor/github.com/nightlyone/lockfile/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# popular temporaries
|
||||
.err
|
||||
.out
|
||||
.diff
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
3
vendor/github.com/nightlyone/lockfile/.gitmodules
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
[submodule "git-hooks"]
|
||||
path = git-hooks
|
||||
url = https://github.com/nightlyone/git-hooks
|
2
vendor/github.com/nightlyone/lockfile/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,2 @@
|
|||
language: go
|
||||
|
19
vendor/github.com/nightlyone/lockfile/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,19 @@
|
|||
Copyright (c) 2012 Ingo Oeser
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
52
vendor/github.com/nightlyone/lockfile/README.md
generated
vendored
Normal file
|
@ -0,0 +1,52 @@
|
|||
lockfile
|
||||
=========
|
||||
Handle locking via pid files.
|
||||
|
||||
[![Build Status Unix][1]][2]
|
||||
[![Build status Windows][3]][4]
|
||||
|
||||
[1]: https://secure.travis-ci.org/nightlyone/lockfile.png
|
||||
[2]: https://travis-ci.org/nightlyone/lockfile
|
||||
[3]: https://ci.appveyor.com/api/projects/status/7mojkmauj81uvp8u/branch/master?svg=true
|
||||
[4]: https://ci.appveyor.com/project/nightlyone/lockfile/branch/master
|
||||
|
||||
|
||||
|
||||
install
|
||||
-------
|
||||
Install [Go 1][5], either [from source][6] or [with a prepackaged binary][7].
|
||||
For Windows support, Go 1.4 or newer is required.
|
||||
|
||||
Then run
|
||||
|
||||
go get github.com/nightlyone/lockfile
|
||||
|
||||
[5]: http://golang.org
|
||||
[6]: http://golang.org/doc/install/source
|
||||
[7]: http://golang.org/doc/install
|
||||
|
||||
LICENSE
|
||||
-------
|
||||
BSD
|
||||
|
||||
documentation
|
||||
-------------
|
||||
[package documentation at godoc.org](http://godoc.org/github.com/nightlyone/lockfile)
|
||||
|
||||
install
|
||||
-------------------
|
||||
go get github.com/nightlyone/lockfile
|
||||
|
||||
|
||||
contributing
|
||||
============
|
||||
|
||||
Contributions are welcome. Please open an issue or send me a pull request for a dedicated branch.
|
||||
Make sure the git commit hooks show it works.
|
||||
|
||||
git commit hooks
|
||||
-----------------------
|
||||
enable commit hooks via
|
||||
|
||||
cd .git ; rm -rf hooks; ln -s ../git-hooks hooks ; cd ..
|
||||
|
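A minimal sketch of the pid-file locking flow this package provides; the lock path is only an example, and the calls match the API defined in lockfile.go below:

```go
package main

import (
	"log"

	"github.com/nightlyone/lockfile"
)

func main() {
	// New requires an absolute path to the pid file.
	lock, err := lockfile.New("/tmp/example.lock")
	if err != nil {
		log.Fatal(err)
	}

	// TryLock returns ErrBusy while another live process holds the lock;
	// stale locks of dead processes are cleaned up automatically.
	if err := lock.TryLock(); err != nil {
		log.Fatalf("cannot acquire lock: %v", err)
	}
	defer lock.Unlock()

	// ... do the work that must not run twice in parallel ...
}
```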
12
vendor/github.com/nightlyone/lockfile/appveyor.yml
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
|||
clone_folder: c:\gopath\src\github.com\nightlyone\lockfile
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
|
||||
install:
|
||||
- go version
|
||||
- go env
|
||||
- go get -v -t ./...
|
||||
|
||||
build_script:
|
||||
- go test -v ./...
|
125
vendor/github.com/nightlyone/lockfile/lockfile.go
generated
vendored
Normal file
|
@ -0,0 +1,125 @@
|
|||
// Handle pid file based locking.
|
||||
package lockfile
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
type Lockfile string
|
||||
|
||||
var (
|
||||
ErrBusy = errors.New("Locked by other process") // If you get this, retry after a short sleep might help
|
||||
ErrNeedAbsPath = errors.New("Lockfiles must be given as absolute path names")
|
||||
ErrInvalidPid = errors.New("Lockfile contains invalid pid for system")
|
||||
ErrDeadOwner = errors.New("Lockfile contains pid of process not existent on this system anymore")
|
||||
)
|
||||
|
||||
// New describes a new filename located at path. It is expected to be an absolute path.
|
||||
func New(path string) (Lockfile, error) {
|
||||
if !filepath.IsAbs(path) {
|
||||
return Lockfile(""), ErrNeedAbsPath
|
||||
}
|
||||
return Lockfile(path), nil
|
||||
}
|
||||
|
||||
// Who owns the lockfile?
|
||||
func (l Lockfile) GetOwner() (*os.Process, error) {
|
||||
name := string(l)
|
||||
|
||||
// Ok, see, if we have a stale lockfile here
|
||||
content, err := ioutil.ReadFile(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var pid int
|
||||
_, err = fmt.Sscanln(string(content), &pid)
|
||||
if err != nil {
|
||||
return nil, ErrInvalidPid
|
||||
}
|
||||
|
||||
// try hard for pids. If no pid, the lockfile is junk anyway and we delete it.
|
||||
if pid > 0 {
|
||||
p, err := os.FindProcess(pid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p, isProcessAlive(p)
|
||||
} else {
|
||||
return nil, ErrInvalidPid
|
||||
}
|
||||
panic("Not reached")
|
||||
}
|
||||
|
||||
// Try to get the Lockfile lock. Returns nil if successful, or an error describing why it didn't work out.
|
||||
// Please note, that existing lockfiles containing pids of dead processes and lockfiles containing no pid at all
|
||||
// are deleted.
|
||||
func (l Lockfile) TryLock() error {
|
||||
name := string(l)
|
||||
|
||||
// This has been checked by New already. If we trigger here,
|
||||
// the caller didn't use New and re-implemented its functionality badly.
|
||||
// So panic so that the caller finds this easily during testing.
|
||||
if !filepath.IsAbs(string(name)) {
|
||||
panic(ErrNeedAbsPath)
|
||||
}
|
||||
|
||||
tmplock, err := ioutil.TempFile(filepath.Dir(name), "")
|
||||
if err != nil {
|
||||
return err
|
||||
} else {
|
||||
defer tmplock.Close()
|
||||
defer os.Remove(tmplock.Name())
|
||||
}
|
||||
|
||||
_, err = tmplock.WriteString(fmt.Sprintf("%d\n", os.Getpid()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// return value intentionally ignored, as ignoring it is part of the algorithm
|
||||
_ = os.Link(tmplock.Name(), name)
|
||||
|
||||
fiTmp, err := os.Lstat(tmplock.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fiLock, err := os.Lstat(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Success
|
||||
if os.SameFile(fiTmp, fiLock) {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err = l.GetOwner()
|
||||
switch err {
|
||||
default:
|
||||
// Other errors -> defensively fail and let caller handle this
|
||||
return err
|
||||
case nil:
|
||||
return ErrBusy
|
||||
case ErrDeadOwner, ErrInvalidPid:
|
||||
// cases we can fix below
|
||||
}
|
||||
|
||||
// clean stale/invalid lockfile
|
||||
err = os.Remove(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// now that we cleaned up the stale lockfile, let's recurse
|
||||
return l.TryLock()
|
||||
}
|
||||
|
||||
// Release a lock again. Returns any error that happened during release of the lock.
|
||||
func (l Lockfile) Unlock() error {
|
||||
return os.Remove(string(l))
|
||||
}
|
28
vendor/github.com/nightlyone/lockfile/lockfile_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,28 @@
|
|||
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris

package lockfile

import (
	"os"
	"syscall"
)

func isProcessAlive(p *os.Process) error {
	// Signal 0 delivers no signal but still performs the existence and permission checks.
	err := p.Signal(os.Signal(syscall.Signal(0)))
	if err == nil {
		return nil
	}
	errno, ok := err.(syscall.Errno)
	if !ok {
		return ErrDeadOwner
	}

	switch errno {
	case syscall.ESRCH:
		// No such process: the owner is dead.
		return ErrDeadOwner
	case syscall.EPERM:
		// The process exists but belongs to another user.
		return nil
	default:
		return err
	}
}
32 vendor/github.com/nightlyone/lockfile/lockfile_windows.go (generated, vendored, new file)
@@ -0,0 +1,32 @@
package lockfile

import (
	"os"
	"reflect"
	"syscall"
)

func isProcessAlive(p *os.Process) error {
	// Extract the handle value from the os.Process struct to avoid the need
	// for a second, manually opened process handle.
	value := reflect.ValueOf(p)
	// Dereference *os.Process to os.Process
	value = value.Elem()
	field := value.FieldByName("handle")

	handle := syscall.Handle(field.Uint())

	var code uint32
	err := syscall.GetExitCodeProcess(handle, &code)
	if err != nil {
		return err
	}

	// code will contain the exit code of the process or 259 (STILL_ACTIVE)
	// if the process has not exited yet.
	if code == 259 {
		return nil
	}

	return ErrDeadOwner
}
4 vendor/github.com/onsi/ginkgo/.gitignore (generated, vendored, new file)
@@ -0,0 +1,4 @@
.DS_Store
TODO
tmp/**/*
*.coverprofile
15 vendor/github.com/onsi/ginkgo/.travis.yml (generated, vendored, new file)
@@ -0,0 +1,15 @@
language: go
go:
- 1.3
- 1.4
- 1.5
- tip

install:
- go get -v ./...
- go get golang.org/x/tools/cmd/cover
- go get github.com/onsi/gomega
- go install github.com/onsi/ginkgo/ginkgo
- export PATH=$PATH:$HOME/gopath/bin

script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace
136 vendor/github.com/onsi/ginkgo/CHANGELOG.md (generated, vendored, new file)
@@ -0,0 +1,136 @@
## HEAD
|
||||
|
||||
Improvements:
|
||||
|
||||
- `Skip(message)` can be used to skip the current test.
|
||||
- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
|
||||
|
||||
Bug Fixes:
|
||||
|
||||
- Ginkgo tests now fail when you `panic(nil)` (#167)
|
||||
|
||||
## 1.2.0 5/31/2015
|
||||
|
||||
Improvements
|
||||
|
||||
- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
|
||||
- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
|
||||
- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)
|
||||
|
||||
## 1.2.0-beta
|
||||
|
||||
Ginkgo now requires Go 1.4+
|
||||
|
||||
Improvements:
|
||||
|
||||
- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
|
||||
- Improved focus behavior. Now, this:
|
||||
|
||||
```golang
|
||||
FDescribe("Some describe", func() {
|
||||
It("A", func() {})
|
||||
|
||||
FIt("B", func() {})
|
||||
})
|
||||
```
|
||||
|
||||
will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests.
|
||||
- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests.
|
||||
- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs.
|
||||
- Improved output when an error occurs in a setup or teardown block.
|
||||
- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order.
|
||||
- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`.
|
||||
- Add support for precompiled tests:
|
||||
- `ginkgo build <path-to-package>` will now compile the package, producing a file named `package.test`
|
||||
- The compiled `package.test` file can be run directly. This runs the tests in series.
|
||||
- To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
|
||||
- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
|
||||
- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
|
||||
- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
|
||||
- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
|
||||
- `ginkgo -notify` now works on Linux
|
||||
|
||||
Bug Fixes:
|
||||
|
||||
- If --skipPackages is used and all packages are skipped, Ginkgo now exits 0.
|
||||
- Fix tempfile leak when running in parallel
|
||||
- Fix incorrect failure message when a panic occurs during a parallel test run
|
||||
- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
|
||||
- Be more consistent about handling SIGTERM as well as SIGINT
|
||||
- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
|
||||
- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
|
||||
|
||||
## 1.1.0 (8/2/2014)
|
||||
|
||||
No changes, just dropping the beta.
|
||||
|
||||
## 1.1.0-beta (7/22/2014)
|
||||
New Features:
|
||||
|
||||
- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
|
||||
- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
|
||||
- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
|
||||
- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
|
||||
- `ginkgo --failFast` aborts the test suite after the first failure.
|
||||
- `ginkgo generate file_1 file_2` can take multiple file arguments.
|
||||
- At the end of the test run, Ginkgo now summarizes any spec failures that occurred.
|
||||
- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
|
||||
|
||||
Improvements:
|
||||
|
||||
- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
|
||||
- `ginkgo --untilItFails` no longer recompiles between attempts.
|
||||
- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.
|
||||
|
||||
Bug Fixes:
|
||||
|
||||
- `ginkgo bootstrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
|
||||
- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
|
||||
|
||||
## 1.0.0 (5/24/2014)
|
||||
New Features:
|
||||
|
||||
- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
|
||||
|
||||
Improvements:
|
||||
|
||||
- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm to open the file in your text editor.
|
||||
- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
|
||||
|
||||
Bug Fixes:
|
||||
|
||||
- `-cover` now generates a correctly combined coverprofile when running in parallel with multiple `-node`s.
|
||||
- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
|
||||
- Fix all remaining race conditions in Ginkgo's test suite.
|
||||
|
||||
## 1.0.0-beta (4/14/2014)
|
||||
Breaking changes:
|
||||
|
||||
- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
|
||||
- Modified the Reporter interface
|
||||
- `watch` is now a subcommand, not a flag.
|
||||
|
||||
DSL changes:
|
||||
|
||||
- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
|
||||
- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
|
||||
- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
|
||||
|
||||
CLI changes:
|
||||
|
||||
- `watch` is now a subcommand, not a flag
|
||||
- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot`
|
||||
- Additional arguments can be passed to specs. Pass them after the `--` separator
|
||||
- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
|
||||
- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
|
||||
|
||||
Misc:
|
||||
|
||||
- Start using semantic versioning
|
||||
- Start maintaining changelog
|
||||
|
||||
Major refactor:
|
||||
|
||||
- Pull out Ginkgo's internal to `internal`
|
||||
- Rename `example` everywhere to `spec`
|
||||
- Much more!
20 vendor/github.com/onsi/ginkgo/LICENSE (generated, vendored, new file)
@@ -0,0 +1,20 @@
Copyright (c) 2013-2014 Onsi Fakhouri
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
115 vendor/github.com/onsi/ginkgo/README.md (generated, vendored, new file)
@@ -0,0 +1,115 @@
![Ginkgo: A Golang BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
|
||||
|
||||
[![Build Status](https://travis-ci.org/onsi/ginkgo.png)](https://travis-ci.org/onsi/ginkgo)
|
||||
|
||||
Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
|
||||
|
||||
To discuss Ginkgo and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega).
|
||||
|
||||
## Feature List
|
||||
|
||||
- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
|
||||
|
||||
- Structure your BDD-style tests expressively:
|
||||
- Nestable [`Describe` and `Context` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
|
||||
- [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
|
||||
- [`It` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
|
||||
- [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
|
||||
- [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
|
||||
|
||||
- A comprehensive test runner that lets you:
|
||||
- Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs)
|
||||
- [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
|
||||
- Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
|
||||
- Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs)
|
||||
|
||||
- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
|
||||
- `ginkgo -nodes=N` runs your tests in `N` parallel processes and prints out coherent output in real time
|
||||
- `ginkgo -cover` runs your tests using Golang's code coverage tool
|
||||
- `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
|
||||
- `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
|
||||
- `ginkgo -r` runs all tests suites under the current directory
|
||||
- `ginkgo -v` prints out identifying information for each test just before it runs
|
||||
|
||||
And much more: run `ginkgo help` for details!
|
||||
|
||||
The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test`
|
||||
|
||||
- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!
|
||||
|
||||
- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests)
|
||||
|
||||
- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.
|
||||
|
||||
- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
|
||||
|
||||
- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details.
|
||||
|
||||
- A modular architecture that lets you easily:
|
||||
- Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
|
||||
- [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
|
||||
|
||||
## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
|
||||
|
||||
Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/)
|
||||
|
||||
## [Agouti](http://github.com/sclevine/agouti): A Golang Acceptance Testing Framework
|
||||
|
||||
Agouti allows you to run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org)
|
||||
|
||||
## Set Me Up!
|
||||
|
||||
You'll need Golang v1.3+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
|
||||
|
||||
```bash
|
||||
|
||||
go get github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
|
||||
go get github.com/onsi/gomega # fetches the matcher library
|
||||
|
||||
cd path/to/package/you/want/to/test
|
||||
|
||||
ginkgo bootstrap # set up a new ginkgo suite
|
||||
ginkgo generate # will create a sample test file. edit this file and add your tests then...
|
||||
|
||||
go test # to run your tests
|
||||
|
||||
ginkgo # also runs your tests
|
||||
|
||||
```
|
||||
|
||||
## I'm new to Go: What are my testing options?
|
||||
|
||||
Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set.
|
||||
|
||||
With that said, it's great to know what your options are :)
|
||||
|
||||
### What Golang gives you out of the box
|
||||
|
||||
Testing is a first-class citizen in Golang; however, Go's built-in testing primitives are somewhat limited: the [testing](http://golang.org/pkg/testing) package provides basic XUnit-style tests and no assertion library.
|
||||
|
||||
### Matcher libraries for Golang's XUnit style tests
|
||||
|
||||
A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:
|
||||
|
||||
- [testify](https://github.com/stretchr/testify)
|
||||
- [gocheck](http://labix.org/gocheck)
|
||||
|
||||
You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
|
||||
|
||||
### BDD style testing frameworks
|
||||
|
||||
There are a handful of BDD-style testing frameworks written for Golang. Here are a few:
|
||||
|
||||
- [Ginkgo](https://github.com/onsi/ginkgo) ;)
|
||||
- [GoConvey](https://github.com/smartystreets/goconvey)
|
||||
- [Goblin](https://github.com/franela/goblin)
|
||||
- [Mao](https://github.com/azer/mao)
|
||||
- [Zen](https://github.com/pranavraja/zen)
|
||||
|
||||
Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of golang testing libraries.
|
||||
|
||||
Go explore!
|
||||
|
||||
## License
|
||||
|
||||
Ginkgo is MIT-Licensed
170 vendor/github.com/onsi/ginkgo/config/config.go (generated, vendored, new file)
@@ -0,0 +1,170 @@
/*
|
||||
Ginkgo accepts a number of configuration options.
|
||||
|
||||
These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
|
||||
|
||||
You can also learn more via
|
||||
|
||||
ginkgo help
|
||||
|
||||
or (I kid you not):
|
||||
|
||||
go test -asdf
|
||||
*/
|
||||
package config
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"time"
|
||||
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const VERSION = "1.2.0"
|
||||
|
||||
type GinkgoConfigType struct {
|
||||
RandomSeed int64
|
||||
RandomizeAllSpecs bool
|
||||
FocusString string
|
||||
SkipString string
|
||||
SkipMeasurements bool
|
||||
FailOnPending bool
|
||||
FailFast bool
|
||||
EmitSpecProgress bool
|
||||
DryRun bool
|
||||
|
||||
ParallelNode int
|
||||
ParallelTotal int
|
||||
SyncHost string
|
||||
StreamHost string
|
||||
}
|
||||
|
||||
var GinkgoConfig = GinkgoConfigType{}
|
||||
|
||||
type DefaultReporterConfigType struct {
|
||||
NoColor bool
|
||||
SlowSpecThreshold float64
|
||||
NoisyPendings bool
|
||||
Succinct bool
|
||||
Verbose bool
|
||||
FullTrace bool
|
||||
}
|
||||
|
||||
var DefaultReporterConfig = DefaultReporterConfigType{}
|
||||
|
||||
func processPrefix(prefix string) string {
|
||||
if prefix != "" {
|
||||
prefix = prefix + "."
|
||||
}
|
||||
return prefix
|
||||
}
|
||||
|
||||
func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
|
||||
prefix = processPrefix(prefix)
|
||||
flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe/Context groups.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.")
|
||||
flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.")
|
||||
flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")
|
||||
|
||||
if includeParallelFlags {
|
||||
flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.")
|
||||
flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.")
|
||||
flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
|
||||
flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
|
||||
}
|
||||
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
|
||||
flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter (default: 5 seconds).")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter prints out all specs as they begin.")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
|
||||
}
|
||||
|
||||
func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
|
||||
prefix = processPrefix(prefix)
|
||||
result := make([]string, 0)
|
||||
|
||||
if ginkgo.RandomSeed > 0 {
|
||||
result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
|
||||
}
|
||||
|
||||
if ginkgo.RandomizeAllSpecs {
|
||||
result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.SkipMeasurements {
|
||||
result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.FailOnPending {
|
||||
result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.FailFast {
|
||||
result = append(result, fmt.Sprintf("--%sfailFast", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.DryRun {
|
||||
result = append(result, fmt.Sprintf("--%sdryRun", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.FocusString != "" {
|
||||
result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString))
|
||||
}
|
||||
|
||||
if ginkgo.SkipString != "" {
|
||||
result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString))
|
||||
}
|
||||
|
||||
if ginkgo.EmitSpecProgress {
|
||||
result = append(result, fmt.Sprintf("--%sprogress", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.ParallelNode != 0 {
|
||||
result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
|
||||
}
|
||||
|
||||
if ginkgo.ParallelTotal != 0 {
|
||||
result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
|
||||
}
|
||||
|
||||
if ginkgo.StreamHost != "" {
|
||||
result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
|
||||
}
|
||||
|
||||
if ginkgo.SyncHost != "" {
|
||||
result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
|
||||
}
|
||||
|
||||
if reporter.NoColor {
|
||||
result = append(result, fmt.Sprintf("--%snoColor", prefix))
|
||||
}
|
||||
|
||||
if reporter.SlowSpecThreshold > 0 {
|
||||
result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold))
|
||||
}
|
||||
|
||||
if !reporter.NoisyPendings {
|
||||
result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
|
||||
}
|
||||
|
||||
if reporter.Verbose {
|
||||
result = append(result, fmt.Sprintf("--%sv", prefix))
|
||||
}
|
||||
|
||||
if reporter.Succinct {
|
||||
result = append(result, fmt.Sprintf("--%ssuccinct", prefix))
|
||||
}
|
||||
|
||||
if reporter.FullTrace {
|
||||
result = append(result, fmt.Sprintf("--%strace", prefix))
|
||||
}
|
||||
|
||||
return result
|
||||
}
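As a hedged sketch of how the two config structs above round-trip through BuildFlagArgs, consider the following; the field values are made-up assumptions, not defaults taken from this file.

```go
package main

import (
	"fmt"

	"github.com/onsi/ginkgo/config"
)

func main() {
	// Assumed values for illustration only.
	ginkgoConf := config.GinkgoConfigType{RandomSeed: 42, FailFast: true, ParallelNode: 1, ParallelTotal: 4}
	reporterConf := config.DefaultReporterConfigType{Verbose: true, NoisyPendings: true, SlowSpecThreshold: 5.0}

	// BuildFlagArgs re-serializes the structs into the same CLI flags that Flags() registers,
	// e.g. --ginkgo.seed=42 --ginkgo.failFast --ginkgo.parallel.node=1 ...
	fmt.Println(config.BuildFlagArgs("ginkgo", ginkgoConf, reporterConf))
}
```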
536 vendor/github.com/onsi/ginkgo/ginkgo_dsl.go (generated, vendored, new file)
@@ -0,0 +1,536 @@
/*
|
||||
Ginkgo is a BDD-style testing framework for Golang
|
||||
|
||||
The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/
|
||||
|
||||
Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega)
|
||||
|
||||
Ginkgo on Github: http://github.com/onsi/ginkgo
|
||||
|
||||
Ginkgo is MIT-Licensed
|
||||
*/
|
||||
package ginkgo
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/internal/codelocation"
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/internal/remote"
|
||||
"github.com/onsi/ginkgo/internal/suite"
|
||||
"github.com/onsi/ginkgo/internal/testingtproxy"
|
||||
"github.com/onsi/ginkgo/internal/writer"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
const GINKGO_VERSION = config.VERSION
|
||||
const GINKGO_PANIC = `
|
||||
Your test failed.
|
||||
Ginkgo panics to prevent subsequent assertions from running.
|
||||
Normally Ginkgo rescues this panic so you shouldn't see it.
|
||||
|
||||
But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
|
||||
To circumvent this, you should call
|
||||
|
||||
defer GinkgoRecover()
|
||||
|
||||
at the top of the goroutine that caused this panic.
|
||||
`
|
||||
const defaultTimeout = 1
|
||||
|
||||
var globalSuite *suite.Suite
|
||||
var globalFailer *failer.Failer
|
||||
|
||||
func init() {
|
||||
config.Flags(flag.CommandLine, "ginkgo", true)
|
||||
GinkgoWriter = writer.New(os.Stdout)
|
||||
globalFailer = failer.New()
|
||||
globalSuite = suite.New(globalFailer)
|
||||
}
|
||||
|
||||
//GinkgoWriter implements an io.Writer
|
||||
//When running in verbose mode any writes to GinkgoWriter will be immediately printed
|
||||
//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
|
||||
//only if the current test fails.
|
||||
var GinkgoWriter io.Writer
|
||||
|
||||
//The interface by which Ginkgo receives *testing.T
|
||||
type GinkgoTestingT interface {
|
||||
Fail()
|
||||
}
|
||||
|
||||
//GinkgoParallelNode returns the parallel node number for the current ginkgo process
|
||||
//The node number is 1-indexed
|
||||
func GinkgoParallelNode() int {
|
||||
return config.GinkgoConfig.ParallelNode
|
||||
}
|
||||
|
||||
//Some matcher libraries or legacy codebases require a *testing.T
|
||||
//GinkgoT implements an interface analogous to *testing.T and can be used if
|
||||
//the library in question accepts *testing.T through an interface
|
||||
//
|
||||
// For example, with testify:
|
||||
// assert.Equal(GinkgoT(), 123, 123, "they should be equal")
|
||||
//
|
||||
// Or with gomock:
|
||||
// gomock.NewController(GinkgoT())
|
||||
//
|
||||
// GinkgoT() takes an optional offset argument that can be used to get the
|
||||
// correct line number associated with the failure.
|
||||
func GinkgoT(optionalOffset ...int) GinkgoTInterface {
|
||||
offset := 3
|
||||
if len(optionalOffset) > 0 {
|
||||
offset = optionalOffset[0]
|
||||
}
|
||||
return testingtproxy.New(GinkgoWriter, Fail, offset)
|
||||
}
|
||||
|
||||
//The interface returned by GinkgoT(). This covers most of the methods
|
||||
//in the testing package's T.
|
||||
type GinkgoTInterface interface {
|
||||
Fail()
|
||||
Error(args ...interface{})
|
||||
Errorf(format string, args ...interface{})
|
||||
FailNow()
|
||||
Fatal(args ...interface{})
|
||||
Fatalf(format string, args ...interface{})
|
||||
Log(args ...interface{})
|
||||
Logf(format string, args ...interface{})
|
||||
Failed() bool
|
||||
Parallel()
|
||||
Skip(args ...interface{})
|
||||
Skipf(format string, args ...interface{})
|
||||
SkipNow()
|
||||
Skipped() bool
|
||||
}
|
||||
|
||||
//Custom Ginkgo test reporters must implement the Reporter interface.
|
||||
//
|
||||
//The custom reporter is passed in a SuiteSummary when the suite begins and ends,
|
||||
//and a SpecSummary just before a spec begins and just after a spec ends
|
||||
type Reporter reporters.Reporter
|
||||
|
||||
//Asynchronous specs are given a channel of the Done type. You must close or write to the channel
|
||||
//to tell Ginkgo that your async test is done.
|
||||
type Done chan<- interface{}
|
||||
|
||||
//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription
|
||||
// FullTestText: a concatenation of ComponentTexts and the TestText
|
||||
// ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test
|
||||
// TestText: the text in the actual It or Measure node
|
||||
// IsMeasurement: true if the current test is a measurement
|
||||
// FileName: the name of the file containing the current test
|
||||
// LineNumber: the line number for the current test
|
||||
// Failed: if the current test has failed, this will be true (useful in an AfterEach)
|
||||
type GinkgoTestDescription struct {
|
||||
FullTestText string
|
||||
ComponentTexts []string
|
||||
TestText string
|
||||
|
||||
IsMeasurement bool
|
||||
|
||||
FileName string
|
||||
LineNumber int
|
||||
|
||||
Failed bool
|
||||
}
|
||||
|
||||
//CurrentGinkgoTestDescription returns information about the currently running test.
|
||||
func CurrentGinkgoTestDescription() GinkgoTestDescription {
|
||||
summary, ok := globalSuite.CurrentRunningSpecSummary()
|
||||
if !ok {
|
||||
return GinkgoTestDescription{}
|
||||
}
|
||||
|
||||
subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1]
|
||||
|
||||
return GinkgoTestDescription{
|
||||
ComponentTexts: summary.ComponentTexts[1:],
|
||||
FullTestText: strings.Join(summary.ComponentTexts[1:], " "),
|
||||
TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1],
|
||||
IsMeasurement: summary.IsMeasurement,
|
||||
FileName: subjectCodeLocation.FileName,
|
||||
LineNumber: subjectCodeLocation.LineNumber,
|
||||
Failed: summary.HasFailureState(),
|
||||
}
|
||||
}
|
||||
|
||||
//Measurement tests receive a Benchmarker.
|
||||
//
|
||||
//You use the Time() function to time how long the passed in body function takes to run
|
||||
//You use the RecordValue() function to track arbitrary numerical measurements.
|
||||
//The optional info argument is passed to the test reporter and can be used to
|
||||
// provide the measurement data to a custom reporter with context.
|
||||
//
|
||||
//See http://onsi.github.io/ginkgo/#benchmark_tests for more details
|
||||
type Benchmarker interface {
|
||||
Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
|
||||
RecordValue(name string, value float64, info ...interface{})
|
||||
}
|
||||
|
||||
//RunSpecs is the entry point for the Ginkgo test runner.
|
||||
//You must call this within a Golang testing TestX(t *testing.T) function.
|
||||
//
|
||||
//To bootstrap a test suite you can use the Ginkgo CLI:
|
||||
//
|
||||
// ginkgo bootstrap
|
||||
func RunSpecs(t GinkgoTestingT, description string) bool {
|
||||
specReporters := []Reporter{buildDefaultReporter()}
|
||||
return RunSpecsWithCustomReporters(t, description, specReporters)
|
||||
}
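As a hedged illustration of the bootstrap file RunSpecs expects (the package and suite names are assumptions; RegisterFailHandler comes from Gomega, not from this file):

```go
package books_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

func TestBooks(t *testing.T) {
	// Wire Gomega assertion failures into Ginkgo's Fail.
	RegisterFailHandler(Fail)
	RunSpecs(t, "Books Suite")
}
```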
|
||||
|
||||
//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace
|
||||
//RunSpecs() with this method.
|
||||
func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
|
||||
specReporters = append([]Reporter{buildDefaultReporter()}, specReporters...)
|
||||
return RunSpecsWithCustomReporters(t, description, specReporters)
|
||||
}
|
||||
|
||||
//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace
|
||||
//RunSpecs() with this method. Note that parallel tests will not work correctly without the default reporter
|
||||
func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
|
||||
writer := GinkgoWriter.(*writer.Writer)
|
||||
writer.SetStream(config.DefaultReporterConfig.Verbose)
|
||||
reporters := make([]reporters.Reporter, len(specReporters))
|
||||
for i, reporter := range specReporters {
|
||||
reporters[i] = reporter
|
||||
}
|
||||
passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig)
|
||||
if passed && hasFocusedTests {
|
||||
fmt.Println("PASS | FOCUSED")
|
||||
os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
|
||||
}
|
||||
return passed
|
||||
}
|
||||
|
||||
func buildDefaultReporter() Reporter {
|
||||
remoteReportingServer := config.GinkgoConfig.StreamHost
|
||||
if remoteReportingServer == "" {
|
||||
stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor)
|
||||
return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
|
||||
} else {
|
||||
return remote.NewForwardingReporter(remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor())
|
||||
}
|
||||
}
|
||||
|
||||
//Skip notifies Ginkgo that the current spec should be skipped.
|
||||
func Skip(message string, callerSkip ...int) {
|
||||
skip := 0
|
||||
if len(callerSkip) > 0 {
|
||||
skip = callerSkip[0]
|
||||
}
|
||||
|
||||
globalFailer.Skip(message, codelocation.New(skip+1))
|
||||
panic(GINKGO_PANIC)
|
||||
}
|
||||
|
||||
//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
|
||||
func Fail(message string, callerSkip ...int) {
|
||||
skip := 0
|
||||
if len(callerSkip) > 0 {
|
||||
skip = callerSkip[0]
|
||||
}
|
||||
|
||||
globalFailer.Fail(message, codelocation.New(skip+1))
|
||||
panic(GINKGO_PANIC)
|
||||
}
|
||||
|
||||
//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
|
||||
//Since Gomega assertions call Fail, you should put a `defer GinkgoRecover()` at the top of any goroutine that
|
||||
//calls out to Gomega
|
||||
//
|
||||
//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
|
||||
//further assertions from running. This panic must be recovered. Ginkgo does this for you
|
||||
//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...)
|
||||
//
|
||||
//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no
|
||||
//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
|
||||
func GinkgoRecover() {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
globalFailer.Panic(codelocation.New(1), e)
|
||||
}
|
||||
}
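A minimal hedged sketch of the goroutine pattern described above; the spec text and the doWork helper are assumptions for illustration:

```go
package books_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// doWork is a hypothetical stand-in for real asynchronous work.
func doWork() error { return nil }

var _ = Describe("async work", func() {
	It("recovers failures raised on goroutines", func(done Done) {
		go func() {
			// Without this deferred call, a failing assertion on this
			// goroutine would panic past Ginkgo instead of failing the spec.
			defer GinkgoRecover()

			Expect(doWork()).To(Succeed())
			close(done)
		}()
	}, 5.0)
})
```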
|
||||
|
||||
//Describe blocks allow you to organize your specs. A Describe block can contain any number of
|
||||
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
|
||||
//
|
||||
//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
|
||||
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
|
||||
//or method and, within that Describe, outline a number of Contexts.
|
||||
func Describe(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can focus the tests within a describe block using FDescribe
|
||||
func FDescribe(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using PDescribe
|
||||
func PDescribe(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using XDescribe
|
||||
func XDescribe(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//Context blocks allow you to organize your specs. A Context block can contain any number of
|
||||
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
|
||||
//
|
||||
//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
|
||||
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
|
||||
//or method and, within that Describe, outline a number of Contexts.
|
||||
func Context(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can focus the tests within a describe block using FContext
|
||||
func FContext(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using PContext
|
||||
func PContext(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using XContext
|
||||
func XContext(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks
|
||||
//within an It block.
|
||||
//
|
||||
//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a
|
||||
//function that accepts a Done channel. When you do this, you can also provide an optional timeout.
|
||||
func It(text string, body interface{}, timeout ...float64) bool {
|
||||
globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can focus individual Its using FIt
|
||||
func FIt(text string, body interface{}, timeout ...float64) bool {
|
||||
globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Its as pending using PIt
|
||||
func PIt(text string, _ ...interface{}) bool {
|
||||
globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Its as pending using XIt
|
||||
func XIt(text string, _ ...interface{}) bool {
|
||||
globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
|
||||
|
||||
//By allows you to better document large Its.
|
||||
//
|
||||
//Generally you should try to keep your Its short and to the point. This is not always possible, however,
|
||||
//especially in the context of integration tests that capture a particular workflow.
|
||||
//
|
||||
//By allows you to document such flows. By must be called within a runnable node (It, BeforeEach, Measure, etc...)
|
||||
//By will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function.
|
||||
func By(text string, callbacks ...func()) {
|
||||
preamble := "\x1b[1mSTEP\x1b[0m"
|
||||
if config.DefaultReporterConfig.NoColor {
|
||||
preamble = "STEP"
|
||||
}
|
||||
fmt.Fprintln(GinkgoWriter, preamble+": "+text)
|
||||
if len(callbacks) == 1 {
|
||||
callbacks[0]()
|
||||
}
|
||||
if len(callbacks) > 1 {
|
||||
panic("just one callback per By, please")
|
||||
}
|
||||
}
|
||||
|
||||
//Measure blocks run the passed in body function repeatedly (determined by the samples argument)
|
||||
//and accumulate metrics provided to the Benchmarker by the body function.
|
||||
//
|
||||
//The body function must have the signature:
|
||||
// func(b Benchmarker)
|
||||
func Measure(text string, body interface{}, samples int) bool {
|
||||
globalSuite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples)
|
||||
return true
|
||||
}
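A hedged sketch of a Measure block exercising the Benchmarker methods documented earlier; the sorting workload and the one-second threshold are assumptions:

```go
package books_test

import (
	"math/rand"
	"sort"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Measure("sorting a slice", func(b Benchmarker) {
	data := rand.Perm(100000)

	// Time runs the body once per sample and reports its duration.
	runtime := b.Time("sort", func() {
		sort.Ints(data)
	})
	Expect(runtime).To(BeNumerically("<", time.Second), "sorting should stay fast")

	// RecordValue tracks an arbitrary numerical measurement alongside the timings.
	b.RecordValue("input length", float64(len(data)))
}, 10) // run the body 10 times and aggregate the samples
```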
|
||||
|
||||
//You can focus individual Measures using FMeasure
|
||||
func FMeasure(text string, body interface{}, samples int) bool {
|
||||
globalSuite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples)
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Measurements as pending using PMeasure
|
||||
func PMeasure(text string, _ ...interface{}) bool {
|
||||
globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Measurements as pending using XMeasure
|
||||
func XMeasure(text string, _ ...interface{}) bool {
|
||||
globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
|
||||
|
||||
//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each
|
||||
//parallel node process will call BeforeSuite.
|
||||
//
|
||||
//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
|
||||
//
|
||||
//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
|
||||
func BeforeSuite(body interface{}, timeout ...float64) bool {
|
||||
globalSuite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed.
|
||||
//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting.
|
||||
//
|
||||
//When running in parallel, each parallel node process will call AfterSuite.
|
||||
//
|
||||
//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
|
||||
//
|
||||
//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
|
||||
func AfterSuite(body interface{}, timeout ...float64) bool {
|
||||
globalSuite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
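A hedged sketch pairing BeforeSuite and AfterSuite as described above; the database client type and helper are hypothetical stand-ins, not real APIs:

```go
package books_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// testDBClient and connectTestDB are hypothetical stand-ins for a real shared resource.
type testDBClient struct{}

func connectTestDB() (*testDBClient, error) { return &testDBClient{}, nil }
func (c *testDBClient) Close()              {}

var dbClient *testDBClient

var _ = BeforeSuite(func() {
	var err error
	dbClient, err = connectTestDB()
	Expect(err).NotTo(HaveOccurred())
})

var _ = AfterSuite(func() {
	if dbClient != nil {
		dbClient.Close()
	}
})
```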
|
||||
|
||||
//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across
|
||||
//nodes when running tests in parallel. For example, say you have a shared database of which you can only start one instance and which
|
||||
//must be used in your tests. When running in parallel, only one node should set up the database and all other nodes should wait
|
||||
//until that node is done before running.
|
||||
//
|
||||
//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is
|
||||
//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
|
||||
//to the second function (on all the other nodes).
|
||||
//
|
||||
//The functions have the following signatures. The first function (which only runs on node 1) has the signature:
|
||||
//
|
||||
// func() []byte
|
||||
//
|
||||
//or, to run asynchronously:
|
||||
//
|
||||
// func(done Done) []byte
|
||||
//
|
||||
//The byte array returned by the first function is then passed to the second function, which has the signature:
|
||||
//
|
||||
// func(data []byte)
|
||||
//
|
||||
//or, to run asynchronously:
|
||||
//
|
||||
// func(data []byte, done Done)
|
||||
//
|
||||
//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes:
|
||||
//
|
||||
// var dbClient db.Client
|
||||
// var dbRunner db.Runner
|
||||
//
|
||||
// var _ = SynchronizedBeforeSuite(func() []byte {
|
||||
// dbRunner = db.NewRunner()
|
||||
// err := dbRunner.Start()
|
||||
// Ω(err).ShouldNot(HaveOccurred())
|
||||
// return []byte(dbRunner.URL)
|
||||
// }, func(data []byte) {
|
||||
// dbClient = db.NewClient()
|
||||
// err := dbClient.Connect(string(data))
|
||||
// Ω(err).ShouldNot(HaveOccurred())
|
||||
// })
|
||||
func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool {
|
||||
globalSuite.SetSynchronizedBeforeSuiteNode(
|
||||
node1Body,
|
||||
allNodesBody,
|
||||
codelocation.New(1),
|
||||
parseTimeout(timeout...),
|
||||
)
|
||||
return true
|
||||
}
|
||||
|
||||
//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up
|
||||
//external singleton resources shared across nodes when running tests in parallel.
|
||||
//
|
||||
//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1
|
||||
//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until
|
||||
//all other nodes are finished.
|
||||
//
|
||||
//Both functions have the same signature: either func() or func(done Done) to run asynchronously.
|
||||
//
|
||||
//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database
|
||||
//only after all nodes have finished:
|
||||
//
|
||||
// var _ = SynchronizedAfterSuite(func() {
|
||||
// dbClient.Cleanup()
|
||||
// }, func() {
|
||||
// dbRunner.Stop()
|
||||
// })
|
||||
func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool {
|
||||
globalSuite.SetSynchronizedAfterSuiteNode(
|
||||
allNodesBody,
|
||||
node1Body,
|
||||
codelocation.New(1),
|
||||
parseTimeout(timeout...),
|
||||
)
|
||||
return true
|
||||
}
|
||||
|
||||
//BeforeEach blocks are run before It blocks. When multiple BeforeEach blocks are defined in nested
|
||||
//Describe and Context blocks the outermost BeforeEach blocks are run first.
|
||||
//
|
||||
//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
|
||||
//a Done channel
|
||||
func BeforeEach(body interface{}, timeout ...float64) bool {
|
||||
globalSuite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details,
|
||||
//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
|
||||
//
|
||||
//Like It blocks, JustBeforeEach blocks can be made asynchronous by providing a body function that accepts
|
||||
//a Done channel
|
||||
func JustBeforeEach(body interface{}, timeout ...float64) bool {
|
||||
globalSuite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested
|
||||
//Describe and Context blocks the innermost AfterEach blocks are run first.
|
||||
//
|
||||
//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts
|
||||
//a Done channel
|
||||
func AfterEach(body interface{}, timeout ...float64) bool {
|
||||
globalSuite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
func parseTimeout(timeout ...float64) time.Duration {
|
||||
if len(timeout) == 0 {
|
||||
return time.Duration(defaultTimeout * int64(time.Second))
|
||||
} else {
|
||||
return time.Duration(timeout[0] * float64(time.Second))
|
||||
}
|
||||
}
32 vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go (generated, vendored, new file)
@@ -0,0 +1,32 @@
package codelocation
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
func New(skip int) types.CodeLocation {
|
||||
_, file, line, _ := runtime.Caller(skip + 1)
|
||||
stackTrace := PruneStack(string(debug.Stack()), skip)
|
||||
return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
|
||||
}
|
||||
|
||||
func PruneStack(fullStackTrace string, skip int) string {
|
||||
stack := strings.Split(fullStackTrace, "\n")
|
||||
if len(stack) > 2*(skip+1) {
|
||||
stack = stack[2*(skip+1):]
|
||||
}
|
||||
prunedStack := []string{}
|
||||
re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
|
||||
for i := 0; i < len(stack)/2; i++ {
|
||||
if !re.Match([]byte(stack[i*2])) {
|
||||
prunedStack = append(prunedStack, stack[i*2])
|
||||
prunedStack = append(prunedStack, stack[i*2+1])
|
||||
}
|
||||
}
|
||||
return strings.Join(prunedStack, "\n")
|
||||
}
151 vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go (generated, vendored, new file)
@@ -0,0 +1,151 @@
package containernode
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sort"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type subjectOrContainerNode struct {
|
||||
containerNode *ContainerNode
|
||||
subjectNode leafnodes.SubjectNode
|
||||
}
|
||||
|
||||
func (n subjectOrContainerNode) text() string {
|
||||
if n.containerNode != nil {
|
||||
return n.containerNode.Text()
|
||||
} else {
|
||||
return n.subjectNode.Text()
|
||||
}
|
||||
}
|
||||
|
||||
type CollatedNodes struct {
|
||||
Containers []*ContainerNode
|
||||
Subject leafnodes.SubjectNode
|
||||
}
|
||||
|
||||
type ContainerNode struct {
|
||||
text string
|
||||
flag types.FlagType
|
||||
codeLocation types.CodeLocation
|
||||
|
	setupNodes               []leafnodes.BasicNode
	subjectAndContainerNodes []subjectOrContainerNode
}

func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode {
	return &ContainerNode{
		text:         text,
		flag:         flag,
		codeLocation: codeLocation,
	}
}

func (container *ContainerNode) Shuffle(r *rand.Rand) {
	sort.Sort(container)
	permutation := r.Perm(len(container.subjectAndContainerNodes))
	shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes))
	for i, j := range permutation {
		shuffledNodes[i] = container.subjectAndContainerNodes[j]
	}
	container.subjectAndContainerNodes = shuffledNodes
}

func (node *ContainerNode) BackPropagateProgrammaticFocus() bool {
	if node.flag == types.FlagTypePending {
		return false
	}

	shouldUnfocus := false
	for _, subjectOrContainerNode := range node.subjectAndContainerNodes {
		if subjectOrContainerNode.containerNode != nil {
			shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus
		} else {
			shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus
		}
	}

	if shouldUnfocus {
		if node.flag == types.FlagTypeFocused {
			node.flag = types.FlagTypeNone
		}
		return true
	}

	return node.flag == types.FlagTypeFocused
}

func (node *ContainerNode) Collate() []CollatedNodes {
	return node.collate([]*ContainerNode{})
}

func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes {
	collated := make([]CollatedNodes, 0)

	containers := make([]*ContainerNode, len(enclosingContainers))
	copy(containers, enclosingContainers)
	containers = append(containers, node)

	for _, subjectOrContainer := range node.subjectAndContainerNodes {
		if subjectOrContainer.containerNode != nil {
			collated = append(collated, subjectOrContainer.containerNode.collate(containers)...)
		} else {
			collated = append(collated, CollatedNodes{
				Containers: containers,
				Subject:    subjectOrContainer.subjectNode,
			})
		}
	}

	return collated
}

func (node *ContainerNode) PushContainerNode(container *ContainerNode) {
	node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container})
}

func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) {
	node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject})
}

func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) {
	node.setupNodes = append(node.setupNodes, setupNode)
}

func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode {
	nodes := []leafnodes.BasicNode{}
	for _, setupNode := range node.setupNodes {
		if setupNode.Type() == nodeType {
			nodes = append(nodes, setupNode)
		}
	}
	return nodes
}

func (node *ContainerNode) Text() string {
	return node.text
}

func (node *ContainerNode) CodeLocation() types.CodeLocation {
	return node.codeLocation
}

func (node *ContainerNode) Flag() types.FlagType {
	return node.flag
}

//sort.Interface

func (node *ContainerNode) Len() int {
	return len(node.subjectAndContainerNodes)
}

func (node *ContainerNode) Less(i, j int) bool {
	return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text()
}

func (node *ContainerNode) Swap(i, j int) {
	node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i]
}
92 vendor/github.com/onsi/ginkgo/internal/failer/failer.go generated vendored Normal file
@@ -0,0 +1,92 @@
package failer

import (
	"fmt"
	"sync"

	"github.com/onsi/ginkgo/types"
)

type Failer struct {
	lock    *sync.Mutex
	failure types.SpecFailure
	state   types.SpecState
}

func New() *Failer {
	return &Failer{
		lock:  &sync.Mutex{},
		state: types.SpecStatePassed,
	}
}

func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.state == types.SpecStatePassed {
		f.state = types.SpecStatePanicked
		f.failure = types.SpecFailure{
			Message:        "Test Panicked",
			Location:       location,
			ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
		}
	}
}

func (f *Failer) Timeout(location types.CodeLocation) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.state == types.SpecStatePassed {
		f.state = types.SpecStateTimedOut
		f.failure = types.SpecFailure{
			Message:  "Timed out",
			Location: location,
		}
	}
}

func (f *Failer) Fail(message string, location types.CodeLocation) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.state == types.SpecStatePassed {
		f.state = types.SpecStateFailed
		f.failure = types.SpecFailure{
			Message:  message,
			Location: location,
		}
	}
}

func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) {
	f.lock.Lock()
	defer f.lock.Unlock()

	failure := f.failure
	outcome := f.state
	if outcome != types.SpecStatePassed {
		failure.ComponentType = componentType
		failure.ComponentIndex = componentIndex
		failure.ComponentCodeLocation = componentCodeLocation
	}

	f.state = types.SpecStatePassed
	f.failure = types.SpecFailure{}

	return failure, outcome
}

func (f *Failer) Skip(message string, location types.CodeLocation) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.state == types.SpecStatePassed {
		f.state = types.SpecStateSkipped
		f.failure = types.SpecFailure{
			Message:  message,
			Location: location,
		}
	}
}
95 vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go generated vendored Normal file
@@ -0,0 +1,95 @@
package leafnodes

import (
	"math"
	"time"

	"sync"

	"github.com/onsi/ginkgo/types"
)

type benchmarker struct {
	mu           sync.Mutex
	measurements map[string]*types.SpecMeasurement
	orderCounter int
}

func newBenchmarker() *benchmarker {
	return &benchmarker{
		measurements: make(map[string]*types.SpecMeasurement, 0),
	}
}

func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {
	t := time.Now()
	body()
	elapsedTime = time.Since(t)

	b.mu.Lock()
	defer b.mu.Unlock()
	measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", info...)
	measurement.Results = append(measurement.Results, elapsedTime.Seconds())

	return
}

func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
	measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", info...)
	b.mu.Lock()
	defer b.mu.Unlock()
	measurement.Results = append(measurement.Results, value)
}

func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, info ...interface{}) *types.SpecMeasurement {
	measurement, ok := b.measurements[name]
	if !ok {
		var computedInfo interface{}
		computedInfo = nil
		if len(info) > 0 {
			computedInfo = info[0]
		}
		measurement = &types.SpecMeasurement{
			Name:          name,
			Info:          computedInfo,
			Order:         b.orderCounter,
			SmallestLabel: smallestLabel,
			LargestLabel:  largestLabel,
			AverageLabel:  averageLabel,
			Units:         units,
			Results:       make([]float64, 0),
		}
		b.measurements[name] = measurement
		b.orderCounter++
	}

	return measurement
}

func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {
	b.mu.Lock()
	defer b.mu.Unlock()
	for _, measurement := range b.measurements {
		measurement.Smallest = math.MaxFloat64
		measurement.Largest = -math.MaxFloat64
		sum := float64(0)
		sumOfSquares := float64(0)

		for _, result := range measurement.Results {
			if result > measurement.Largest {
				measurement.Largest = result
			}
			if result < measurement.Smallest {
				measurement.Smallest = result
			}
			sum += result
			sumOfSquares += result * result
		}

		n := float64(len(measurement.Results))
		measurement.Average = sum / n
		measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n))
	}

	return b.measurements
}
19 vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go generated vendored Normal file
@@ -0,0 +1,19 @@
package leafnodes

import (
	"github.com/onsi/ginkgo/types"
)

type BasicNode interface {
	Type() types.SpecComponentType
	Run() (types.SpecState, types.SpecFailure)
	CodeLocation() types.CodeLocation
}

type SubjectNode interface {
	BasicNode

	Text() string
	Flag() types.FlagType
	Samples() int
}
46 vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go generated vendored Normal file
@@ -0,0 +1,46 @@
package leafnodes

import (
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
	"time"
)

type ItNode struct {
	runner *runner

	flag types.FlagType
	text string
}

func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode {
	return &ItNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex),
		flag:   flag,
		text:   text,
	}
}

func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
	return node.runner.run()
}

func (node *ItNode) Type() types.SpecComponentType {
	return types.SpecComponentTypeIt
}

func (node *ItNode) Text() string {
	return node.text
}

func (node *ItNode) Flag() types.FlagType {
	return node.flag
}

func (node *ItNode) CodeLocation() types.CodeLocation {
	return node.runner.codeLocation
}

func (node *ItNode) Samples() int {
	return 1
}
61 vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go generated vendored Normal file
@@ -0,0 +1,61 @@
package leafnodes

import (
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
	"reflect"
)

type MeasureNode struct {
	runner *runner

	text        string
	flag        types.FlagType
	samples     int
	benchmarker *benchmarker
}

func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode {
	benchmarker := newBenchmarker()

	wrappedBody := func() {
		reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)})
	}

	return &MeasureNode{
		runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex),

		text:        text,
		flag:        flag,
		samples:     samples,
		benchmarker: benchmarker,
	}
}

func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
	return node.runner.run()
}

func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement {
	return node.benchmarker.measurementsReport()
}

func (node *MeasureNode) Type() types.SpecComponentType {
	return types.SpecComponentTypeMeasure
}

func (node *MeasureNode) Text() string {
	return node.text
}

func (node *MeasureNode) Flag() types.FlagType {
	return node.flag
}

func (node *MeasureNode) CodeLocation() types.CodeLocation {
	return node.runner.codeLocation
}

func (node *MeasureNode) Samples() int {
	return node.samples
}
113 vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go generated vendored Normal file
@@ -0,0 +1,113 @@
package leafnodes

import (
	"fmt"
	"github.com/onsi/ginkgo/internal/codelocation"
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
	"reflect"
	"time"
)

type runner struct {
	isAsync          bool
	asyncFunc        func(chan<- interface{})
	syncFunc         func()
	codeLocation     types.CodeLocation
	timeoutThreshold time.Duration
	nodeType         types.SpecComponentType
	componentIndex   int
	failer           *failer.Failer
}

func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner {
	bodyType := reflect.TypeOf(body)
	if bodyType.Kind() != reflect.Func {
		panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation))
	}

	runner := &runner{
		codeLocation:     codeLocation,
		timeoutThreshold: timeout,
		failer:           failer,
		nodeType:         nodeType,
		componentIndex:   componentIndex,
	}

	switch bodyType.NumIn() {
	case 0:
		runner.syncFunc = body.(func())
		return runner
	case 1:
		if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) {
			panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation))
		}

		wrappedBody := func(done chan<- interface{}) {
			bodyValue := reflect.ValueOf(body)
			bodyValue.Call([]reflect.Value{reflect.ValueOf(done)})
		}

		runner.isAsync = true
		runner.asyncFunc = wrappedBody
		return runner
	}

	panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation))
}

func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) {
	if r.isAsync {
		return r.runAsync()
	} else {
		return r.runSync()
	}
}

func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) {
	done := make(chan interface{}, 1)

	go func() {
		finished := false

		defer func() {
			if e := recover(); e != nil || !finished {
				r.failer.Panic(codelocation.New(2), e)
				select {
				case <-done:
					break
				default:
					close(done)
				}
			}
		}()

		r.asyncFunc(done)
		finished = true
	}()

	select {
	case <-done:
	case <-time.After(r.timeoutThreshold):
		r.failer.Timeout(r.codeLocation)
	}

	failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
	return
}
func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
	finished := false

	defer func() {
		if e := recover(); e != nil || !finished {
			r.failer.Panic(codelocation.New(2), e)
		}

		failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
	}()

	r.syncFunc()
	finished = true

	return
}
41 vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go generated vendored Normal file
@@ -0,0 +1,41 @@
package leafnodes

import (
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
	"time"
)

type SetupNode struct {
	runner *runner
}

func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
	return node.runner.run()
}

func (node *SetupNode) Type() types.SpecComponentType {
	return node.runner.nodeType
}

func (node *SetupNode) CodeLocation() types.CodeLocation {
	return node.runner.codeLocation
}

func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
	return &SetupNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex),
	}
}

func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
	return &SetupNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex),
	}
}

func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
	return &SetupNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex),
	}
}
54 vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go generated vendored Normal file
@@ -0,0 +1,54 @@
package leafnodes

import (
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
	"time"
)

type SuiteNode interface {
	Run(parallelNode int, parallelTotal int, syncHost string) bool
	Passed() bool
	Summary() *types.SetupSummary
}

type simpleSuiteNode struct {
	runner  *runner
	outcome types.SpecState
	failure types.SpecFailure
	runTime time.Duration
}

func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
	t := time.Now()
	node.outcome, node.failure = node.runner.run()
	node.runTime = time.Since(t)

	return node.outcome == types.SpecStatePassed
}

func (node *simpleSuiteNode) Passed() bool {
	return node.outcome == types.SpecStatePassed
}

func (node *simpleSuiteNode) Summary() *types.SetupSummary {
	return &types.SetupSummary{
		ComponentType: node.runner.nodeType,
		CodeLocation:  node.runner.codeLocation,
		State:         node.outcome,
		RunTime:       node.runTime,
		Failure:       node.failure,
	}
}

func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
	return &simpleSuiteNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0),
	}
}

func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
	return &simpleSuiteNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
	}
}
89 vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go generated vendored Normal file
@@ -0,0 +1,89 @@
package leafnodes

import (
	"encoding/json"
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
	"io/ioutil"
	"net/http"
	"time"
)

type synchronizedAfterSuiteNode struct {
	runnerA *runner
	runnerB *runner

	outcome types.SpecState
	failure types.SpecFailure
	runTime time.Duration
}

func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
	return &synchronizedAfterSuiteNode{
		runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
		runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
	}
}

func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
	node.outcome, node.failure = node.runnerA.run()

	if parallelNode == 1 {
		if parallelTotal > 1 {
			node.waitUntilOtherNodesAreDone(syncHost)
		}

		outcome, failure := node.runnerB.run()

		if node.outcome == types.SpecStatePassed {
			node.outcome, node.failure = outcome, failure
		}
	}

	return node.outcome == types.SpecStatePassed
}

func (node *synchronizedAfterSuiteNode) Passed() bool {
	return node.outcome == types.SpecStatePassed
}

func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary {
	return &types.SetupSummary{
		ComponentType: node.runnerA.nodeType,
		CodeLocation:  node.runnerA.codeLocation,
		State:         node.outcome,
		RunTime:       node.runTime,
		Failure:       node.failure,
	}
}

func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) {
	for {
		if node.canRun(syncHost) {
			return
		}

		time.Sleep(50 * time.Millisecond)
	}
}

func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool {
	resp, err := http.Get(syncHost + "/RemoteAfterSuiteData")
	if err != nil || resp.StatusCode != http.StatusOK {
		return false
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return false
	}
	resp.Body.Close()

	afterSuiteData := types.RemoteAfterSuiteData{}
	err = json.Unmarshal(body, &afterSuiteData)
	if err != nil {
		return false
	}

	return afterSuiteData.CanRun
}
182 vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go generated vendored Normal file
@@ -0,0 +1,182 @@
package leafnodes

import (
	"bytes"
	"encoding/json"
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
	"io/ioutil"
	"net/http"
	"reflect"
	"time"
)

type synchronizedBeforeSuiteNode struct {
	runnerA *runner
	runnerB *runner

	data []byte

	outcome types.SpecState
	failure types.SpecFailure
	runTime time.Duration
}

func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
	node := &synchronizedBeforeSuiteNode{}

	node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
	node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)

	return node
}

func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
	t := time.Now()
	defer func() {
		node.runTime = time.Since(t)
	}()

	if parallelNode == 1 {
		node.outcome, node.failure = node.runA(parallelTotal, syncHost)
	} else {
		node.outcome, node.failure = node.waitForA(syncHost)
	}

	if node.outcome != types.SpecStatePassed {
		return false
	}
	node.outcome, node.failure = node.runnerB.run()

	return node.outcome == types.SpecStatePassed
}

func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) {
	outcome, failure := node.runnerA.run()

	if parallelTotal > 1 {
		state := types.RemoteBeforeSuiteStatePassed
		if outcome != types.SpecStatePassed {
			state = types.RemoteBeforeSuiteStateFailed
		}
		json := (types.RemoteBeforeSuiteData{
			Data:  node.data,
			State: state,
		}).ToJSON()
		http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json))
	}

	return outcome, failure
}

func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) {
	failure := func(message string) types.SpecFailure {
		return types.SpecFailure{
			Message:               message,
			Location:              node.runnerA.codeLocation,
			ComponentType:         node.runnerA.nodeType,
			ComponentIndex:        node.runnerA.componentIndex,
			ComponentCodeLocation: node.runnerA.codeLocation,
		}
	}
	for {
		resp, err := http.Get(syncHost + "/BeforeSuiteState")
		if err != nil || resp.StatusCode != http.StatusOK {
			return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state")
		}

		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return types.SpecStateFailed, failure("Failed to read BeforeSuite state")
		}
		resp.Body.Close()

		beforeSuiteData := types.RemoteBeforeSuiteData{}
		err = json.Unmarshal(body, &beforeSuiteData)
		if err != nil {
			return types.SpecStateFailed, failure("Failed to decode BeforeSuite state")
		}

		switch beforeSuiteData.State {
		case types.RemoteBeforeSuiteStatePassed:
			node.data = beforeSuiteData.Data
			return types.SpecStatePassed, types.SpecFailure{}
		case types.RemoteBeforeSuiteStateFailed:
			return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed")
		case types.RemoteBeforeSuiteStateDisappeared:
			return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite")
		}

		time.Sleep(50 * time.Millisecond)
	}

	return types.SpecStateFailed, failure("Shouldn't get here!")
}

func (node *synchronizedBeforeSuiteNode) Passed() bool {
	return node.outcome == types.SpecStatePassed
}

func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary {
	return &types.SetupSummary{
		ComponentType: node.runnerA.nodeType,
		CodeLocation:  node.runnerA.codeLocation,
		State:         node.outcome,
		RunTime:       node.runTime,
		Failure:       node.failure,
	}
}

func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} {
	typeA := reflect.TypeOf(bodyA)
	if typeA.Kind() != reflect.Func {
		panic("SynchronizedBeforeSuite expects a function as its first argument")
	}

	takesNothing := typeA.NumIn() == 0
	takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface
	returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8

	if !((takesNothing || takesADoneChannel) && returnsBytes) {
		panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.")
	}

	if takesADoneChannel {
		return func(done chan<- interface{}) {
			out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)})
			node.data = out[0].Interface().([]byte)
		}
	}

	return func() {
		out := reflect.ValueOf(bodyA).Call([]reflect.Value{})
		node.data = out[0].Interface().([]byte)
	}
}

func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} {
	typeB := reflect.TypeOf(bodyB)
	if typeB.Kind() != reflect.Func {
		panic("SynchronizedBeforeSuite expects a function as its second argument")
	}

	returnsNothing := typeB.NumOut() == 0
	takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8
	takesBytesAndDone := typeB.NumIn() == 2 &&
		typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 &&
		typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface

	if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) {
		panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)")
	}

	if takesBytesAndDone {
		return func(done chan<- interface{}) {
			reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)})
		}
	}

	return func() {
		reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)})
	}
}
250 vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go generated vendored Normal file
@@ -0,0 +1,250 @@
/*

Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel:

	ginkgo -nodes=N

where N is the number of nodes you desire.
*/
package remote

import (
	"time"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/reporters/stenographer"
	"github.com/onsi/ginkgo/types"
)

type configAndSuite struct {
	config  config.GinkgoConfigType
	summary *types.SuiteSummary
}

type Aggregator struct {
	nodeCount    int
	config       config.DefaultReporterConfigType
	stenographer stenographer.Stenographer
	result       chan bool

	suiteBeginnings           chan configAndSuite
	aggregatedSuiteBeginnings []configAndSuite

	beforeSuites           chan *types.SetupSummary
	aggregatedBeforeSuites []*types.SetupSummary

	afterSuites           chan *types.SetupSummary
	aggregatedAfterSuites []*types.SetupSummary

	specCompletions chan *types.SpecSummary
	completedSpecs  []*types.SpecSummary

	suiteEndings           chan *types.SuiteSummary
	aggregatedSuiteEndings []*types.SuiteSummary
	specs                  []*types.SpecSummary

	startTime time.Time
}

func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
	aggregator := &Aggregator{
		nodeCount:    nodeCount,
		result:       result,
		config:       config,
		stenographer: stenographer,

		suiteBeginnings: make(chan configAndSuite, 0),
		beforeSuites:    make(chan *types.SetupSummary, 0),
		afterSuites:     make(chan *types.SetupSummary, 0),
		specCompletions: make(chan *types.SpecSummary, 0),
		suiteEndings:    make(chan *types.SuiteSummary, 0),
	}

	go aggregator.mux()

	return aggregator
}

func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
	aggregator.suiteBeginnings <- configAndSuite{config, summary}
}

func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
	aggregator.beforeSuites <- setupSummary
}

func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
	aggregator.afterSuites <- setupSummary
}

func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
	//noop
}

func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
	aggregator.specCompletions <- specSummary
}

func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
	aggregator.suiteEndings <- summary
}

func (aggregator *Aggregator) mux() {
loop:
	for {
		select {
		case configAndSuite := <-aggregator.suiteBeginnings:
			aggregator.registerSuiteBeginning(configAndSuite)
		case setupSummary := <-aggregator.beforeSuites:
			aggregator.registerBeforeSuite(setupSummary)
		case setupSummary := <-aggregator.afterSuites:
			aggregator.registerAfterSuite(setupSummary)
		case specSummary := <-aggregator.specCompletions:
			aggregator.registerSpecCompletion(specSummary)
		case suite := <-aggregator.suiteEndings:
			finished, passed := aggregator.registerSuiteEnding(suite)
			if finished {
				aggregator.result <- passed
				break loop
			}
		}
	}
}

func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
	aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)

	if len(aggregator.aggregatedSuiteBeginnings) == 1 {
		aggregator.startTime = time.Now()
	}

	if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
		return
	}

	aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)

	numberOfSpecsToRun := 0
	totalNumberOfSpecs := 0
	for _, configAndSuite := range aggregator.aggregatedSuiteBeginnings {
		numberOfSpecsToRun += configAndSuite.summary.NumberOfSpecsThatWillBeRun
		totalNumberOfSpecs += configAndSuite.summary.NumberOfTotalSpecs
	}

	aggregator.stenographer.AnnounceNumberOfSpecs(numberOfSpecsToRun, totalNumberOfSpecs, aggregator.config.Succinct)
	aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
	aggregator.flushCompletedSpecs()
}

func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
	aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
	aggregator.flushCompletedSpecs()
}

func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
	aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
	aggregator.flushCompletedSpecs()
}

func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
	aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
	aggregator.specs = append(aggregator.specs, specSummary)
	aggregator.flushCompletedSpecs()
}

func (aggregator *Aggregator) flushCompletedSpecs() {
	if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
		return
	}

	for _, setupSummary := range aggregator.aggregatedBeforeSuites {
		aggregator.announceBeforeSuite(setupSummary)
	}

	for _, specSummary := range aggregator.completedSpecs {
		aggregator.announceSpec(specSummary)
	}

	for _, setupSummary := range aggregator.aggregatedAfterSuites {
		aggregator.announceAfterSuite(setupSummary)
	}

	aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}
	aggregator.completedSpecs = []*types.SpecSummary{}
	aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
}

func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
	aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
	if setupSummary.State != types.SpecStatePassed {
		aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	}
}

func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
	aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
	if setupSummary.State != types.SpecStatePassed {
		aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	}
}

func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
	if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
		aggregator.stenographer.AnnounceSpecWillRun(specSummary)
	}

	aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)

	switch specSummary.State {
	case types.SpecStatePassed:
		if specSummary.IsMeasurement {
			aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct)
		} else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
			aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct)
		} else {
			aggregator.stenographer.AnnounceSuccesfulSpec(specSummary)
		}

	case types.SpecStatePending:
		aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
	case types.SpecStateSkipped:
		aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	case types.SpecStateTimedOut:
		aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	case types.SpecStatePanicked:
		aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	case types.SpecStateFailed:
		aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
	}
}

func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
	aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
	if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
		return false, false
	}

	aggregatedSuiteSummary := &types.SuiteSummary{}
	aggregatedSuiteSummary.SuiteSucceeded = true

	for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
		if suiteSummary.SuiteSucceeded == false {
			aggregatedSuiteSummary.SuiteSucceeded = false
		}

		aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
		aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
		aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
		aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
		aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
		aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
	}

	aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)

	aggregator.stenographer.SummarizeFailures(aggregator.specs)
	aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)

	return true, aggregatedSuiteSummary.SuiteSucceeded
}
90 vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go generated vendored Normal file
@@ -0,0 +1,90 @@
package remote

import (
	"bytes"
	"encoding/json"
	"io"
	"net/http"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/types"
)

//An interface to net/http's client to allow the injection of fakes under test
type Poster interface {
	Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
}

/*
The ForwardingReporter is a Ginkgo reporter that forwards information to
a Ginkgo remote server.

When streaming parallel test output, this reporter is automatically installed by Ginkgo.

This is accomplished by passing in the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`, the Ginkgo test runner
detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
in place of Ginkgo's DefaultReporter.
*/

type ForwardingReporter struct {
	serverHost        string
	poster            Poster
	outputInterceptor OutputInterceptor
}

func NewForwardingReporter(serverHost string, poster Poster, outputInterceptor OutputInterceptor) *ForwardingReporter {
	return &ForwardingReporter{
		serverHost:        serverHost,
		poster:            poster,
		outputInterceptor: outputInterceptor,
	}
}

func (reporter *ForwardingReporter) post(path string, data interface{}) {
	encoded, _ := json.Marshal(data)
	buffer := bytes.NewBuffer(encoded)
	reporter.poster.Post(reporter.serverHost+path, "application/json", buffer)
}

func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {
	data := struct {
		Config  config.GinkgoConfigType `json:"config"`
		Summary *types.SuiteSummary     `json:"suite-summary"`
	}{
		conf,
		summary,
	}

	reporter.outputInterceptor.StartInterceptingOutput()
	reporter.post("/SpecSuiteWillBegin", data)
}

func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
	output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
	reporter.outputInterceptor.StartInterceptingOutput()
	setupSummary.CapturedOutput = output
	reporter.post("/BeforeSuiteDidRun", setupSummary)
}

func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
	reporter.post("/SpecWillRun", specSummary)
}

func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
	output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
	reporter.outputInterceptor.StartInterceptingOutput()
	specSummary.CapturedOutput = output
	reporter.post("/SpecDidComplete", specSummary)
}

func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
	output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
	reporter.outputInterceptor.StartInterceptingOutput()
	setupSummary.CapturedOutput = output
	reporter.post("/AfterSuiteDidRun", setupSummary)
}

func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
	reporter.outputInterceptor.StopInterceptingAndReturnOutput()
	reporter.post("/SpecSuiteDidEnd", summary)
}
10 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go generated vendored Normal file
@@ -0,0 +1,10 @@
package remote

/*
The OutputInterceptor is used by the ForwardingReporter to
intercept and capture all stdin and stderr output during a test run.
*/
type OutputInterceptor interface {
	StartInterceptingOutput() error
	StopInterceptingAndReturnOutput() (string, error)
}
52 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go generated vendored Normal file
@@ -0,0 +1,52 @@
// +build freebsd openbsd netbsd dragonfly darwin linux

package remote

import (
	"errors"
	"io/ioutil"
	"os"
	"syscall"
)

func NewOutputInterceptor() OutputInterceptor {
	return &outputInterceptor{}
}

type outputInterceptor struct {
	redirectFile *os.File
	intercepting bool
}

func (interceptor *outputInterceptor) StartInterceptingOutput() error {
	if interceptor.intercepting {
		return errors.New("Already intercepting output!")
	}
	interceptor.intercepting = true

	var err error

	interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output")
	if err != nil {
		return err
	}

	syscall.Dup2(int(interceptor.redirectFile.Fd()), 1)
	syscall.Dup2(int(interceptor.redirectFile.Fd()), 2)

	return nil
}

func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
	if !interceptor.intercepting {
		return "", errors.New("Not intercepting output!")
	}

	interceptor.redirectFile.Close()
	output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
	os.Remove(interceptor.redirectFile.Name())

	interceptor.intercepting = false

	return string(output), err
}
33 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go generated vendored Normal file
@@ -0,0 +1,33 @@
// +build windows

package remote

import (
	"errors"
)

func NewOutputInterceptor() OutputInterceptor {
	return &outputInterceptor{}
}

type outputInterceptor struct {
	intercepting bool
}

func (interceptor *outputInterceptor) StartInterceptingOutput() error {
	if interceptor.intercepting {
		return errors.New("Already intercepting output!")
	}
	interceptor.intercepting = true

	// not working on windows...

	return nil
}

func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
	// not working on windows...
	interceptor.intercepting = false

	return "", nil
}
204 vendor/github.com/onsi/ginkgo/internal/remote/server.go generated vendored Normal file
@@ -0,0 +1,204 @@
/*

The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).

*/

package remote

import (
	"encoding/json"
	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/ginkgo/types"
	"io/ioutil"
	"net"
	"net/http"
	"sync"
)

/*
Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
It then forwards that communication to attached reporters.
*/
type Server struct {
	listener        net.Listener
	reporters       []reporters.Reporter
	alives          []func() bool
	lock            *sync.Mutex
	beforeSuiteData types.RemoteBeforeSuiteData
	parallelTotal   int
}

//Create a new server, automatically selecting a port
func NewServer(parallelTotal int) (*Server, error) {
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, err
	}
	return &Server{
		listener:        listener,
		lock:            &sync.Mutex{},
		alives:          make([]func() bool, parallelTotal),
		beforeSuiteData: types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending},
		parallelTotal:   parallelTotal,
	}, nil
}

//Start the server. You don't need to `go s.Start()`, just `s.Start()`
func (server *Server) Start() {
	httpServer := &http.Server{}
	mux := http.NewServeMux()
	httpServer.Handler = mux

	//streaming endpoints
	mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
	mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
	mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
	mux.HandleFunc("/SpecWillRun", server.specWillRun)
	mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
	mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)

	//synchronization endpoints
	mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
	mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)

	go httpServer.Serve(server.listener)
}

//Stop the server
func (server *Server) Close() {
	server.listener.Close()
}

//The address the server can be reached at. Pass this into the `ForwardingReporter`.
func (server *Server) Address() string {
	return "http://" + server.listener.Addr().String()
}

//
// Streaming Endpoints
//

//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
func (server *Server) readAll(request *http.Request) []byte {
	defer request.Body.Close()
	body, _ := ioutil.ReadAll(request.Body)
	return body
}

func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
	server.reporters = reporters
}

func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
	body := server.readAll(request)

	var data struct {
		Config  config.GinkgoConfigType `json:"config"`
		Summary *types.SuiteSummary     `json:"suite-summary"`
	}

	json.Unmarshal(body, &data)

	for _, reporter := range server.reporters {
		reporter.SpecSuiteWillBegin(data.Config, data.Summary)
	}
}

func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
	body := server.readAll(request)
	var setupSummary *types.SetupSummary
	json.Unmarshal(body, &setupSummary)

	for _, reporter := range server.reporters {
		reporter.BeforeSuiteDidRun(setupSummary)
	}
}

func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
	body := server.readAll(request)
	var setupSummary *types.SetupSummary
	json.Unmarshal(body, &setupSummary)

	for _, reporter := range server.reporters {
		reporter.AfterSuiteDidRun(setupSummary)
	}
}

func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
	body := server.readAll(request)
	var specSummary *types.SpecSummary
	json.Unmarshal(body, &specSummary)

	for _, reporter := range server.reporters {
		reporter.SpecWillRun(specSummary)
	}
}

func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
	body := server.readAll(request)
	var specSummary *types.SpecSummary
	json.Unmarshal(body, &specSummary)

	for _, reporter := range server.reporters {
		reporter.SpecDidComplete(specSummary)
	}
}

func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
	body := server.readAll(request)
	var suiteSummary *types.SuiteSummary
	json.Unmarshal(body, &suiteSummary)

	for _, reporter := range server.reporters {
		reporter.SpecSuiteDidEnd(suiteSummary)
	}
}

//
// Synchronization Endpoints
//

func (server *Server) RegisterAlive(node int, alive func() bool) {
	server.lock.Lock()
	defer server.lock.Unlock()
	server.alives[node-1] = alive
}

func (server *Server) nodeIsAlive(node int) bool {
	server.lock.Lock()
	defer server.lock.Unlock()
	alive := server.alives[node-1]
	if alive == nil {
		return true
	}
	return alive()
}

func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
	if request.Method == "POST" {
		dec := json.NewDecoder(request.Body)
		dec.Decode(&(server.beforeSuiteData))
	} else {
		beforeSuiteData := server.beforeSuiteData
		if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
			beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
		}
		enc := json.NewEncoder(writer)
		enc.Encode(beforeSuiteData)
	}
}

func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
	afterSuiteData := types.RemoteAfterSuiteData{
		CanRun: true,
	}
	for i := 2; i <= server.parallelTotal; i++ {
		afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
	}

	enc := json.NewEncoder(writer)
	enc.Encode(afterSuiteData)
}
55 vendor/github.com/onsi/ginkgo/internal/spec/index_computer.go generated vendored Normal file
@@ -0,0 +1,55 @@
package spec

func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
	if length == 0 {
		return 0, 0
	}

	// We have more nodes than tests. Trivial case.
	if parallelTotal >= length {
		if parallelNode > length {
			return 0, 0
		} else {
			return parallelNode - 1, 1
		}
	}

	// This is the minimum amount of tests that a node will be required to run
	minTestsPerNode := length / parallelTotal

	// This is the maximum amount of tests that a node will be required to run
	// The algorithm guarantees that this would be equal to at least the minimum amount
	// and at most one more
	maxTestsPerNode := minTestsPerNode
	if length%parallelTotal != 0 {
		maxTestsPerNode++
	}

	// Number of nodes that will have to run the maximum amount of tests per node
	numMaxLoadNodes := length % parallelTotal

	// Number of nodes that precede the current node and will have to run the maximum amount of tests per node
	var numPrecedingMaxLoadNodes int
	if parallelNode > numMaxLoadNodes {
		numPrecedingMaxLoadNodes = numMaxLoadNodes
	} else {
		numPrecedingMaxLoadNodes = parallelNode - 1
	}

	// Number of nodes that precede the current node and will have to run the minimum amount of tests per node
	var numPrecedingMinLoadNodes int
	if parallelNode <= numMaxLoadNodes {
		numPrecedingMinLoadNodes = 0
	} else {
		numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1
	}

	// Evaluate the test start index and number of tests to run
	startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode
	if parallelNode > numMaxLoadNodes {
		count = minTestsPerNode
	} else {
		count = maxTestsPerNode
	}
	return
}
197 vendor/github.com/onsi/ginkgo/internal/spec/spec.go generated vendored Normal file
@@ -0,0 +1,197 @@
package spec

import (
	"fmt"
	"io"
	"time"

	"github.com/onsi/ginkgo/internal/containernode"
	"github.com/onsi/ginkgo/internal/leafnodes"
	"github.com/onsi/ginkgo/types"
)

type Spec struct {
	subject          leafnodes.SubjectNode
	focused          bool
	announceProgress bool

	containers []*containernode.ContainerNode

	state   types.SpecState
	runTime time.Duration
	failure types.SpecFailure
}

func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
	spec := &Spec{
		subject:          subject,
		containers:       containers,
		focused:          subject.Flag() == types.FlagTypeFocused,
		announceProgress: announceProgress,
	}

	spec.processFlag(subject.Flag())
	for i := len(containers) - 1; i >= 0; i-- {
		spec.processFlag(containers[i].Flag())
	}

	return spec
}

func (spec *Spec) processFlag(flag types.FlagType) {
	if flag == types.FlagTypeFocused {
		spec.focused = true
	} else if flag == types.FlagTypePending {
		spec.state = types.SpecStatePending
	}
}

func (spec *Spec) Skip() {
	spec.state = types.SpecStateSkipped
}

func (spec *Spec) Failed() bool {
	return spec.state == types.SpecStateFailed || spec.state == types.SpecStatePanicked || spec.state == types.SpecStateTimedOut
}

func (spec *Spec) Passed() bool {
	return spec.state == types.SpecStatePassed
}

func (spec *Spec) Pending() bool {
	return spec.state == types.SpecStatePending
}

func (spec *Spec) Skipped() bool {
	return spec.state == types.SpecStateSkipped
}

func (spec *Spec) Focused() bool {
	return spec.focused
}

func (spec *Spec) IsMeasurement() bool {
	return spec.subject.Type() == types.SpecComponentTypeMeasure
}

func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
	componentTexts := make([]string, len(spec.containers)+1)
	componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1)

	for i, container := range spec.containers {
		componentTexts[i] = container.Text()
		componentCodeLocations[i] = container.CodeLocation()
	}

	componentTexts[len(spec.containers)] = spec.subject.Text()
	componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()

	return &types.SpecSummary{
		IsMeasurement:          spec.IsMeasurement(),
		NumberOfSamples:        spec.subject.Samples(),
		ComponentTexts:         componentTexts,
		ComponentCodeLocations: componentCodeLocations,
		State:                  spec.state,
		RunTime:                spec.runTime,
		Failure:                spec.failure,
		Measurements:           spec.measurementsReport(),
		SuiteID:                suiteID,
	}
}

func (spec *Spec) ConcatenatedString() string {
	s := ""
	for _, container := range spec.containers {
		s += container.Text() + " "
	}

	return s + spec.subject.Text()
}

func (spec *Spec) Run(writer io.Writer) {
	startTime := time.Now()
	defer func() {
		spec.runTime = time.Since(startTime)
	}()

	for sample := 0; sample < spec.subject.Samples(); sample++ {
		spec.runSample(sample, writer)

		if spec.state != types.SpecStatePassed {
			return
		}
	}
}

func (spec *Spec) runSample(sample int, writer io.Writer) {
	spec.state = types.SpecStatePassed
	spec.failure = types.SpecFailure{}
	innerMostContainerIndexToUnwind := -1

	defer func() {
		for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
			container := spec.containers[i]
			for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
				spec.announceSetupNode(writer, "AfterEach", container, afterEach)
				afterEachState, afterEachFailure := afterEach.Run()
				if afterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
					spec.state = afterEachState
					spec.failure = afterEachFailure
				}
			}
		}
	}()

	for i, container := range spec.containers {
		innerMostContainerIndexToUnwind = i
		for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
			spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
			spec.state, spec.failure = beforeEach.Run()
			if spec.state != types.SpecStatePassed {
				return
			}
		}
	}

	for _, container := range spec.containers {
		for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
			spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
			spec.state, spec.failure = justBeforeEach.Run()
			if spec.state != types.SpecStatePassed {
				return
			}
		}
	}

	spec.announceSubject(writer, spec.subject)
|
||||
spec.state, spec.failure = spec.subject.Run()
|
||||
}
|
||||
|
||||
func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
|
||||
if spec.announceProgress {
|
||||
s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String())
|
||||
writer.Write([]byte(s))
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) {
|
||||
if spec.announceProgress {
|
||||
nodeType := ""
|
||||
switch subject.Type() {
|
||||
case types.SpecComponentTypeIt:
|
||||
nodeType = "It"
|
||||
case types.SpecComponentTypeMeasure:
|
||||
nodeType = "Measure"
|
||||
}
|
||||
s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String())
|
||||
writer.Write([]byte(s))
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement {
|
||||
if !spec.IsMeasurement() || spec.Failed() {
|
||||
return map[string]*types.SpecMeasurement{}
|
||||
}
|
||||
|
||||
return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport()
|
||||
}
|
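A Measure subject is the only case in which Spec.Run above loops over more than one sample; for a plain It the subject reports a single sample. For illustration, a conventional Measure block as it would appear in a spec file (the measured body, names and threshold are placeholders, and the usual suite bootstrap is assumed):

package mypackage_test

import (
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// Runs the body 10 times; each run is one "sample" in the loop above and the
// collected timings end up in the spec summary's Measurements report.
var _ = Measure("uploading a small file", func(b Benchmarker) {
	runtime := b.Time("runtime", func() {
		time.Sleep(10 * time.Millisecond) // stand-in for the work being measured
	})
	Expect(runtime.Seconds()).To(BeNumerically("<", 2))
}, 10)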
122
vendor/github.com/onsi/ginkgo/internal/spec/specs.go
generated
vendored
Normal file
|
@@ -0,0 +1,122 @@
|
|||
package spec
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"regexp"
|
||||
"sort"
|
||||
)
|
||||
|
||||
type Specs struct {
|
||||
specs []*Spec
|
||||
numberOfOriginalSpecs int
|
||||
hasProgrammaticFocus bool
|
||||
}
|
||||
|
||||
func NewSpecs(specs []*Spec) *Specs {
|
||||
return &Specs{
|
||||
specs: specs,
|
||||
numberOfOriginalSpecs: len(specs),
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) Specs() []*Spec {
|
||||
return e.specs
|
||||
}
|
||||
|
||||
func (e *Specs) NumberOfOriginalSpecs() int {
|
||||
return e.numberOfOriginalSpecs
|
||||
}
|
||||
|
||||
func (e *Specs) HasProgrammaticFocus() bool {
|
||||
return e.hasProgrammaticFocus
|
||||
}
|
||||
|
||||
func (e *Specs) Shuffle(r *rand.Rand) {
|
||||
sort.Sort(e)
|
||||
permutation := r.Perm(len(e.specs))
|
||||
shuffledSpecs := make([]*Spec, len(e.specs))
|
||||
for i, j := range permutation {
|
||||
shuffledSpecs[i] = e.specs[j]
|
||||
}
|
||||
e.specs = shuffledSpecs
|
||||
}
|
||||
|
||||
func (e *Specs) ApplyFocus(description string, focusString string, skipString string) {
|
||||
if focusString == "" && skipString == "" {
|
||||
e.applyProgrammaticFocus()
|
||||
} else {
|
||||
e.applyRegExpFocus(description, focusString, skipString)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) applyProgrammaticFocus() {
|
||||
e.hasProgrammaticFocus = false
|
||||
for _, spec := range e.specs {
|
||||
if spec.Focused() && !spec.Pending() {
|
||||
e.hasProgrammaticFocus = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if e.hasProgrammaticFocus {
|
||||
for _, spec := range e.specs {
|
||||
if !spec.Focused() {
|
||||
spec.Skip()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) applyRegExpFocus(description string, focusString string, skipString string) {
|
||||
for _, spec := range e.specs {
|
||||
matchesFocus := true
|
||||
matchesSkip := false
|
||||
|
||||
toMatch := []byte(description + " " + spec.ConcatenatedString())
|
||||
|
||||
if focusString != "" {
|
||||
focusFilter := regexp.MustCompile(focusString)
|
||||
matchesFocus = focusFilter.Match([]byte(toMatch))
|
||||
}
|
||||
|
||||
if skipString != "" {
|
||||
skipFilter := regexp.MustCompile(skipString)
|
||||
matchesSkip = skipFilter.Match([]byte(toMatch))
|
||||
}
|
||||
|
||||
if !matchesFocus || matchesSkip {
|
||||
spec.Skip()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) SkipMeasurements() {
|
||||
for _, spec := range e.specs {
|
||||
if spec.IsMeasurement() {
|
||||
spec.Skip()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) TrimForParallelization(total int, node int) {
|
||||
startIndex, count := ParallelizedIndexRange(len(e.specs), total, node)
|
||||
if count == 0 {
|
||||
e.specs = make([]*Spec, 0)
|
||||
} else {
|
||||
e.specs = e.specs[startIndex : startIndex+count]
|
||||
}
|
||||
}
|
||||
|
||||
//sort.Interface
|
||||
|
||||
func (e *Specs) Len() int {
|
||||
return len(e.specs)
|
||||
}
|
||||
|
||||
func (e *Specs) Less(i, j int) bool {
|
||||
return e.specs[i].ConcatenatedString() < e.specs[j].ConcatenatedString()
|
||||
}
|
||||
|
||||
func (e *Specs) Swap(i, j int) {
|
||||
e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
|
||||
}
|
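The focus/skip decision in applyRegExpFocus above reduces to matching two optional regular expressions against the suite description joined with the spec's container and subject texts. A small standalone sketch of that rule on plain strings (the texts and patterns are placeholder values, no Ginkgo types involved):

package main

import (
	"fmt"
	"regexp"
)

// shouldRun mirrors applyRegExpFocus for a single spec: it runs unless the
// focus pattern (when set) fails to match, or the skip pattern (when set)
// matches the combined text.
func shouldRun(fullText, focus, skip string) bool {
	matchesFocus := true
	matchesSkip := false
	if focus != "" {
		matchesFocus = regexp.MustCompile(focus).MatchString(fullText)
	}
	if skip != "" {
		matchesSkip = regexp.MustCompile(skip).MatchString(fullText)
	}
	return matchesFocus && !matchesSkip
}

func main() {
	// Suite description followed by the spec's container and subject texts,
	// joined with spaces (placeholder values).
	full := "Backup Suite duplicity-backup when uploading to S3 retries on failure"
	fmt.Println(shouldRun(full, "uploading", "")) // true: focus matches
	fmt.Println(shouldRun(full, "", "S3"))        // false: skip matches
	fmt.Println(shouldRun(full, "restore", ""))   // false: focus does not match
}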
15
vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go
generated
vendored
Normal file
|
@@ -0,0 +1,15 @@
|
|||
package specrunner
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func randomID() string {
|
||||
b := make([]byte, 8)
|
||||
_, err := rand.Read(b)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8])
|
||||
}
|
324
vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
generated
vendored
Normal file
|
@@ -0,0 +1,324 @@
|
|||
package specrunner
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/internal/spec"
|
||||
Writer "github.com/onsi/ginkgo/internal/writer"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
|
||||
"time"
|
||||
)
|
||||
|
||||
type SpecRunner struct {
|
||||
description string
|
||||
beforeSuiteNode leafnodes.SuiteNode
|
||||
specs *spec.Specs
|
||||
afterSuiteNode leafnodes.SuiteNode
|
||||
reporters []reporters.Reporter
|
||||
startTime time.Time
|
||||
suiteID string
|
||||
runningSpec *spec.Spec
|
||||
writer Writer.WriterInterface
|
||||
config config.GinkgoConfigType
|
||||
interrupted bool
|
||||
lock *sync.Mutex
|
||||
}
|
||||
|
||||
func New(description string, beforeSuiteNode leafnodes.SuiteNode, specs *spec.Specs, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
|
||||
return &SpecRunner{
|
||||
description: description,
|
||||
beforeSuiteNode: beforeSuiteNode,
|
||||
specs: specs,
|
||||
afterSuiteNode: afterSuiteNode,
|
||||
reporters: reporters,
|
||||
writer: writer,
|
||||
config: config,
|
||||
suiteID: randomID(),
|
||||
lock: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) Run() bool {
|
||||
if runner.config.DryRun {
|
||||
runner.performDryRun()
|
||||
return true
|
||||
}
|
||||
|
||||
runner.reportSuiteWillBegin()
|
||||
go runner.registerForInterrupts()
|
||||
|
||||
suitePassed := runner.runBeforeSuite()
|
||||
|
||||
if suitePassed {
|
||||
suitePassed = runner.runSpecs()
|
||||
}
|
||||
|
||||
runner.blockForeverIfInterrupted()
|
||||
|
||||
suitePassed = runner.runAfterSuite() && suitePassed
|
||||
|
||||
runner.reportSuiteDidEnd(suitePassed)
|
||||
|
||||
return suitePassed
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) performDryRun() {
|
||||
runner.reportSuiteWillBegin()
|
||||
|
||||
if runner.beforeSuiteNode != nil {
|
||||
summary := runner.beforeSuiteNode.Summary()
|
||||
summary.State = types.SpecStatePassed
|
||||
runner.reportBeforeSuite(summary)
|
||||
}
|
||||
|
||||
for _, spec := range runner.specs.Specs() {
|
||||
summary := spec.Summary(runner.suiteID)
|
||||
runner.reportSpecWillRun(summary)
|
||||
if summary.State == types.SpecStateInvalid {
|
||||
summary.State = types.SpecStatePassed
|
||||
}
|
||||
runner.reportSpecDidComplete(summary, false)
|
||||
}
|
||||
|
||||
if runner.afterSuiteNode != nil {
|
||||
summary := runner.afterSuiteNode.Summary()
|
||||
summary.State = types.SpecStatePassed
|
||||
runner.reportAfterSuite(summary)
|
||||
}
|
||||
|
||||
runner.reportSuiteDidEnd(true)
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) runBeforeSuite() bool {
|
||||
if runner.beforeSuiteNode == nil || runner.wasInterrupted() {
|
||||
return true
|
||||
}
|
||||
|
||||
runner.writer.Truncate()
|
||||
conf := runner.config
|
||||
passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
|
||||
if !passed {
|
||||
runner.writer.DumpOut()
|
||||
}
|
||||
runner.reportBeforeSuite(runner.beforeSuiteNode.Summary())
|
||||
return passed
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) runAfterSuite() bool {
|
||||
if runner.afterSuiteNode == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
runner.writer.Truncate()
|
||||
conf := runner.config
|
||||
passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
|
||||
if !passed {
|
||||
runner.writer.DumpOut()
|
||||
}
|
||||
runner.reportAfterSuite(runner.afterSuiteNode.Summary())
|
||||
return passed
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) runSpecs() bool {
|
||||
suiteFailed := false
|
||||
skipRemainingSpecs := false
|
||||
for _, spec := range runner.specs.Specs() {
|
||||
if runner.wasInterrupted() {
|
||||
return suiteFailed
|
||||
}
|
||||
if skipRemainingSpecs {
|
||||
spec.Skip()
|
||||
}
|
||||
runner.reportSpecWillRun(spec.Summary(runner.suiteID))
|
||||
|
||||
if !spec.Skipped() && !spec.Pending() {
|
||||
runner.runningSpec = spec
|
||||
spec.Run(runner.writer)
|
||||
runner.runningSpec = nil
|
||||
if spec.Failed() {
|
||||
suiteFailed = true
|
||||
}
|
||||
} else if spec.Pending() && runner.config.FailOnPending {
|
||||
suiteFailed = true
|
||||
}
|
||||
|
||||
runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
|
||||
|
||||
if spec.Failed() && runner.config.FailFast {
|
||||
skipRemainingSpecs = true
|
||||
}
|
||||
}
|
||||
|
||||
return !suiteFailed
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
|
||||
if runner.runningSpec == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return runner.runningSpec.Summary(runner.suiteID), true
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) registerForInterrupts() {
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
|
||||
<-c
|
||||
signal.Stop(c)
|
||||
runner.markInterrupted()
|
||||
go runner.registerForHardInterrupts()
|
||||
runner.writer.DumpOutWithHeader(`
|
||||
Received interrupt. Emitting contents of GinkgoWriter...
|
||||
---------------------------------------------------------
|
||||
`)
|
||||
if runner.afterSuiteNode != nil {
|
||||
fmt.Fprint(os.Stderr, `
|
||||
---------------------------------------------------------
|
||||
Received interrupt. Running AfterSuite...
|
||||
^C again to terminate immediately
|
||||
`)
|
||||
runner.runAfterSuite()
|
||||
}
|
||||
runner.reportSuiteDidEnd(false)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) registerForHardInterrupts() {
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
|
||||
<-c
|
||||
fmt.Fprintln(os.Stderr, "\nReceived second interrupt. Shutting down.")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) blockForeverIfInterrupted() {
|
||||
runner.lock.Lock()
|
||||
interrupted := runner.interrupted
|
||||
runner.lock.Unlock()
|
||||
|
||||
if interrupted {
|
||||
select {}
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) markInterrupted() {
|
||||
runner.lock.Lock()
|
||||
defer runner.lock.Unlock()
|
||||
runner.interrupted = true
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) wasInterrupted() bool {
|
||||
runner.lock.Lock()
|
||||
defer runner.lock.Unlock()
|
||||
return runner.interrupted
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportSuiteWillBegin() {
|
||||
runner.startTime = time.Now()
|
||||
summary := runner.summary(true)
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.SpecSuiteWillBegin(runner.config, summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) {
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.BeforeSuiteDidRun(summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) {
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.AfterSuiteDidRun(summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) {
|
||||
runner.writer.Truncate()
|
||||
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.SpecWillRun(summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) {
|
||||
for i := len(runner.reporters) - 1; i >= 1; i-- {
|
||||
runner.reporters[i].SpecDidComplete(summary)
|
||||
}
|
||||
|
||||
if failed {
|
||||
runner.writer.DumpOut()
|
||||
}
|
||||
|
||||
runner.reporters[0].SpecDidComplete(summary)
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
|
||||
summary := runner.summary(success)
|
||||
summary.RunTime = time.Since(runner.startTime)
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.SpecSuiteDidEnd(summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) countSpecsSatisfying(filter func(ex *spec.Spec) bool) (count int) {
|
||||
count = 0
|
||||
|
||||
for _, spec := range runner.specs.Specs() {
|
||||
if filter(spec) {
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) summary(success bool) *types.SuiteSummary {
|
||||
numberOfSpecsThatWillBeRun := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||
return !ex.Skipped() && !ex.Pending()
|
||||
})
|
||||
|
||||
numberOfPendingSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||
return ex.Pending()
|
||||
})
|
||||
|
||||
numberOfSkippedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||
return ex.Skipped()
|
||||
})
|
||||
|
||||
numberOfPassedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||
return ex.Passed()
|
||||
})
|
||||
|
||||
numberOfFailedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||
return ex.Failed()
|
||||
})
|
||||
|
||||
if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
|
||||
numberOfFailedSpecs = numberOfSpecsThatWillBeRun
|
||||
}
|
||||
|
||||
return &types.SuiteSummary{
|
||||
SuiteDescription: runner.description,
|
||||
SuiteSucceeded: success,
|
||||
SuiteID: runner.suiteID,
|
||||
|
||||
NumberOfSpecsBeforeParallelization: runner.specs.NumberOfOriginalSpecs(),
|
||||
NumberOfTotalSpecs: len(runner.specs.Specs()),
|
||||
NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun,
|
||||
NumberOfPendingSpecs: numberOfPendingSpecs,
|
||||
NumberOfSkippedSpecs: numberOfSkippedSpecs,
|
||||
NumberOfPassedSpecs: numberOfPassedSpecs,
|
||||
NumberOfFailedSpecs: numberOfFailedSpecs,
|
||||
}
|
||||
}
|
171
vendor/github.com/onsi/ginkgo/internal/suite/suite.go
generated
vendored
Normal file
|
@@ -0,0 +1,171 @@
|
|||
package suite
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/internal/containernode"
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/internal/spec"
|
||||
"github.com/onsi/ginkgo/internal/specrunner"
|
||||
"github.com/onsi/ginkgo/internal/writer"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type ginkgoTestingT interface {
|
||||
Fail()
|
||||
}
|
||||
|
||||
type Suite struct {
|
||||
topLevelContainer *containernode.ContainerNode
|
||||
currentContainer *containernode.ContainerNode
|
||||
containerIndex int
|
||||
beforeSuiteNode leafnodes.SuiteNode
|
||||
afterSuiteNode leafnodes.SuiteNode
|
||||
runner *specrunner.SpecRunner
|
||||
failer *failer.Failer
|
||||
running bool
|
||||
}
|
||||
|
||||
func New(failer *failer.Failer) *Suite {
|
||||
topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{})
|
||||
|
||||
return &Suite{
|
||||
topLevelContainer: topLevelContainer,
|
||||
currentContainer: topLevelContainer,
|
||||
failer: failer,
|
||||
containerIndex: 1,
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) {
|
||||
if config.ParallelTotal < 1 {
|
||||
panic("ginkgo.parallel.total must be >= 1")
|
||||
}
|
||||
|
||||
if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 {
|
||||
panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total")
|
||||
}
|
||||
|
||||
r := rand.New(rand.NewSource(config.RandomSeed))
|
||||
suite.topLevelContainer.Shuffle(r)
|
||||
specs := suite.generateSpecs(description, config)
|
||||
suite.runner = specrunner.New(description, suite.beforeSuiteNode, specs, suite.afterSuiteNode, reporters, writer, config)
|
||||
|
||||
suite.running = true
|
||||
success := suite.runner.Run()
|
||||
if !success {
|
||||
t.Fail()
|
||||
}
|
||||
return success, specs.HasProgrammaticFocus()
|
||||
}
|
||||
|
||||
func (suite *Suite) generateSpecs(description string, config config.GinkgoConfigType) *spec.Specs {
|
||||
specsSlice := []*spec.Spec{}
|
||||
suite.topLevelContainer.BackPropagateProgrammaticFocus()
|
||||
for _, collatedNodes := range suite.topLevelContainer.Collate() {
|
||||
specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress))
|
||||
}
|
||||
|
||||
specs := spec.NewSpecs(specsSlice)
|
||||
|
||||
if config.RandomizeAllSpecs {
|
||||
specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed)))
|
||||
}
|
||||
|
||||
specs.ApplyFocus(description, config.FocusString, config.SkipString)
|
||||
|
||||
if config.SkipMeasurements {
|
||||
specs.SkipMeasurements()
|
||||
}
|
||||
|
||||
if config.ParallelTotal > 1 {
|
||||
specs.TrimForParallelization(config.ParallelTotal, config.ParallelNode)
|
||||
}
|
||||
|
||||
return specs
|
||||
}
|
||||
|
||||
func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
|
||||
return suite.runner.CurrentSpecSummary()
|
||||
}
|
||||
|
||||
func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.beforeSuiteNode != nil {
|
||||
panic("You may only call BeforeSuite once!")
|
||||
}
|
||||
suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer)
|
||||
}
|
||||
|
||||
func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.afterSuiteNode != nil {
|
||||
panic("You may only call AfterSuite once!")
|
||||
}
|
||||
suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer)
|
||||
}
|
||||
|
||||
func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.beforeSuiteNode != nil {
|
||||
panic("You may only call BeforeSuite once!")
|
||||
}
|
||||
suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
|
||||
}
|
||||
|
||||
func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.afterSuiteNode != nil {
|
||||
panic("You may only call AfterSuite once!")
|
||||
}
|
||||
suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
|
||||
}
|
||||
|
||||
func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) {
|
||||
container := containernode.New(text, flag, codeLocation)
|
||||
suite.currentContainer.PushContainerNode(container)
|
||||
|
||||
previousContainer := suite.currentContainer
|
||||
suite.currentContainer = container
|
||||
suite.containerIndex++
|
||||
|
||||
body()
|
||||
|
||||
suite.containerIndex--
|
||||
suite.currentContainer = previousContainer
|
||||
}
|
||||
|
||||
func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call It from within a Describe or Context", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call Measure from within a Describe or Context", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call BeforeEach from within a Describe or Context", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call JustBeforeEach from within a Describe or Context", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call AfterEach from within a Describe or Context", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||
}
|
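These Push* methods are where the top-level Ginkgo DSL calls land: Describe builds a container node, BeforeEach a setup node, It a subject node, all attached to the global suite. A conventional spec file exercising exactly that path (names and the assertion are placeholders; the usual suite bootstrap is assumed):

package mypackage_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("duplicity-backup config", func() { // -> PushContainerNode
	var value int

	BeforeEach(func() { // -> PushBeforeEachNode
		value = 42
	})

	It("sees what BeforeEach set up", func() { // -> PushItNode
		Expect(value).To(Equal(42))
	})
})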
76
vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
generated
vendored
Normal file
|
@@ -0,0 +1,76 @@
|
|||
package testingtproxy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
type failFunc func(message string, callerSkip ...int)
|
||||
|
||||
func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy {
|
||||
return &ginkgoTestingTProxy{
|
||||
fail: fail,
|
||||
offset: offset,
|
||||
writer: writer,
|
||||
}
|
||||
}
|
||||
|
||||
type ginkgoTestingTProxy struct {
|
||||
fail failFunc
|
||||
offset int
|
||||
writer io.Writer
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
|
||||
t.fail(fmt.Sprintln(args...), t.offset)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
|
||||
t.fail(fmt.Sprintf(format, args...), t.offset)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Fail() {
|
||||
t.fail("failed", t.offset)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) FailNow() {
|
||||
t.fail("failed", t.offset)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
|
||||
t.fail(fmt.Sprintln(args...), t.offset)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
|
||||
t.fail(fmt.Sprintf(format, args...), t.offset)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
|
||||
fmt.Fprintln(t.writer, args...)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
|
||||
fmt.Fprintf(t.writer, format, args...)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Failed() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Parallel() {
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
|
||||
fmt.Println(args...)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
|
||||
fmt.Printf(format, args...)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) SkipNow() {
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Skipped() bool {
|
||||
return false
|
||||
}
|
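This proxy is what sits behind Ginkgo's GinkgoT() helper, so libraries that expect a *testing.T-style reporter can fail a spec through Ginkgo's fail handler instead. A sketch of the common pattern with gomock (assumes the gomock dependency and the usual suite bootstrap; names are placeholders):

package mypackage_test

import (
	"github.com/golang/mock/gomock"
	. "github.com/onsi/ginkgo"
)

var _ = Describe("using the testing.T proxy", func() {
	It("lets gomock report failures through Ginkgo", func() {
		// GinkgoT() returns a proxy like the one above; its Errorf/Fatalf
		// calls are routed to the suite's fail handler rather than *testing.T.
		ctrl := gomock.NewController(GinkgoT())
		defer ctrl.Finish()
		// ... build mocks with ctrl and exercise them here ...
	})
})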
31
vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
generated
vendored
Normal file
|
@@ -0,0 +1,31 @@
|
|||
package writer
|
||||
|
||||
type FakeGinkgoWriter struct {
|
||||
EventStream []string
|
||||
}
|
||||
|
||||
func NewFake() *FakeGinkgoWriter {
|
||||
return &FakeGinkgoWriter{
|
||||
EventStream: []string{},
|
||||
}
|
||||
}
|
||||
|
||||
func (writer *FakeGinkgoWriter) AddEvent(event string) {
|
||||
writer.EventStream = append(writer.EventStream, event)
|
||||
}
|
||||
|
||||
func (writer *FakeGinkgoWriter) Truncate() {
|
||||
writer.EventStream = append(writer.EventStream, "TRUNCATE")
|
||||
}
|
||||
|
||||
func (writer *FakeGinkgoWriter) DumpOut() {
|
||||
writer.EventStream = append(writer.EventStream, "DUMP")
|
||||
}
|
||||
|
||||
func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) {
|
||||
writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header)
|
||||
}
|
||||
|
||||
func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
|
||||
return 0, nil
|
||||
}
|
71
vendor/github.com/onsi/ginkgo/internal/writer/writer.go
generated
vendored
Normal file
|
@@ -0,0 +1,71 @@
|
|||
package writer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type WriterInterface interface {
|
||||
io.Writer
|
||||
|
||||
Truncate()
|
||||
DumpOut()
|
||||
DumpOutWithHeader(header string)
|
||||
}
|
||||
|
||||
type Writer struct {
|
||||
buffer *bytes.Buffer
|
||||
outWriter io.Writer
|
||||
lock *sync.Mutex
|
||||
stream bool
|
||||
}
|
||||
|
||||
func New(outWriter io.Writer) *Writer {
|
||||
return &Writer{
|
||||
buffer: &bytes.Buffer{},
|
||||
lock: &sync.Mutex{},
|
||||
outWriter: outWriter,
|
||||
stream: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Writer) SetStream(stream bool) {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
w.stream = stream
|
||||
}
|
||||
|
||||
func (w *Writer) Write(b []byte) (n int, err error) {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
|
||||
if w.stream {
|
||||
return w.outWriter.Write(b)
|
||||
} else {
|
||||
return w.buffer.Write(b)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Writer) Truncate() {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
w.buffer.Reset()
|
||||
}
|
||||
|
||||
func (w *Writer) DumpOut() {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
if !w.stream {
|
||||
w.buffer.WriteTo(w.outWriter)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Writer) DumpOutWithHeader(header string) {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
if !w.stream && w.buffer.Len() > 0 {
|
||||
w.outWriter.Write([]byte(header))
|
||||
w.buffer.WriteTo(w.outWriter)
|
||||
}
|
||||
}
|
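This Writer is the buffer behind GinkgoWriter: with streaming switched off, output is held back, cleared by Truncate before each spec starts, and flushed by DumpOut only when a spec fails (see reportSpecWillRun and reportSpecDidComplete above). A self-contained sketch of that pattern; it re-implements the switch rather than importing the internal package, which is not importable from outside the Ginkgo tree:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// bufferedWriter mirrors the stream/buffer switch of the Writer above: when
// streaming, bytes go straight to the underlying writer; otherwise they are
// buffered and only written out on demand.
type bufferedWriter struct {
	out    io.Writer
	buf    bytes.Buffer
	stream bool
}

func (w *bufferedWriter) Write(b []byte) (int, error) {
	if w.stream {
		return w.out.Write(b)
	}
	return w.buf.Write(b)
}

func (w *bufferedWriter) Truncate() { w.buf.Reset() }        // called before each spec
func (w *bufferedWriter) DumpOut()  { w.buf.WriteTo(w.out) } // called when a spec fails

func main() {
	w := &bufferedWriter{out: os.Stdout, stream: false}
	fmt.Fprintln(w, "debug output from a spec")
	// Nothing has been printed yet; on a failure the runner dumps the buffer:
	w.DumpOut()
}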
83
vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
generated
vendored
Normal file
|
@@ -0,0 +1,83 @@
|
|||
/*
|
||||
Ginkgo's Default Reporter
|
||||
|
||||
A number of command line flags are available to tweak Ginkgo's default output.
|
||||
|
||||
These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
|
||||
*/
|
||||
package reporters
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type DefaultReporter struct {
|
||||
config config.DefaultReporterConfigType
|
||||
stenographer stenographer.Stenographer
|
||||
specSummaries []*types.SpecSummary
|
||||
}
|
||||
|
||||
func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter {
|
||||
return &DefaultReporter{
|
||||
config: config,
|
||||
stenographer: stenographer,
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct)
|
||||
if config.ParallelTotal > 1 {
|
||||
reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, summary.NumberOfTotalSpecs, summary.NumberOfSpecsBeforeParallelization, reporter.config.Succinct)
|
||||
}
|
||||
reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
|
||||
reporter.stenographer.AnnounceSpecWillRun(specSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
switch specSummary.State {
|
||||
case types.SpecStatePassed:
|
||||
if specSummary.IsMeasurement {
|
||||
reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct)
|
||||
} else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold {
|
||||
reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct)
|
||||
} else {
|
||||
reporter.stenographer.AnnounceSuccesfulSpec(specSummary)
|
||||
}
|
||||
case types.SpecStatePending:
|
||||
reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
|
||||
case types.SpecStateSkipped:
|
||||
reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
case types.SpecStateTimedOut:
|
||||
reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
case types.SpecStatePanicked:
|
||||
reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
case types.SpecStateFailed:
|
||||
reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
}
|
||||
|
||||
reporter.specSummaries = append(reporter.specSummaries, specSummary)
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
reporter.stenographer.SummarizeFailures(reporter.specSummaries)
|
||||
reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct)
|
||||
}
|
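The DefaultReporter is normally assembled by the ginkgo CLI from command-line flags, but it can also be built by hand when a suite is driven through go test. A sketch using only the constructors that appear in this commit; the option values, package and suite names are placeholders:

package mypackage_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/ginkgo/reporters/stenographer"
	. "github.com/onsi/gomega"
)

func TestMyPackage(t *testing.T) {
	RegisterFailHandler(Fail)

	// Hand-picked reporter options instead of the CLI defaults.
	cfg := config.DefaultReporterConfigType{Succinct: true, SlowSpecThreshold: 5.0}
	rep := reporters.NewDefaultReporter(cfg, stenographer.New(true))

	RunSpecsWithCustomReporters(t, "MyPackage Suite", []Reporter{rep})
}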
59
vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go
generated
vendored
Normal file
|
@@ -0,0 +1,59 @@
|
|||
package reporters
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
//FakeReporter is useful for testing purposes
|
||||
type FakeReporter struct {
|
||||
Config config.GinkgoConfigType
|
||||
|
||||
BeginSummary *types.SuiteSummary
|
||||
BeforeSuiteSummary *types.SetupSummary
|
||||
SpecWillRunSummaries []*types.SpecSummary
|
||||
SpecSummaries []*types.SpecSummary
|
||||
AfterSuiteSummary *types.SetupSummary
|
||||
EndSummary *types.SuiteSummary
|
||||
|
||||
SpecWillRunStub func(specSummary *types.SpecSummary)
|
||||
SpecDidCompleteStub func(specSummary *types.SpecSummary)
|
||||
}
|
||||
|
||||
func NewFakeReporter() *FakeReporter {
|
||||
return &FakeReporter{
|
||||
SpecWillRunSummaries: make([]*types.SpecSummary, 0),
|
||||
SpecSummaries: make([]*types.SpecSummary, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
fakeR.Config = config
|
||||
fakeR.BeginSummary = summary
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
fakeR.BeforeSuiteSummary = setupSummary
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
if fakeR.SpecWillRunStub != nil {
|
||||
fakeR.SpecWillRunStub(specSummary)
|
||||
}
|
||||
fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary)
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
if fakeR.SpecDidCompleteStub != nil {
|
||||
fakeR.SpecDidCompleteStub(specSummary)
|
||||
}
|
||||
fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary)
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
fakeR.AfterSuiteSummary = setupSummary
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
fakeR.EndSummary = summary
|
||||
}
|
139
vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
generated
vendored
Normal file
|
@@ -0,0 +1,139 @@
|
|||
/*
|
||||
|
||||
JUnit XML Reporter for Ginkgo
|
||||
|
||||
For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
|
||||
|
||||
*/
|
||||
|
||||
package reporters
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type JUnitTestSuite struct {
|
||||
XMLName xml.Name `xml:"testsuite"`
|
||||
TestCases []JUnitTestCase `xml:"testcase"`
|
||||
Tests int `xml:"tests,attr"`
|
||||
Failures int `xml:"failures,attr"`
|
||||
Time float64 `xml:"time,attr"`
|
||||
}
|
||||
|
||||
type JUnitTestCase struct {
|
||||
Name string `xml:"name,attr"`
|
||||
ClassName string `xml:"classname,attr"`
|
||||
FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"`
|
||||
Skipped *JUnitSkipped `xml:"skipped,omitempty"`
|
||||
Time float64 `xml:"time,attr"`
|
||||
}
|
||||
|
||||
type JUnitFailureMessage struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Message string `xml:",chardata"`
|
||||
}
|
||||
|
||||
type JUnitSkipped struct {
|
||||
XMLName xml.Name `xml:"skipped"`
|
||||
}
|
||||
|
||||
type JUnitReporter struct {
|
||||
suite JUnitTestSuite
|
||||
filename string
|
||||
testSuiteName string
|
||||
}
|
||||
|
||||
//NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed in filename.
|
||||
func NewJUnitReporter(filename string) *JUnitReporter {
|
||||
return &JUnitReporter{
|
||||
filename: filename,
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
reporter.suite = JUnitTestSuite{
|
||||
Tests: summary.NumberOfSpecsThatWillBeRun,
|
||||
TestCases: []JUnitTestCase{},
|
||||
}
|
||||
reporter.testSuiteName = summary.SuiteDescription
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
reporter.handleSetupSummary("BeforeSuite", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
reporter.handleSetupSummary("AfterSuite", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
testCase := JUnitTestCase{
|
||||
Name: name,
|
||||
ClassName: reporter.testSuiteName,
|
||||
}
|
||||
|
||||
testCase.FailureMessage = &JUnitFailureMessage{
|
||||
Type: reporter.failureTypeForState(setupSummary.State),
|
||||
Message: fmt.Sprintf("%s\n%s", setupSummary.Failure.ComponentCodeLocation.String(), setupSummary.Failure.Message),
|
||||
}
|
||||
testCase.Time = setupSummary.RunTime.Seconds()
|
||||
reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
testCase := JUnitTestCase{
|
||||
Name: strings.Join(specSummary.ComponentTexts[1:], " "),
|
||||
ClassName: reporter.testSuiteName,
|
||||
}
|
||||
if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
|
||||
testCase.FailureMessage = &JUnitFailureMessage{
|
||||
Type: reporter.failureTypeForState(specSummary.State),
|
||||
Message: fmt.Sprintf("%s\n%s", specSummary.Failure.ComponentCodeLocation.String(), specSummary.Failure.Message),
|
||||
}
|
||||
}
|
||||
if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
|
||||
testCase.Skipped = &JUnitSkipped{}
|
||||
}
|
||||
testCase.Time = specSummary.RunTime.Seconds()
|
||||
reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
reporter.suite.Time = summary.RunTime.Seconds()
|
||||
reporter.suite.Failures = summary.NumberOfFailedSpecs
|
||||
file, err := os.Create(reporter.filename)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error())
|
||||
}
|
||||
defer file.Close()
|
||||
file.WriteString(xml.Header)
|
||||
encoder := xml.NewEncoder(file)
|
||||
encoder.Indent(" ", " ")
|
||||
err = encoder.Encode(reporter.suite)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string {
|
||||
switch state {
|
||||
case types.SpecStateFailed:
|
||||
return "Failure"
|
||||
case types.SpecStateTimedOut:
|
||||
return "Timeout"
|
||||
case types.SpecStatePanicked:
|
||||
return "Panic"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
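Hooking this reporter into a suite follows the usage documented at the URL in the file comment: build it in the go test bootstrap and pass it next to the default reporter. A sketch (package name, suite description and output path are placeholders):

package mypackage_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/reporters"
	. "github.com/onsi/gomega"
)

func TestMyPackage(t *testing.T) {
	RegisterFailHandler(Fail)
	junitReporter := reporters.NewJUnitReporter("junit.xml")
	RunSpecsWithDefaultAndCustomReporters(t, "MyPackage Suite", []Reporter{junitReporter})
}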
15
vendor/github.com/onsi/ginkgo/reporters/reporter.go
generated
vendored
Normal file
|
@@ -0,0 +1,15 @@
|
|||
package reporters
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type Reporter interface {
|
||||
SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
|
||||
BeforeSuiteDidRun(setupSummary *types.SetupSummary)
|
||||
SpecWillRun(specSummary *types.SpecSummary)
|
||||
SpecDidComplete(specSummary *types.SpecSummary)
|
||||
AfterSuiteDidRun(setupSummary *types.SetupSummary)
|
||||
SpecSuiteDidEnd(summary *types.SuiteSummary)
|
||||
}
|
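Any type implementing this interface can be passed alongside the default reporter (see the RunSpecsWithDefaultAndCustomReporters sketch just above this file's listing). A minimal custom reporter for illustration, built only from types that appear in this commit; what it does with the summaries is a placeholder:

package myreporters

import (
	"fmt"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/types"
)

// failureLogger satisfies the Reporter interface above and simply prints the
// component texts of specs that failed, panicked or timed out.
type failureLogger struct{}

func (r *failureLogger) SpecSuiteWillBegin(cfg config.GinkgoConfigType, summary *types.SuiteSummary) {}
func (r *failureLogger) BeforeSuiteDidRun(setupSummary *types.SetupSummary)                          {}
func (r *failureLogger) SpecWillRun(specSummary *types.SpecSummary)                                  {}
func (r *failureLogger) AfterSuiteDidRun(setupSummary *types.SetupSummary)                           {}
func (r *failureLogger) SpecSuiteDidEnd(summary *types.SuiteSummary)                                 {}

func (r *failureLogger) SpecDidComplete(specSummary *types.SpecSummary) {
	switch specSummary.State {
	case types.SpecStateFailed, types.SpecStateTimedOut, types.SpecStatePanicked:
		fmt.Println("FAILED:", specSummary.ComponentTexts)
	}
}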
64
vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
generated
vendored
Normal file
|
@@ -0,0 +1,64 @@
|
|||
package stenographer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string {
|
||||
var out string
|
||||
|
||||
if len(args) > 0 {
|
||||
out = fmt.Sprintf(format, args...)
|
||||
} else {
|
||||
out = format
|
||||
}
|
||||
|
||||
if s.color {
|
||||
return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle)
|
||||
} else {
|
||||
return out
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printBanner(text string, bannerCharacter string) {
|
||||
fmt.Println(text)
|
||||
fmt.Println(strings.Repeat(bannerCharacter, len(text)))
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printNewLine() {
|
||||
fmt.Println("")
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printDelimiter() {
|
||||
fmt.Println(s.colorize(grayColor, "%s", strings.Repeat("-", 30)))
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) {
|
||||
fmt.Print(s.indent(indentation, format, args...))
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) {
|
||||
fmt.Println(s.indent(indentation, format, args...))
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string {
|
||||
var text string
|
||||
|
||||
if len(args) > 0 {
|
||||
text = fmt.Sprintf(format, args...)
|
||||
} else {
|
||||
text = format
|
||||
}
|
||||
|
||||
stringArray := strings.Split(text, "\n")
|
||||
padding := ""
|
||||
if indentation >= 0 {
|
||||
padding = strings.Repeat(" ", indentation)
|
||||
}
|
||||
for i, s := range stringArray {
|
||||
stringArray[i] = fmt.Sprintf("%s%s", padding, s)
|
||||
}
|
||||
|
||||
return strings.Join(stringArray, "\n")
|
||||
}
|
138
vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
generated
vendored
Normal file
|
@@ -0,0 +1,138 @@
|
|||
package stenographer
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall {
|
||||
return FakeStenographerCall{
|
||||
Method: method,
|
||||
Args: args,
|
||||
}
|
||||
}
|
||||
|
||||
type FakeStenographer struct {
|
||||
calls []FakeStenographerCall
|
||||
lock *sync.Mutex
|
||||
}
|
||||
|
||||
type FakeStenographerCall struct {
|
||||
Method string
|
||||
Args []interface{}
|
||||
}
|
||||
|
||||
func NewFakeStenographer() *FakeStenographer {
|
||||
stenographer := &FakeStenographer{
|
||||
lock: &sync.Mutex{},
|
||||
}
|
||||
stenographer.Reset()
|
||||
return stenographer
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) Calls() []FakeStenographerCall {
|
||||
stenographer.lock.Lock()
|
||||
defer stenographer.lock.Unlock()
|
||||
|
||||
return stenographer.calls
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) Reset() {
|
||||
stenographer.lock.Lock()
|
||||
defer stenographer.lock.Unlock()
|
||||
|
||||
stenographer.calls = make([]FakeStenographerCall, 0)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall {
|
||||
stenographer.lock.Lock()
|
||||
defer stenographer.lock.Unlock()
|
||||
|
||||
results := make([]FakeStenographerCall, 0)
|
||||
for _, call := range stenographer.calls {
|
||||
if call.Method == method {
|
||||
results = append(results, call)
|
||||
}
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) {
|
||||
stenographer.lock.Lock()
|
||||
defer stenographer.lock.Unlock()
|
||||
|
||||
stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...))
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
|
||||
stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
|
||||
stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) {
|
||||
stenographer.registerCall("AnnounceParallelRun", node, nodes, specsToRun, totalSpecs, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
|
||||
stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
|
||||
stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
|
||||
stenographer.registerCall("AnnounceSpecWillRun", spec)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace)
|
||||
}
|
||||
func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) {
|
||||
stenographer.registerCall("AnnounceCapturedOutput", output)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
|
||||
stenographer.registerCall("AnnounceSuccesfulSpec", spec)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
|
||||
stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
|
||||
stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
|
||||
stenographer.registerCall("AnnouncePendingSpec", spec, noisy)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
|
||||
stenographer.registerCall("SummarizeFailures", summaries)
|
||||
}
|
542
vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
generated
vendored
Normal file
|
@@ -0,0 +1,542 @@
|
|||
/*
|
||||
The stenographer is used by Ginkgo's reporters to generate output.
|
||||
|
||||
Move along, nothing to see here.
|
||||
*/
|
||||
|
||||
package stenographer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
const defaultStyle = "\x1b[0m"
|
||||
const boldStyle = "\x1b[1m"
|
||||
const redColor = "\x1b[91m"
|
||||
const greenColor = "\x1b[32m"
|
||||
const yellowColor = "\x1b[33m"
|
||||
const cyanColor = "\x1b[36m"
|
||||
const grayColor = "\x1b[90m"
|
||||
const lightGrayColor = "\x1b[37m"
|
||||
|
||||
type cursorStateType int
|
||||
|
||||
const (
|
||||
cursorStateTop cursorStateType = iota
|
||||
cursorStateStreaming
|
||||
cursorStateMidBlock
|
||||
cursorStateEndBlock
|
||||
)
|
||||
|
||||
type Stenographer interface {
|
||||
AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool)
|
||||
AnnounceAggregatedParallelRun(nodes int, succinct bool)
|
||||
AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool)
|
||||
AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool)
|
||||
AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool)
|
||||
|
||||
AnnounceSpecWillRun(spec *types.SpecSummary)
|
||||
AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
|
||||
AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
|
||||
|
||||
AnnounceCapturedOutput(output string)
|
||||
|
||||
AnnounceSuccesfulSpec(spec *types.SpecSummary)
|
||||
AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool)
|
||||
AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool)
|
||||
|
||||
AnnouncePendingSpec(spec *types.SpecSummary, noisy bool)
|
||||
AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||
|
||||
AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||
AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||
AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||
|
||||
SummarizeFailures(summaries []*types.SpecSummary)
|
||||
}
|
||||
|
||||
func New(color bool) Stenographer {
|
||||
return &consoleStenographer{
|
||||
color: color,
|
||||
cursorState: cursorStateTop,
|
||||
}
|
||||
}
|
||||
|
||||
type consoleStenographer struct {
|
||||
color bool
|
||||
cursorState cursorStateType
|
||||
}
|
||||
|
||||
var alternatingColors = []string{defaultStyle, grayColor}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description))
|
||||
return
|
||||
}
|
||||
s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=")
|
||||
s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed))
|
||||
if randomizingAll {
|
||||
s.print(0, " - Will randomize all specs")
|
||||
}
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "- node #%d ", node)
|
||||
return
|
||||
}
|
||||
s.println(0,
|
||||
"Parallel test node %s/%s. Assigned %s of %s specs.",
|
||||
s.colorize(boldStyle, "%d", node),
|
||||
s.colorize(boldStyle, "%d", nodes),
|
||||
s.colorize(boldStyle, "%d", specsToRun),
|
||||
s.colorize(boldStyle, "%d", totalSpecs),
|
||||
)
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "- %d nodes ", nodes)
|
||||
return
|
||||
}
|
||||
s.println(0,
|
||||
"Running in parallel across %s nodes",
|
||||
s.colorize(boldStyle, "%d", nodes),
|
||||
)
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "- %d/%d specs ", specsToRun, total)
|
||||
s.stream()
|
||||
return
|
||||
}
|
||||
s.println(0,
|
||||
"Will run %s of %s specs",
|
||||
s.colorize(boldStyle, "%d", specsToRun),
|
||||
s.colorize(boldStyle, "%d", total),
|
||||
)
|
||||
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
|
||||
if succinct && summary.SuiteSucceeded {
|
||||
s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime)
|
||||
return
|
||||
}
|
||||
s.printNewLine()
|
||||
color := greenColor
|
||||
if !summary.SuiteSucceeded {
|
||||
color = redColor
|
||||
}
|
||||
s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds()))
|
||||
|
||||
status := ""
|
||||
if summary.SuiteSucceeded {
|
||||
status = s.colorize(boldStyle+greenColor, "SUCCESS!")
|
||||
} else {
|
||||
status = s.colorize(boldStyle+redColor, "FAIL!")
|
||||
}
|
||||
|
||||
s.print(0,
|
||||
"%s -- %s | %s | %s | %s ",
|
||||
status,
|
||||
s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs),
|
||||
s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs),
|
||||
s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs),
|
||||
s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs),
|
||||
)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
|
||||
s.startBlock()
|
||||
for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] {
|
||||
s.print(0, s.colorize(alternatingColors[i%2], text)+" ")
|
||||
}
|
||||
|
||||
indentation := 0
|
||||
if len(spec.ComponentTexts) > 2 {
|
||||
indentation = 1
|
||||
s.printNewLine()
|
||||
}
|
||||
index := len(spec.ComponentTexts) - 1
|
||||
s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index]))
|
||||
s.printNewLine()
|
||||
s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String()))
|
||||
s.printNewLine()
|
||||
s.midBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
s.startBlock()
|
||||
var message string
|
||||
switch summary.State {
|
||||
case types.SpecStateFailed:
|
||||
message = "Failure"
|
||||
case types.SpecStatePanicked:
|
||||
message = "Panic"
|
||||
case types.SpecStateTimedOut:
|
||||
message = "Timeout"
|
||||
}
|
||||
|
||||
s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds()))
|
||||
|
||||
indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true)
|
||||
|
||||
s.printNewLine()
|
||||
s.printFailure(indentation, summary.State, summary.Failure, fullTrace)
|
||||
|
||||
s.endBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceCapturedOutput(output string) {
|
||||
if output == "" {
|
||||
return
|
||||
}
|
||||
|
||||
s.startBlock()
|
||||
s.println(0, output)
|
||||
s.midBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
|
||||
s.print(0, s.colorize(greenColor, "•"))
|
||||
s.stream()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
|
||||
s.printBlockWithMessage(
|
||||
s.colorize(greenColor, "• [SLOW TEST:%.3f seconds]", spec.RunTime.Seconds()),
|
||||
"",
|
||||
spec,
|
||||
succinct,
|
||||
)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
|
||||
s.printBlockWithMessage(
|
||||
s.colorize(greenColor, "• [MEASUREMENT]"),
|
||||
s.measurementReport(spec, succinct),
|
||||
spec,
|
||||
succinct,
|
||||
)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
|
||||
if noisy {
|
||||
s.printBlockWithMessage(
|
||||
s.colorize(yellowColor, "P [PENDING]"),
|
||||
"",
|
||||
spec,
|
||||
false,
|
||||
)
|
||||
} else {
|
||||
s.print(0, s.colorize(yellowColor, "P"))
|
||||
s.stream()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
// Skips at runtime will have a non-empty spec.Failure. All others should be succinct.
|
||||
if succinct || spec.Failure == (types.SpecFailure{}) {
|
||||
s.print(0, s.colorize(cyanColor, "S"))
|
||||
s.stream()
|
||||
} else {
|
||||
s.startBlock()
|
||||
s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
|
||||
|
||||
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
|
||||
|
||||
s.printNewLine()
|
||||
s.printSkip(indentation, spec.Failure)
|
||||
s.endBlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
s.printSpecFailure("•... Timeout", spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
s.printSpecFailure("•! Panic", spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
s.printSpecFailure("• Failure", spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
|
||||
failingSpecs := []*types.SpecSummary{}
|
||||
|
||||
for _, summary := range summaries {
|
||||
if summary.HasFailureState() {
|
||||
failingSpecs = append(failingSpecs, summary)
|
||||
}
|
||||
}
|
||||
|
||||
if len(failingSpecs) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
s.printNewLine()
|
||||
s.printNewLine()
|
||||
plural := "s"
|
||||
if len(failingSpecs) == 1 {
|
||||
plural = ""
|
||||
}
|
||||
s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural))
|
||||
for _, summary := range failingSpecs {
|
||||
s.printNewLine()
|
||||
if summary.HasFailureState() {
|
||||
if summary.TimedOut() {
|
||||
s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] "))
|
||||
} else if summary.Panicked() {
|
||||
s.print(0, s.colorize(redColor+boldStyle, "[Panic!] "))
|
||||
} else if summary.Failed() {
|
||||
s.print(0, s.colorize(redColor+boldStyle, "[Fail] "))
|
||||
}
|
||||
s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true)
|
||||
s.printNewLine()
|
||||
s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) startBlock() {
|
||||
if s.cursorState == cursorStateStreaming {
|
||||
s.printNewLine()
|
||||
s.printDelimiter()
|
||||
} else if s.cursorState == cursorStateMidBlock {
|
||||
s.printNewLine()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) midBlock() {
|
||||
s.cursorState = cursorStateMidBlock
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) endBlock() {
|
||||
s.printDelimiter()
|
||||
s.cursorState = cursorStateEndBlock
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) stream() {
|
||||
s.cursorState = cursorStateStreaming
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) {
|
||||
s.startBlock()
|
||||
s.println(0, header)
|
||||
|
||||
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct)
|
||||
|
||||
if message != "" {
|
||||
s.printNewLine()
|
||||
s.println(indentation, message)
|
||||
}
|
||||
|
||||
s.endBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
s.startBlock()
|
||||
s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
|
||||
|
||||
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
|
||||
|
||||
s.printNewLine()
|
||||
s.printFailure(indentation, spec.State, spec.Failure, fullTrace)
|
||||
s.endBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string {
|
||||
switch failedComponentType {
|
||||
case types.SpecComponentTypeBeforeSuite:
|
||||
return " in Suite Setup (BeforeSuite)"
|
||||
case types.SpecComponentTypeAfterSuite:
|
||||
return " in Suite Teardown (AfterSuite)"
|
||||
case types.SpecComponentTypeBeforeEach:
|
||||
return " in Spec Setup (BeforeEach)"
|
||||
case types.SpecComponentTypeJustBeforeEach:
|
||||
return " in Spec Setup (JustBeforeEach)"
|
||||
case types.SpecComponentTypeAfterEach:
|
||||
return " in Spec Teardown (AfterEach)"
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) {
|
||||
s.println(indentation, s.colorize(cyanColor, spec.Message))
|
||||
s.printNewLine()
|
||||
s.println(indentation, spec.Location.String())
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) {
|
||||
if state == types.SpecStatePanicked {
|
||||
s.println(indentation, s.colorize(redColor+boldStyle, failure.Message))
|
||||
s.println(indentation, s.colorize(redColor, failure.ForwardedPanic))
|
||||
s.println(indentation, failure.Location.String())
|
||||
s.printNewLine()
|
||||
s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
|
||||
s.println(indentation, failure.Location.FullStackTrace)
|
||||
} else {
|
||||
s.println(indentation, s.colorize(redColor, failure.Message))
|
||||
s.printNewLine()
|
||||
s.println(indentation, failure.Location.String())
|
||||
if fullTrace {
|
||||
s.printNewLine()
|
||||
s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
|
||||
s.println(indentation, failure.Location.FullStackTrace)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printSpecContext(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
|
||||
startIndex := 1
|
||||
indentation := 0
|
||||
|
||||
if len(componentTexts) == 1 {
|
||||
startIndex = 0
|
||||
}
|
||||
|
||||
for i := startIndex; i < len(componentTexts); i++ {
|
||||
if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex {
|
||||
color := redColor
|
||||
if state == types.SpecStateSkipped {
|
||||
color = cyanColor
|
||||
}
|
||||
blockType := ""
|
||||
switch failedComponentType {
|
||||
case types.SpecComponentTypeBeforeSuite:
|
||||
blockType = "BeforeSuite"
|
||||
case types.SpecComponentTypeAfterSuite:
|
||||
blockType = "AfterSuite"
|
||||
case types.SpecComponentTypeBeforeEach:
|
||||
blockType = "BeforeEach"
|
||||
case types.SpecComponentTypeJustBeforeEach:
|
||||
blockType = "JustBeforeEach"
|
||||
case types.SpecComponentTypeAfterEach:
|
||||
blockType = "AfterEach"
|
||||
case types.SpecComponentTypeIt:
|
||||
blockType = "It"
|
||||
case types.SpecComponentTypeMeasure:
|
||||
blockType = "Measurement"
|
||||
}
|
||||
if succinct {
|
||||
s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i]))
|
||||
} else {
|
||||
s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType))
|
||||
s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
|
||||
}
|
||||
} else {
|
||||
if succinct {
|
||||
s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i]))
|
||||
} else {
|
||||
s.println(indentation, componentTexts[i])
|
||||
s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
|
||||
}
|
||||
}
|
||||
indentation++
|
||||
}
|
||||
|
||||
return indentation
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
|
||||
indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct)
|
||||
|
||||
if succinct {
|
||||
if len(componentTexts) > 0 {
|
||||
s.printNewLine()
|
||||
s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1]))
|
||||
}
|
||||
s.printNewLine()
|
||||
indentation = 1
|
||||
} else {
|
||||
indentation--
|
||||
}
|
||||
|
||||
return indentation
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string {
|
||||
orderedKeys := make([]string, len(measurements))
|
||||
for key, measurement := range measurements {
|
||||
orderedKeys[measurement.Order] = key
|
||||
}
|
||||
return orderedKeys
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string {
|
||||
if len(spec.Measurements) == 0 {
|
||||
return "Found no measurements"
|
||||
}
|
||||
|
||||
message := []string{}
|
||||
orderedKeys := s.orderedMeasurementKeys(spec.Measurements)
|
||||
|
||||
if succinct {
|
||||
message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
|
||||
for _, key := range orderedKeys {
|
||||
measurement := spec.Measurements[key]
|
||||
message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s",
|
||||
s.colorize(boldStyle, "%s", measurement.Name),
|
||||
measurement.SmallestLabel,
|
||||
s.colorize(greenColor, "%.3f", measurement.Smallest),
|
||||
measurement.Units,
|
||||
measurement.AverageLabel,
|
||||
s.colorize(cyanColor, "%.3f", measurement.Average),
|
||||
measurement.Units,
|
||||
s.colorize(cyanColor, "%.3f", measurement.StdDeviation),
|
||||
measurement.Units,
|
||||
measurement.LargestLabel,
|
||||
s.colorize(redColor, "%.3f", measurement.Largest),
|
||||
measurement.Units,
|
||||
))
|
||||
}
|
||||
} else {
|
||||
message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
|
||||
for _, key := range orderedKeys {
|
||||
measurement := spec.Measurements[key]
|
||||
info := ""
|
||||
if measurement.Info != nil {
|
||||
message = append(message, fmt.Sprintf("%v", measurement.Info))
|
||||
}
|
||||
|
||||
message = append(message, fmt.Sprintf("%s:\n%s %s: %s%s\n %s: %s%s\n %s: %s%s ± %s%s",
|
||||
s.colorize(boldStyle, "%s", measurement.Name),
|
||||
info,
|
||||
measurement.SmallestLabel,
|
||||
s.colorize(greenColor, "%.3f", measurement.Smallest),
|
||||
measurement.Units,
|
||||
measurement.LargestLabel,
|
||||
s.colorize(redColor, "%.3f", measurement.Largest),
|
||||
measurement.Units,
|
||||
measurement.AverageLabel,
|
||||
s.colorize(cyanColor, "%.3f", measurement.Average),
|
||||
measurement.Units,
|
||||
s.colorize(cyanColor, "%.3f", measurement.StdDeviation),
|
||||
measurement.Units,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(message, "\n")
|
||||
}
|
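The Stenographer interface and its console implementation above form Ginkgo's low-level console output layer. As a rough sketch of how the vendored API fits together (assuming the package lives at github.com/onsi/ginkgo/reporters/stenographer; the suite values are invented), a caller could drive it directly:

package main

import (
	"github.com/onsi/ginkgo/reporters/stenographer"
	"github.com/onsi/ginkgo/types"
)

func main() {
	// New(true) enables ANSI colors; the cursor starts at cursorStateTop.
	s := stenographer.New(true)
	s.AnnounceSuite("duplicity-backup", 1234, false, false)
	s.AnnounceNumberOfSpecs(2, 2, false)
	// Illustrative summary values only.
	s.AnnounceSpecRunCompletion(&types.SuiteSummary{
		SuiteSucceeded:             true,
		NumberOfTotalSpecs:         2,
		NumberOfSpecsThatWillBeRun: 2,
		NumberOfPassedSpecs:        2,
	}, false)
}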
92 vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go (generated, vendored, new file)
@@ -0,0 +1,92 @@
/*

TeamCity Reporter for Ginkgo

Makes use of TeamCity's support for Service Messages
http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests
*/

package reporters

import (
	"fmt"
	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/types"
	"io"
	"strings"
)

const (
	messageId = "##teamcity"
)

type TeamCityReporter struct {
	writer        io.Writer
	testSuiteName string
}

func NewTeamCityReporter(writer io.Writer) *TeamCityReporter {
	return &TeamCityReporter{
		writer: writer,
	}
}

func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
	reporter.testSuiteName = escape(summary.SuiteDescription)
	fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']", messageId, reporter.testSuiteName)
}

func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
	reporter.handleSetupSummary("BeforeSuite", setupSummary)
}

func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
	reporter.handleSetupSummary("AfterSuite", setupSummary)
}

func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
	if setupSummary.State != types.SpecStatePassed {
		testName := escape(name)
		fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName)
		message := escape(setupSummary.Failure.ComponentCodeLocation.String())
		details := escape(setupSummary.Failure.Message)
		fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details)
		durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000
		fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds)
	}
}

func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) {
	testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
	fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName)
}

func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) {
	testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))

	if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
		message := escape(specSummary.Failure.ComponentCodeLocation.String())
		details := escape(specSummary.Failure.Message)
		fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details)
	}
	if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
		fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']", messageId, testName)
	}

	durationInMilliseconds := specSummary.RunTime.Seconds() * 1000
	fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds)
}

func (reporter *TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
	fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']", messageId, reporter.testSuiteName)
}

func escape(output string) string {
	output = strings.Replace(output, "|", "||", -1)
	output = strings.Replace(output, "'", "|'", -1)
	output = strings.Replace(output, "\n", "|n", -1)
	output = strings.Replace(output, "\r", "|r", -1)
	output = strings.Replace(output, "[", "|[", -1)
	output = strings.Replace(output, "]", "|]", -1)
	return output
}
15 vendor/github.com/onsi/ginkgo/types/code_location.go (generated, vendored, new file)
@@ -0,0 +1,15 @@
package types

import (
	"fmt"
)

type CodeLocation struct {
	FileName       string
	LineNumber     int
	FullStackTrace string
}

func (codeLocation CodeLocation) String() string {
	return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber)
}
30 vendor/github.com/onsi/ginkgo/types/synchronization.go (generated, vendored, new file)
@@ -0,0 +1,30 @@
package types

import (
	"encoding/json"
)

type RemoteBeforeSuiteState int

const (
	RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota

	RemoteBeforeSuiteStatePending
	RemoteBeforeSuiteStatePassed
	RemoteBeforeSuiteStateFailed
	RemoteBeforeSuiteStateDisappeared
)

type RemoteBeforeSuiteData struct {
	Data  []byte
	State RemoteBeforeSuiteState
}

func (r RemoteBeforeSuiteData) ToJSON() []byte {
	data, _ := json.Marshal(r)
	return data
}

type RemoteAfterSuiteData struct {
	CanRun bool
}
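RemoteBeforeSuiteData and RemoteAfterSuiteData are the payloads Ginkgo's parallel nodes exchange while synchronizing BeforeSuite/AfterSuite. A small sketch of the JSON that ToJSON produces, with illustrative values (a nil Data slice marshals to null):

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/types"
)

func main() {
	state := types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePassed}
	fmt.Println(string(state.ToJSON()))
	// {"Data":null,"State":2} (RemoteBeforeSuiteStatePassed is iota value 2)
}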
143 vendor/github.com/onsi/ginkgo/types/types.go (generated, vendored, new file)
@@ -0,0 +1,143 @@
package types

import "time"

const GINKGO_FOCUS_EXIT_CODE = 197

type SuiteSummary struct {
	SuiteDescription string
	SuiteSucceeded   bool
	SuiteID          string

	NumberOfSpecsBeforeParallelization int
	NumberOfTotalSpecs                 int
	NumberOfSpecsThatWillBeRun         int
	NumberOfPendingSpecs               int
	NumberOfSkippedSpecs               int
	NumberOfPassedSpecs                int
	NumberOfFailedSpecs                int
	RunTime                            time.Duration
}

type SpecSummary struct {
	ComponentTexts         []string
	ComponentCodeLocations []CodeLocation

	State           SpecState
	RunTime         time.Duration
	Failure         SpecFailure
	IsMeasurement   bool
	NumberOfSamples int
	Measurements    map[string]*SpecMeasurement

	CapturedOutput string
	SuiteID        string
}

func (s SpecSummary) HasFailureState() bool {
	return s.State.IsFailure()
}

func (s SpecSummary) TimedOut() bool {
	return s.State == SpecStateTimedOut
}

func (s SpecSummary) Panicked() bool {
	return s.State == SpecStatePanicked
}

func (s SpecSummary) Failed() bool {
	return s.State == SpecStateFailed
}

func (s SpecSummary) Passed() bool {
	return s.State == SpecStatePassed
}

func (s SpecSummary) Skipped() bool {
	return s.State == SpecStateSkipped
}

func (s SpecSummary) Pending() bool {
	return s.State == SpecStatePending
}

type SetupSummary struct {
	ComponentType SpecComponentType
	CodeLocation  CodeLocation

	State   SpecState
	RunTime time.Duration
	Failure SpecFailure

	CapturedOutput string
	SuiteID        string
}

type SpecFailure struct {
	Message        string
	Location       CodeLocation
	ForwardedPanic string

	ComponentIndex        int
	ComponentType         SpecComponentType
	ComponentCodeLocation CodeLocation
}

type SpecMeasurement struct {
	Name  string
	Info  interface{}
	Order int

	Results []float64

	Smallest     float64
	Largest      float64
	Average      float64
	StdDeviation float64

	SmallestLabel string
	LargestLabel  string
	AverageLabel  string
	Units         string
}

type SpecState uint

const (
	SpecStateInvalid SpecState = iota

	SpecStatePending
	SpecStateSkipped
	SpecStatePassed
	SpecStateFailed
	SpecStatePanicked
	SpecStateTimedOut
)

func (state SpecState) IsFailure() bool {
	return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed
}

type SpecComponentType uint

const (
	SpecComponentTypeInvalid SpecComponentType = iota

	SpecComponentTypeContainer
	SpecComponentTypeBeforeSuite
	SpecComponentTypeAfterSuite
	SpecComponentTypeBeforeEach
	SpecComponentTypeJustBeforeEach
	SpecComponentTypeAfterEach
	SpecComponentTypeIt
	SpecComponentTypeMeasure
)

type FlagType uint

const (
	FlagTypeNone FlagType = iota
	FlagTypeFocused
	FlagTypePending
)
3 vendor/github.com/onsi/gomega/.gitignore (generated, vendored, new file)
@@ -0,0 +1,3 @@
.DS_Store
*.test
.
11 vendor/github.com/onsi/gomega/.travis.yml (generated, vendored, new file)
@@ -0,0 +1,11 @@
language: go
go:
- 1.4
- 1.5

install:
- go get -v ./...
- go get github.com/onsi/ginkgo
- go install github.com/onsi/ginkgo/ginkgo

script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --failOnPending --randomizeSuites --race
68 vendor/github.com/onsi/gomega/CHANGELOG.md (generated, vendored, new file)
@@ -0,0 +1,68 @@
## HEAD

Improvements:

- Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks. Can be paired with `Eventually` to safely send a value down a channel with a timeout.
- `Ω`, `Expect`, `Eventually`, and `Consistently` now immediately `panic` if there is no registered fail handler. This is always a mistake that can hide failing tests.
- `Receive()` no longer errors when passed a closed channel; it's perfectly fine to attempt to read from a closed channel, so Ω(c).Should(Receive()) always fails and Ω(c).ShouldNot(Receive()) always passes with a closed channel.
- Added `HavePrefix` and `HaveSuffix` matchers.
- `ghttp` can now handle concurrent requests.
- Added `Succeed` which allows one to write `Ω(MyFunction()).Should(Succeed())`.
- Improved `ghttp`'s behavior around failing assertions and panics:
    - If a registered handler makes a failing assertion `ghttp` will return `500`.
    - If a registered handler panics, `ghttp` will return `500` *and* fail the test. This is new behavior that may cause existing code to break. This code is almost certainly incorrect and creating a false positive.
- `ghttp` servers can take an `io.Writer`. `ghttp` will write a line to the writer when each request arrives.

Bug Fixes:
- gexec: `session.Wait` now uses `EventuallyWithOffset` to get the right line number in the failure.
- `ContainElement` no longer bails if a passed-in matcher errors.

## 1.0 (8/2/2014)

No changes. Dropping "beta" from the version number.

## 1.0.0-beta (7/8/2014)
Breaking Changes:

- Changed OmegaMatcher interface. Instead of having `Match` return failure messages, two new methods `FailureMessage` and `NegatedFailureMessage` are called instead.
- Moved and renamed OmegaFailHandler to types.GomegaFailHandler and OmegaMatcher to types.GomegaMatcher. Any references to OmegaMatcher in any custom matchers will need to be changed to point to types.GomegaMatcher

New Test-Support Features:

- `ghttp`: supports testing http clients
    - Provides a flexible fake http server
    - Provides a collection of chainable http handlers that perform assertions.
- `gbytes`: supports making ordered assertions against streams of data
    - Provides a `gbytes.Buffer`
    - Provides a `Say` matcher to perform ordered assertions against output data
- `gexec`: supports testing external processes
    - Provides support for building Go binaries
    - Wraps and starts `exec.Cmd` commands
    - Makes it easy to assert against stdout and stderr
    - Makes it easy to send signals and wait for processes to exit
    - Provides an `Exit` matcher to assert against exit code.

DSL Changes:

- `Eventually` and `Consistently` can accept `time.Duration` interval and polling inputs.
- The default timeouts for `Eventually` and `Consistently` are now configurable.

New Matchers:

- `ConsistOf`: order-independent assertion against the elements of an array/slice or keys of a map.
- `BeTemporally`: like `BeNumerically` but for `time.Time`
- `HaveKeyWithValue`: asserts a map has a given key with the given value.

Updated Matchers:

- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an object that satisfies the passed-in matcher.
- Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future. For example, `Receive` returns `false` when a channel is closed.

Misc:

- Start using semantic versioning
- Start maintaining changelog

Major refactor:

- Pull out Gomega's internal to `internal`
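The matchers listed in this changelog are ordinary Gomega assertions. A hedged sketch of a few of them in use inside a Ginkgo spec; the It block and all values are illustrative only:

package example_test

import (
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = It("illustrates a few matchers from the changelog", func() {
	Expect(map[string]int{"a": 1}).To(HaveKeyWithValue("a", 1))
	Expect([]int{3, 1, 2}).To(ConsistOf(1, 2, 3))
	Expect("gomega").To(HavePrefix("go"))
	Expect(time.Now()).To(BeTemporally("~", time.Now(), time.Second))

	// BeSent pairs with Eventually to send on a channel with a timeout.
	c := make(chan int, 1)
	Eventually(c).Should(BeSent(42))
	Expect(<-c).To(Equal(42))
})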
20 vendor/github.com/onsi/gomega/LICENSE (generated, vendored, new file)
@@ -0,0 +1,20 @@
Copyright (c) 2013-2014 Onsi Fakhouri

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
17 vendor/github.com/onsi/gomega/README.md (generated, vendored, new file)
@@ -0,0 +1,17 @@
![Gomega: Ginkgo's Preferred Matcher Library](http://onsi.github.io/gomega/images/gomega.png)

[![Build Status](https://travis-ci.org/onsi/gomega.png)](https://travis-ci.org/onsi/gomega)

Jump straight to the [docs](http://onsi.github.io/gomega/) to learn about Gomega, including a list of [all available matchers](http://onsi.github.io/gomega/#provided-matchers).

To discuss Gomega and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega).

## [Ginkgo](http://github.com/onsi/ginkgo): a BDD Testing Framework for Golang

Learn more about Ginkgo [here](http://onsi.github.io/ginkgo/)

## License

Gomega is MIT-Licensed

The `ConsistOf` matcher uses [goraph](https://github.com/amitkgupta/goraph) which is embedded in the source to simplify distribution. goraph has an MIT license.
276 vendor/github.com/onsi/gomega/format/format.go (generated, vendored, new file)
@@ -0,0 +1,276 @@
/*
|
||||
Gomega's format package pretty-prints objects. It explores input objects recursively and generates formatted, indented output with type information.
|
||||
*/
|
||||
package format
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Use MaxDepth to set the maximum recursion depth when printing deeply nested objects
|
||||
var MaxDepth = uint(10)
|
||||
|
||||
/*
|
||||
By default, all objects (even those that implement fmt.Stringer and fmt.GoStringer) are recursively inspected to generate output.
|
||||
|
||||
Set UseStringerRepresentation = true to use GoString (for fmt.GoStringers) or String (for fmt.Stringer) instead.
|
||||
|
||||
Note that GoString and String don't always have all the information you need to understand why a test failed!
|
||||
*/
|
||||
var UseStringerRepresentation = false
|
||||
|
||||
//The default indentation string emitted by the format package
|
||||
var Indent = " "
|
||||
|
||||
var longFormThreshold = 20
|
||||
|
||||
/*
|
||||
Generates a formatted matcher success/failure message of the form:
|
||||
|
||||
Expected
|
||||
<pretty printed actual>
|
||||
<message>
|
||||
<pretty printed expected>
|
||||
|
||||
If expected is omitted, then the message looks like:
|
||||
|
||||
Expected
|
||||
<pretty printed actual>
|
||||
<message>
|
||||
*/
|
||||
func Message(actual interface{}, message string, expected ...interface{}) string {
|
||||
if len(expected) == 0 {
|
||||
return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message)
|
||||
} else {
|
||||
return fmt.Sprintf("Expected\n%s\n%s\n%s", Object(actual, 1), message, Object(expected[0], 1))
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Pretty prints the passed in object at the passed in indentation level.
|
||||
|
||||
Object recurses into deeply nested objects emitting pretty-printed representations of their components.
|
||||
|
||||
Modify format.MaxDepth to control how deep the recursion is allowed to go
|
||||
Set format.UseStringerRepresentation to true to return object.GoString() or object.String() when available instead of
|
||||
recursing into the object.
|
||||
*/
|
||||
func Object(object interface{}, indentation uint) string {
|
||||
indent := strings.Repeat(Indent, int(indentation))
|
||||
value := reflect.ValueOf(object)
|
||||
return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation))
|
||||
}
|
||||
|
||||
/*
|
||||
IndentString takes a string and indents each line by the specified amount.
|
||||
*/
|
||||
func IndentString(s string, indentation uint) string {
|
||||
components := strings.Split(s, "\n")
|
||||
result := ""
|
||||
indent := strings.Repeat(Indent, int(indentation))
|
||||
for i, component := range components {
|
||||
result += indent + component
|
||||
if i < len(components)-1 {
|
||||
result += "\n"
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func formatType(object interface{}) string {
|
||||
t := reflect.TypeOf(object)
|
||||
if t == nil {
|
||||
return "nil"
|
||||
}
|
||||
switch t.Kind() {
|
||||
case reflect.Chan:
|
||||
v := reflect.ValueOf(object)
|
||||
return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
|
||||
case reflect.Ptr:
|
||||
return fmt.Sprintf("%T | %p", object, object)
|
||||
case reflect.Slice:
|
||||
v := reflect.ValueOf(object)
|
||||
return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
|
||||
case reflect.Map:
|
||||
v := reflect.ValueOf(object)
|
||||
return fmt.Sprintf("%T | len:%d", object, v.Len())
|
||||
default:
|
||||
return fmt.Sprintf("%T", object)
|
||||
}
|
||||
}
|
||||
|
||||
func formatValue(value reflect.Value, indentation uint) string {
|
||||
if indentation > MaxDepth {
|
||||
return "..."
|
||||
}
|
||||
|
||||
if isNilValue(value) {
|
||||
return "nil"
|
||||
}
|
||||
|
||||
if UseStringerRepresentation {
|
||||
if value.CanInterface() {
|
||||
obj := value.Interface()
|
||||
switch x := obj.(type) {
|
||||
case fmt.GoStringer:
|
||||
return x.GoString()
|
||||
case fmt.Stringer:
|
||||
return x.String()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch value.Kind() {
|
||||
case reflect.Bool:
|
||||
return fmt.Sprintf("%v", value.Bool())
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return fmt.Sprintf("%v", value.Int())
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return fmt.Sprintf("%v", value.Uint())
|
||||
case reflect.Uintptr:
|
||||
return fmt.Sprintf("0x%x", value.Uint())
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return fmt.Sprintf("%v", value.Float())
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return fmt.Sprintf("%v", value.Complex())
|
||||
case reflect.Chan:
|
||||
return fmt.Sprintf("0x%x", value.Pointer())
|
||||
case reflect.Func:
|
||||
return fmt.Sprintf("0x%x", value.Pointer())
|
||||
case reflect.Ptr:
|
||||
return formatValue(value.Elem(), indentation)
|
||||
case reflect.Slice:
|
||||
if value.Type().Elem().Kind() == reflect.Uint8 {
|
||||
return formatString(value.Bytes(), indentation)
|
||||
}
|
||||
return formatSlice(value, indentation)
|
||||
case reflect.String:
|
||||
return formatString(value.String(), indentation)
|
||||
case reflect.Array:
|
||||
return formatSlice(value, indentation)
|
||||
case reflect.Map:
|
||||
return formatMap(value, indentation)
|
||||
case reflect.Struct:
|
||||
return formatStruct(value, indentation)
|
||||
case reflect.Interface:
|
||||
return formatValue(value.Elem(), indentation)
|
||||
default:
|
||||
if value.CanInterface() {
|
||||
return fmt.Sprintf("%#v", value.Interface())
|
||||
} else {
|
||||
return fmt.Sprintf("%#v", value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func formatString(object interface{}, indentation uint) string {
|
||||
if indentation == 1 {
|
||||
s := fmt.Sprintf("%s", object)
|
||||
components := strings.Split(s, "\n")
|
||||
result := ""
|
||||
for i, component := range components {
|
||||
if i == 0 {
|
||||
result += component
|
||||
} else {
|
||||
result += Indent + component
|
||||
}
|
||||
if i < len(components)-1 {
|
||||
result += "\n"
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s", result)
|
||||
} else {
|
||||
return fmt.Sprintf("%q", object)
|
||||
}
|
||||
}
|
||||
|
||||
func formatSlice(v reflect.Value, indentation uint) string {
|
||||
l := v.Len()
|
||||
result := make([]string, l)
|
||||
longest := 0
|
||||
for i := 0; i < l; i++ {
|
||||
result[i] = formatValue(v.Index(i), indentation+1)
|
||||
if len(result[i]) > longest {
|
||||
longest = len(result[i])
|
||||
}
|
||||
}
|
||||
|
||||
if longest > longFormThreshold {
|
||||
indenter := strings.Repeat(Indent, int(indentation))
|
||||
return fmt.Sprintf("[\n%s%s,\n%s]", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
|
||||
} else {
|
||||
return fmt.Sprintf("[%s]", strings.Join(result, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
func formatMap(v reflect.Value, indentation uint) string {
|
||||
l := v.Len()
|
||||
result := make([]string, l)
|
||||
|
||||
longest := 0
|
||||
for i, key := range v.MapKeys() {
|
||||
value := v.MapIndex(key)
|
||||
result[i] = fmt.Sprintf("%s: %s", formatValue(key, 0), formatValue(value, indentation+1))
|
||||
if len(result[i]) > longest {
|
||||
longest = len(result[i])
|
||||
}
|
||||
}
|
||||
|
||||
if longest > longFormThreshold {
|
||||
indenter := strings.Repeat(Indent, int(indentation))
|
||||
return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
|
||||
} else {
|
||||
return fmt.Sprintf("{%s}", strings.Join(result, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
func formatStruct(v reflect.Value, indentation uint) string {
|
||||
t := v.Type()
|
||||
|
||||
l := v.NumField()
|
||||
result := []string{}
|
||||
longest := 0
|
||||
for i := 0; i < l; i++ {
|
||||
structField := t.Field(i)
|
||||
fieldEntry := v.Field(i)
|
||||
representation := fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry, indentation+1))
|
||||
result = append(result, representation)
|
||||
if len(representation) > longest {
|
||||
longest = len(representation)
|
||||
}
|
||||
}
|
||||
if longest > longFormThreshold {
|
||||
indenter := strings.Repeat(Indent, int(indentation))
|
||||
return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
|
||||
} else {
|
||||
return fmt.Sprintf("{%s}", strings.Join(result, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
func isNilValue(a reflect.Value) bool {
|
||||
switch a.Kind() {
|
||||
case reflect.Invalid:
|
||||
return true
|
||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return a.IsNil()
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func isNil(a interface{}) bool {
|
||||
if a == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
switch reflect.TypeOf(a).Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return reflect.ValueOf(a).IsNil()
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
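The format package above produces the pretty-printed values in Gomega's failure messages. A small sketch of calling it directly; the output shown in the comment is approximate and depends on the Indent and MaxDepth settings above:

package main

import (
	"fmt"

	"github.com/onsi/gomega/format"
)

func main() {
	fmt.Println(format.Message([]int{1, 2}, "to equal", []int{1, 3}))
	// Expected
	//     <[]int | len:2, cap:2>: [1, 2]
	// to equal
	//     <[]int | len:2, cap:2>: [1, 3]
}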
335 vendor/github.com/onsi/gomega/gomega_dsl.go (generated, vendored, new file)
@@ -0,0 +1,335 @@
/*
|
||||
Gomega is the Ginkgo BDD-style testing framework's preferred matcher library.
|
||||
|
||||
The godoc documentation describes Gomega's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/gomega/
|
||||
|
||||
Gomega on Github: http://github.com/onsi/gomega
|
||||
|
||||
Learn more about Ginkgo online: http://onsi.github.io/ginkgo
|
||||
|
||||
Ginkgo on Github: http://github.com/onsi/ginkgo
|
||||
|
||||
Gomega is MIT-Licensed
|
||||
*/
|
||||
package gomega
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/gomega/internal/assertion"
|
||||
"github.com/onsi/gomega/internal/asyncassertion"
|
||||
"github.com/onsi/gomega/internal/testingtsupport"
|
||||
"github.com/onsi/gomega/types"
|
||||
)
|
||||
|
||||
const GOMEGA_VERSION = "1.0"
|
||||
|
||||
const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
|
||||
If you're using Ginkgo then you probably forgot to put your assertion in an It().
|
||||
Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT().
|
||||
`
|
||||
|
||||
var globalFailHandler types.GomegaFailHandler
|
||||
|
||||
var defaultEventuallyTimeout = time.Second
|
||||
var defaultEventuallyPollingInterval = 10 * time.Millisecond
|
||||
var defaultConsistentlyDuration = 100 * time.Millisecond
|
||||
var defaultConsistentlyPollingInterval = 10 * time.Millisecond
|
||||
|
||||
//RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails
|
||||
//the fail handler passed into RegisterFailHandler is called.
|
||||
func RegisterFailHandler(handler types.GomegaFailHandler) {
|
||||
globalFailHandler = handler
|
||||
}
|
||||
|
||||
//RegisterTestingT connects Gomega to Golang's XUnit style
|
||||
//Testing.T tests. You'll need to call this at the top of each XUnit style test:
|
||||
//
|
||||
// func TestFarmHasCow(t *testing.T) {
|
||||
// RegisterTestingT(t)
|
||||
//
|
||||
// f := farm.New([]string{"Cow", "Horse"})
|
||||
// Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
|
||||
// }
|
||||
//
|
||||
// Note that this *testing.T is registered *globally* by Gomega (this is why you don't have to
|
||||
// pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests
|
||||
// in parallel as the global fail handler cannot point to more than one testing.T at a time.
|
||||
//
|
||||
// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*).
|
||||
func RegisterTestingT(t types.GomegaTestingT) {
|
||||
RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailHandler(t))
|
||||
}
|
||||
|
||||
//InterceptGomegaFailures runs a given callback and returns an array of
|
||||
//failure messages generated by any Gomega assertions within the callback.
|
||||
//
|
||||
//This is accomplished by temporarily replacing the *global* fail handler
|
||||
//with a fail handler that simply annotates failures. The original fail handler
|
||||
//is reset when InterceptGomegaFailures returns.
|
||||
//
|
||||
//This is most useful when testing custom matchers, but can also be used to check
|
||||
//on a value using a Gomega assertion without causing a test failure.
|
||||
func InterceptGomegaFailures(f func()) []string {
|
||||
originalHandler := globalFailHandler
|
||||
failures := []string{}
|
||||
RegisterFailHandler(func(message string, callerSkip ...int) {
|
||||
failures = append(failures, message)
|
||||
})
|
||||
f()
|
||||
RegisterFailHandler(originalHandler)
|
||||
return failures
|
||||
}
|
||||
|
||||
//Ω wraps an actual value allowing assertions to be made on it:
|
||||
// Ω("foo").Should(Equal("foo"))
|
||||
//
|
||||
//If Ω is passed more than one argument it will pass the *first* argument to the matcher.
|
||||
//All subsequent arguments will be required to be nil/zero.
|
||||
//
|
||||
//This is convenient if you want to make an assertion on a method/function that returns
|
||||
//a value and an error - a common pattern in Go.
|
||||
//
|
||||
//For example, given a function with signature:
|
||||
// func MyAmazingThing() (int, error)
|
||||
//
|
||||
//Then:
|
||||
// Ω(MyAmazingThing()).Should(Equal(3))
|
||||
//Will succeed only if `MyAmazingThing()` returns `(3, nil)`
|
||||
//
|
||||
//Ω and Expect are identical
|
||||
func Ω(actual interface{}, extra ...interface{}) GomegaAssertion {
|
||||
return ExpectWithOffset(0, actual, extra...)
|
||||
}
|
||||
|
||||
//Expect wraps an actual value allowing assertions to be made on it:
|
||||
// Expect("foo").To(Equal("foo"))
|
||||
//
|
||||
//If Expect is passed more than one argument it will pass the *first* argument to the matcher.
|
||||
//All subsequent arguments will be required to be nil/zero.
|
||||
//
|
||||
//This is convenient if you want to make an assertion on a method/function that returns
|
||||
//a value and an error - a common pattern in Go.
|
||||
//
|
||||
//For example, given a function with signature:
|
||||
// func MyAmazingThing() (int, error)
|
||||
//
|
||||
//Then:
|
||||
// Expect(MyAmazingThing()).Should(Equal(3))
|
||||
//Will succeed only if `MyAmazingThing()` returns `(3, nil)`
|
||||
//
|
||||
//Expect and Ω are identical
|
||||
func Expect(actual interface{}, extra ...interface{}) GomegaAssertion {
|
||||
return ExpectWithOffset(0, actual, extra...)
|
||||
}
|
||||
|
||||
//ExpectWithOffset wraps an actual value allowing assertions to be made on it:
|
||||
// ExpectWithOffset(1, "foo").To(Equal("foo"))
|
||||
//
|
||||
//Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument;
|
||||
//this is used to modify the call-stack offset when computing line numbers.
|
||||
//
|
||||
//This is most useful in helper functions that make assertions. If you want Gomega's
|
||||
//error message to refer to the calling line in the test (as opposed to the line in the helper function)
|
||||
//set the first argument of `ExpectWithOffset` appropriately.
|
||||
func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) GomegaAssertion {
|
||||
if globalFailHandler == nil {
|
||||
panic(nilFailHandlerPanic)
|
||||
}
|
||||
return assertion.New(actual, globalFailHandler, offset, extra...)
|
||||
}
|
||||
|
||||
//Eventually wraps an actual value allowing assertions to be made on it.
|
||||
//The assertion is tried periodically until it passes or a timeout occurs.
|
||||
//
|
||||
//Both the timeout and polling interval are configurable as optional arguments:
|
||||
//The first optional argument is the timeout
|
||||
//The second optional argument is the polling interval
|
||||
//
|
||||
//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the
|
||||
//last case they are interpreted as seconds.
|
||||
//
|
||||
//If Eventually is passed an actual that is a function taking no arguments and returning at least one value,
|
||||
//then Eventually will call the function periodically and try the matcher against the function's first return value.
|
||||
//
|
||||
//Example:
|
||||
//
|
||||
// Eventually(func() int {
|
||||
// return thingImPolling.Count()
|
||||
// }).Should(BeNumerically(">=", 17))
|
||||
//
|
||||
//Note that this example could be rewritten:
|
||||
//
|
||||
// Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17))
|
||||
//
|
||||
//If the function returns more than one value, then Eventually will pass the first value to the matcher and
|
||||
//assert that all other values are nil/zero.
|
||||
//This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go.
|
||||
//
|
||||
//For example, consider a method that returns a value and an error:
|
||||
// func FetchFromDB() (string, error)
|
||||
//
|
||||
//Then
|
||||
// Eventually(FetchFromDB).Should(Equal("hasselhoff"))
|
||||
//
|
||||
//Will pass only if the returned error is nil and the returned string passes the matcher.
|
||||
//
|
||||
//Eventually's default timeout is 1 second, and its default polling interval is 10ms
|
||||
func Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
|
||||
return EventuallyWithOffset(0, actual, intervals...)
|
||||
}
|
||||
|
||||
//EventuallyWithOffset operates like Eventually but takes an additional
|
||||
//initial argument to indicate an offset in the call stack. This is useful when building helper
|
||||
//functions that contain matchers. To learn more, read about `ExpectWithOffset`.
|
||||
func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
|
||||
if globalFailHandler == nil {
|
||||
panic(nilFailHandlerPanic)
|
||||
}
|
||||
timeoutInterval := defaultEventuallyTimeout
|
||||
pollingInterval := defaultEventuallyPollingInterval
|
||||
if len(intervals) > 0 {
|
||||
timeoutInterval = toDuration(intervals[0])
|
||||
}
|
||||
if len(intervals) > 1 {
|
||||
pollingInterval = toDuration(intervals[1])
|
||||
}
|
||||
return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailHandler, timeoutInterval, pollingInterval, offset)
|
||||
}
|
||||
|
||||
//Consistently wraps an actual value allowing assertions to be made on it.
|
||||
//The assertion is tried periodically and is required to pass for a period of time.
|
||||
//
|
||||
//Both the total time and polling interval are configurable as optional arguments:
|
||||
//The first optional argument is the duration that Consistently will run for
|
||||
//The second optional argument is the polling interval
|
||||
//
|
||||
//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the
|
||||
//last case they are interpreted as seconds.
|
||||
//
|
||||
//If Consistently is passed an actual that is a function taking no arguments and returning at least one value,
|
||||
//then Consistently will call the function periodically and try the matcher against the function's first return value.
|
||||
//
|
||||
//If the function returns more than one value, then Consistently will pass the first value to the matcher and
|
||||
//assert that all other values are nil/zero.
|
||||
//This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go.
|
||||
//
|
||||
//Consistently is useful in cases where you want to assert that something *does not happen* over a period of time.
|
||||
//For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could:
|
||||
//
|
||||
// Consistently(channel).ShouldNot(Receive())
|
||||
//
|
||||
//Consistently's default duration is 100ms, and its default polling interval is 10ms
|
||||
func Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
|
||||
return ConsistentlyWithOffset(0, actual, intervals...)
|
||||
}
|
||||
|
||||
//ConsistentlyWithOffset operates like Consistently but takes an additional
|
||||
//initial argument to indicate an offset in the call stack. This is useful when building helper
|
||||
//functions that contain matchers. To learn more, read about `ExpectWithOffset`.
|
||||
func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
|
||||
if globalFailHandler == nil {
|
||||
panic(nilFailHandlerPanic)
|
||||
}
|
||||
timeoutInterval := defaultConsistentlyDuration
|
||||
pollingInterval := defaultConsistentlyPollingInterval
|
||||
if len(intervals) > 0 {
|
||||
timeoutInterval = toDuration(intervals[0])
|
||||
}
|
||||
if len(intervals) > 1 {
|
||||
pollingInterval = toDuration(intervals[1])
|
||||
}
|
||||
return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailHandler, timeoutInterval, pollingInterval, offset)
|
||||
}
|
||||
|
||||
//Set the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses.
|
||||
func SetDefaultEventuallyTimeout(t time.Duration) {
|
||||
defaultEventuallyTimeout = t
|
||||
}
|
||||
|
||||
//Set the default polling interval for Eventually.
|
||||
func SetDefaultEventuallyPollingInterval(t time.Duration) {
|
||||
defaultEventuallyPollingInterval = t
|
||||
}
|
||||
|
||||
//Set the default duration for Consistently. Consistently will verify that your condition is satisfied for this long.
|
||||
func SetDefaultConsistentlyDuration(t time.Duration) {
|
||||
defaultConsistentlyDuration = t
|
||||
}
|
||||
|
||||
//Set the default polling interval for Consistently.
|
||||
func SetDefaultConsistentlyPollingInterval(t time.Duration) {
|
||||
defaultConsistentlyPollingInterval = t
|
||||
}
|
||||
|
||||
//GomegaAsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against
|
||||
//the matcher passed to the Should and ShouldNot methods.
|
||||
//
|
||||
//Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to
|
||||
//fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more
|
||||
//descriptive
|
||||
//
|
||||
//Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed.
|
||||
//
|
||||
//Example:
|
||||
//
|
||||
// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
|
||||
// Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.")
|
||||
type GomegaAsyncAssertion interface {
|
||||
Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
|
||||
ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
|
||||
}
|
||||
|
||||
//GomegaAssertion is returned by Ω and Expect and compares the actual value to the matcher
|
||||
//passed to the Should/ShouldNot and To/ToNot/NotTo methods.
|
||||
//
|
||||
//Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect
|
||||
//though this is not enforced.
|
||||
//
|
||||
//All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf()
|
||||
//and is used to annotate failure messages.
|
||||
//
|
||||
//All methods return a bool that is true if the assertion passed and false if it failed.
|
||||
//
|
||||
//Example:
|
||||
//
|
||||
// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm)
|
||||
type GomegaAssertion interface {
|
||||
Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
|
||||
ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
|
||||
|
||||
To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
|
||||
ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
|
||||
NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
|
||||
}
|
||||
|
||||
//OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it
|
||||
type OmegaMatcher types.GomegaMatcher
|
||||
|
||||
func toDuration(input interface{}) time.Duration {
|
||||
duration, ok := input.(time.Duration)
|
||||
if ok {
|
||||
return duration
|
||||
}
|
||||
|
||||
value := reflect.ValueOf(input)
|
||||
kind := reflect.TypeOf(input).Kind()
|
||||
|
||||
if reflect.Int <= kind && kind <= reflect.Int64 {
|
||||
return time.Duration(value.Int()) * time.Second
|
||||
} else if reflect.Uint <= kind && kind <= reflect.Uint64 {
|
||||
return time.Duration(value.Uint()) * time.Second
|
||||
} else if reflect.Float32 <= kind && kind <= reflect.Float64 {
|
||||
return time.Duration(value.Float() * float64(time.Second))
|
||||
} else if reflect.String == kind {
|
||||
duration, err := time.ParseDuration(value.String())
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input))
|
||||
}
|
||||
return duration
|
||||
}
|
||||
|
||||
panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input))
|
||||
}
|
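The interval arguments accepted by Eventually and Consistently are normalized by toDuration above: a time.Duration is used as-is, a string is parsed with time.ParseDuration, and a bare number is treated as a number of seconds. A hedged sketch of what this allows in a consuming test; the package name and poll helper are hypothetical, and RegisterTestingT is assumed to be gomega's public hook for plain *testing.T tests.

package mypackage_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestIntervalForms(t *testing.T) {
	RegisterTestingT(t)

	count := 0
	poll := func() int { count++; return count }

	// All three interval forms end up in toDuration:
	Eventually(poll, 2*time.Second, 10*time.Millisecond).Should(BeNumerically(">", 3)) // time.Duration values
	Eventually(poll, "1s").Should(BeNumerically(">", 5))                               // parsable duration string
	Eventually(poll, 1).Should(BeNumerically(">", 7))                                  // bare number = seconds
}
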
98
vendor/github.com/onsi/gomega/internal/assertion/assertion.go
generated
vendored
Normal file
@ -0,0 +1,98 @@
package assertion

import (
	"fmt"
	"reflect"

	"github.com/onsi/gomega/types"
)

type Assertion struct {
	actualInput interface{}
	fail        types.GomegaFailHandler
	offset      int
	extra       []interface{}
}

func New(actualInput interface{}, fail types.GomegaFailHandler, offset int, extra ...interface{}) *Assertion {
	return &Assertion{
		actualInput: actualInput,
		fail:        fail,
		offset:      offset,
		extra:       extra,
	}
}

func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
	return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
}

func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
	return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}

func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
	return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
}

func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
	return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}

func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
	return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}

func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string {
	switch len(optionalDescription) {
	case 0:
		return ""
	default:
		return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
	}
}

func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
	matches, err := matcher.Match(assertion.actualInput)
	description := assertion.buildDescription(optionalDescription...)
	if err != nil {
		assertion.fail(description+err.Error(), 2+assertion.offset)
		return false
	}
	if matches != desiredMatch {
		var message string
		if desiredMatch {
			message = matcher.FailureMessage(assertion.actualInput)
		} else {
			message = matcher.NegatedFailureMessage(assertion.actualInput)
		}
		assertion.fail(description+message, 2+assertion.offset)
		return false
	}

	return true
}

func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool {
	success, message := vetExtras(assertion.extra)
	if success {
		return true
	}

	description := assertion.buildDescription(optionalDescription...)
	assertion.fail(description+message, 2+assertion.offset)
	return false
}

func vetExtras(extras []interface{}) (bool, string) {
	for i, extra := range extras {
		if extra != nil {
			zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface()
			if !reflect.DeepEqual(zeroValue, extra) {
				message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra)
				return false, message
			}
		}
	}
	return true, ""
}
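A usage sketch (editorial, not part of the vendored file): Ω and Expect forward any additional return values of the actual expression as the `extra` slice, and vetExtras above fails the assertion if one of them is non-zero/non-nil. The package name and the lookup helper below are hypothetical; RegisterTestingT is assumed to be gomega's public hook for plain *testing.T tests.

package mypackage_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

// lookup is a hypothetical helper with a (value, error) return.
func lookup(key string) (string, error) { return "value-for-" + key, nil }

func TestExtraArgumentVetting(t *testing.T) {
	RegisterTestingT(t)

	// Passing the multi-return call directly: the trailing error becomes an
	// "extra" argument, and vetExtras fails the assertion if it is non-nil.
	// The description is run through fmt.Sprintf, as in buildDescription above.
	Ω(lookup("answer")).Should(Equal("value-for-answer"), "lookup of %q should round-trip", "answer")
}
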
197
vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go
generated
vendored
Normal file
@ -0,0 +1,197 @@
package asyncassertion

import (
	"errors"
	"fmt"
	"reflect"
	"time"

	"github.com/onsi/gomega/types"
)

type AsyncAssertionType uint

const (
	AsyncAssertionTypeEventually AsyncAssertionType = iota
	AsyncAssertionTypeConsistently
)

type AsyncAssertion struct {
	asyncType       AsyncAssertionType
	actualInput     interface{}
	timeoutInterval time.Duration
	pollingInterval time.Duration
	fail            types.GomegaFailHandler
	offset          int
}

func New(asyncType AsyncAssertionType, actualInput interface{}, fail types.GomegaFailHandler, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion {
	actualType := reflect.TypeOf(actualInput)
	if actualType.Kind() == reflect.Func {
		if actualType.NumIn() != 0 || actualType.NumOut() == 0 {
			panic("Expected a function with no arguments and one or more return values.")
		}
	}

	return &AsyncAssertion{
		asyncType:       asyncType,
		actualInput:     actualInput,
		fail:            fail,
		timeoutInterval: timeoutInterval,
		pollingInterval: pollingInterval,
		offset:          offset,
	}
}

func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
	return assertion.match(matcher, true, optionalDescription...)
}

func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
	return assertion.match(matcher, false, optionalDescription...)
}

func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string {
	switch len(optionalDescription) {
	case 0:
		return ""
	default:
		return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
	}
}

func (assertion *AsyncAssertion) actualInputIsAFunction() bool {
	actualType := reflect.TypeOf(assertion.actualInput)
	return actualType.Kind() == reflect.Func && actualType.NumIn() == 0 && actualType.NumOut() > 0
}

func (assertion *AsyncAssertion) pollActual() (interface{}, error) {
	if assertion.actualInputIsAFunction() {
		values := reflect.ValueOf(assertion.actualInput).Call([]reflect.Value{})

		extras := []interface{}{}
		for _, value := range values[1:] {
			extras = append(extras, value.Interface())
		}

		success, message := vetExtras(extras)

		if !success {
			return nil, errors.New(message)
		}

		return values[0].Interface(), nil
	}

	return assertion.actualInput, nil
}

type oracleMatcher interface {
	MatchMayChangeInTheFuture(actual interface{}) bool
}

func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool {
	if assertion.actualInputIsAFunction() {
		return true
	}

	oracleMatcher, ok := matcher.(oracleMatcher)
	if !ok {
		return true
	}

	return oracleMatcher.MatchMayChangeInTheFuture(value)
}

func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
	timer := time.Now()
	timeout := time.After(assertion.timeoutInterval)

	description := assertion.buildDescription(optionalDescription...)

	var matches bool
	var err error
	mayChange := true
	value, err := assertion.pollActual()
	if err == nil {
		mayChange = assertion.matcherMayChange(matcher, value)
		matches, err = matcher.Match(value)
	}

	fail := func(preamble string) {
		errMsg := ""
		message := ""
		if err != nil {
			errMsg = "Error: " + err.Error()
		} else {
			if desiredMatch {
				message = matcher.FailureMessage(value)
			} else {
				message = matcher.NegatedFailureMessage(value)
			}
		}
		assertion.fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset)
	}

	if assertion.asyncType == AsyncAssertionTypeEventually {
		for {
			if err == nil && matches == desiredMatch {
				return true
			}

			if !mayChange {
				fail("No future change is possible. Bailing out early")
				return false
			}

			select {
			case <-time.After(assertion.pollingInterval):
				value, err = assertion.pollActual()
				if err == nil {
					mayChange = assertion.matcherMayChange(matcher, value)
					matches, err = matcher.Match(value)
				}
			case <-timeout:
				fail("Timed out")
				return false
			}
		}
	} else if assertion.asyncType == AsyncAssertionTypeConsistently {
		for {
			if !(err == nil && matches == desiredMatch) {
				fail("Failed")
				return false
			}

			if !mayChange {
				return true
			}

			select {
			case <-time.After(assertion.pollingInterval):
				value, err = assertion.pollActual()
				if err == nil {
					mayChange = assertion.matcherMayChange(matcher, value)
					matches, err = matcher.Match(value)
				}
			case <-timeout:
				return true
			}
		}
	}

	return false
}

func vetExtras(extras []interface{}) (bool, string) {
	for i, extra := range extras {
		if extra != nil {
			zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface()
			if !reflect.DeepEqual(zeroValue, extra) {
				message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra)
				return false, message
			}
		}
	}
	return true, ""
}
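Another editorial sketch: when the actual value is a zero-argument function, pollActual above re-invokes it on every polling interval, and any trailing return values must be zero/nil or the poll errors out. The package name below is a placeholder and RegisterTestingT is assumed to be gomega's public hook for plain *testing.T tests.

package mypackage_test

import (
	"sync/atomic"
	"testing"

	. "github.com/onsi/gomega"
)

func TestEventuallyPollsAFunction(t *testing.T) {
	RegisterTestingT(t)

	var ready int32
	go func() { atomic.StoreInt32(&ready, 1) }()

	// The function is re-invoked at every polling interval until it reports
	// true or the timeout elapses; a non-nil trailing error return would fail
	// the poll (see pollActual and vetExtras above).
	Eventually(func() bool {
		return atomic.LoadInt32(&ready) == 1
	}).Should(BeTrue())
}
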
40
vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
package testingtsupport

import (
	"regexp"
	"runtime/debug"
	"strings"

	"github.com/onsi/gomega/types"
)

type gomegaTestingT interface {
	Errorf(format string, args ...interface{})
}

func BuildTestingTGomegaFailHandler(t gomegaTestingT) types.GomegaFailHandler {
	return func(message string, callerSkip ...int) {
		skip := 1
		if len(callerSkip) > 0 {
			skip = callerSkip[0]
		}
		stackTrace := pruneStack(string(debug.Stack()), skip)
		t.Errorf("\n%s\n%s", stackTrace, message)
	}
}

func pruneStack(fullStackTrace string, skip int) string {
	stack := strings.Split(fullStackTrace, "\n")
	if len(stack) > 2*(skip+1) {
		stack = stack[2*(skip+1):]
	}
	prunedStack := []string{}
	re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
	for i := 0; i < len(stack)/2; i++ {
		if !re.Match([]byte(stack[i*2])) {
			prunedStack = append(prunedStack, stack[i*2])
			prunedStack = append(prunedStack, stack[i*2+1])
		}
	}
	return strings.Join(prunedStack, "\n")
}
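A wiring sketch (editorial): BuildTestingTGomegaFailHandler routes gomega failures to t.Errorf with a pruned stack trace. Consumers normally do not import this internal package directly; the assumption here is that gomega's public RegisterTestingT performs this wiring for a plain *testing.T test.

package mypackage_test

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestWithPlainTestingT(t *testing.T) {
	// Assumed public entry point: RegisterTestingT hooks gomega's fail
	// handler up to t.Errorf using the testingtsupport helper shown above.
	gomega.RegisterTestingT(t)

	gomega.Expect(1 + 1).To(gomega.Equal(2))
}
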
345
vendor/github.com/onsi/gomega/matchers.go
generated
vendored
Normal file
@ -0,0 +1,345 @@
package gomega

import (
	"time"

	"github.com/onsi/gomega/matchers"
	"github.com/onsi/gomega/types"
)

//Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about
//types when performing comparisons.
//It is an error for both actual and expected to be nil. Use BeNil() instead.
func Equal(expected interface{}) types.GomegaMatcher {
	return &matchers.EqualMatcher{
		Expected: expected,
	}
}

//BeEquivalentTo is more lax than Equal, allowing equality between different types.
//This is done by converting actual to have the type of expected before
//attempting equality with reflect.DeepEqual.
//It is an error for actual and expected to be nil. Use BeNil() instead.
func BeEquivalentTo(expected interface{}) types.GomegaMatcher {
	return &matchers.BeEquivalentToMatcher{
		Expected: expected,
	}
}

//BeNil succeeds if actual is nil
func BeNil() types.GomegaMatcher {
	return &matchers.BeNilMatcher{}
}

//BeTrue succeeds if actual is true
func BeTrue() types.GomegaMatcher {
	return &matchers.BeTrueMatcher{}
}

//BeFalse succeeds if actual is false
func BeFalse() types.GomegaMatcher {
	return &matchers.BeFalseMatcher{}
}

//HaveOccurred succeeds if actual is a non-nil error
//The typical Go error checking pattern looks like:
//  err := SomethingThatMightFail()
//  Ω(err).ShouldNot(HaveOccurred())
func HaveOccurred() types.GomegaMatcher {
	return &matchers.HaveOccurredMatcher{}
}

//Succeed passes if actual is a nil error
//Succeed is intended to be used with functions that return a single error value. Instead of
//  err := SomethingThatMightFail()
//  Ω(err).ShouldNot(HaveOccurred())
//
//You can write:
//  Ω(SomethingThatMightFail()).Should(Succeed())
//
//It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect
//functions automatically trigger failure if any return values after the first return value are non-zero/non-nil.
//This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass.
func Succeed() types.GomegaMatcher {
	return &matchers.SucceedMatcher{}
}

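A short usage sketch (editorial): HaveOccurred is used with an error value you already hold, while Succeed reads better when asserting directly on a single-error-return call. The package name and saveRecord helper are hypothetical; RegisterTestingT is assumed to be gomega's public hook for plain *testing.T tests.

package mypackage_test

import (
	"errors"
	"testing"

	. "github.com/onsi/gomega"
)

// saveRecord is a hypothetical single-error-return helper.
func saveRecord(name string) error {
	if name == "" {
		return errors.New("name must not be empty")
	}
	return nil
}

func TestErrorMatchers(t *testing.T) {
	RegisterTestingT(t)

	err := saveRecord("")
	Ω(err).Should(HaveOccurred())

	// Succeed asserts directly on the single error return.
	Ω(saveRecord("ok")).Should(Succeed())
}
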
//MatchError succeeds if actual is a non-nil error that matches the passed in string/error.
//
//These are valid use-cases:
//  Ω(err).Should(MatchError("an error")) //asserts that err.Error() == "an error"
//  Ω(err).Should(MatchError(SomeError))  //asserts that err == SomeError (via reflect.DeepEqual)
//
//It is an error for err to be nil or an object that does not implement the Error interface
func MatchError(expected interface{}) types.GomegaMatcher {
	return &matchers.MatchErrorMatcher{
		Expected: expected,
	}
}

//BeClosed succeeds if actual is a closed channel.
//It is an error to pass a non-channel to BeClosed, it is also an error to pass nil
//
//In order to check whether or not the channel is closed, Gomega must try to read from the channel
//(even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about
//values coming down the channel.
//
//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before
//asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read).
//
//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed.
func BeClosed() types.GomegaMatcher {
	return &matchers.BeClosedMatcher{}
}

//Receive succeeds if there is a value to be received on actual.
//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error.
//
//Receive returns immediately and never blocks:
//
//- If there is nothing on the channel `c` then Ω(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
//
//- If the channel `c` is closed then Ω(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
//
//- If there is something on the channel `c` ready to be read, then Ω(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail.
//
//If you have a go-routine running in the background that will write to channel `c` you can:
//  Eventually(c).Should(Receive())
//
//This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`)
//
//A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`:
//  Consistently(c).ShouldNot(Receive())
//
//You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example:
//  Ω(c).Should(Receive(Equal("foo")))
//
//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel.
//
//Passing Receive a matcher is especially useful when paired with Eventually:
//
//  Eventually(c).Should(Receive(ContainSubstring("bar")))
//
//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
//
//Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
//  var myThing thing
//  Eventually(thingChan).Should(Receive(&myThing))
//  Ω(myThing.Sprocket).Should(Equal("foo"))
//  Ω(myThing.IsValid()).Should(BeTrue())
func Receive(args ...interface{}) types.GomegaMatcher {
	var arg interface{}
	if len(args) > 0 {
		arg = args[0]
	}

	return &matchers.ReceiveMatcher{
		Arg: arg,
	}
}

//BeSent succeeds if a value can be sent to actual.
//Actual must be a channel (and cannot be a receive-only channel) that can send the type of the value passed into BeSent -- anything else is an error.
//In addition, actual must not be closed.
//
//BeSent never blocks:
//
//- If the channel `c` is not ready to receive then Ω(c).Should(BeSent("foo")) will fail immediately
//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed, presuming the channel becomes ready to receive before Eventually's timeout
//- If the channel `c` is closed then Ω(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately
//
//Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
func BeSent(arg interface{}) types.GomegaMatcher {
	return &matchers.BeSentMatcher{
		Arg: arg,
	}
}

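A compact channel-matcher sketch (editorial), tying together BeSent, Receive, Consistently, and BeClosed as documented above. The package name is a placeholder; RegisterTestingT is assumed to be gomega's public hook for plain *testing.T tests.

package mypackage_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestChannelMatchers(t *testing.T) {
	RegisterTestingT(t)

	c := make(chan string, 1)

	// BeSent actually performs the send; Receive then pulls the value back out.
	Ω(c).Should(BeSent("foo"))
	Eventually(c).Should(Receive(Equal("foo")))

	// Nothing else is written, so no receive should happen for a while.
	Consistently(c).ShouldNot(Receive())

	// The buffer has been drained, so BeClosed can safely detect the close.
	close(c)
	Ω(c).Should(BeClosed())
}
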
//MatchRegexp succeeds if actual is a string or stringer that matches the
//passed-in regexp. Optional arguments can be provided to construct a regexp
//via fmt.Sprintf().
func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
	return &matchers.MatchRegexpMatcher{
		Regexp: regexp,
		Args:   args,
	}
}

//ContainSubstring succeeds if actual is a string or stringer that contains the
//passed-in substring. Optional arguments can be provided to construct the substring
//via fmt.Sprintf().
func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
	return &matchers.ContainSubstringMatcher{
		Substr: substr,
		Args:   args,
	}
}

//HavePrefix succeeds if actual is a string or stringer that contains the
//passed-in string as a prefix. Optional arguments can be provided to construct
//the prefix via fmt.Sprintf().
func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
	return &matchers.HavePrefixMatcher{
		Prefix: prefix,
		Args:   args,
	}
}

//HaveSuffix succeeds if actual is a string or stringer that contains the
//passed-in string as a suffix. Optional arguments can be provided to construct
//the suffix via fmt.Sprintf().
func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
	return &matchers.HaveSuffixMatcher{
		Suffix: suffix,
		Args:   args,
	}
}

//MatchJSON succeeds if actual is a string or stringer of JSON that matches
//the expected JSON. The JSONs are decoded and the resulting objects are compared via
//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
func MatchJSON(json interface{}) types.GomegaMatcher {
	return &matchers.MatchJSONMatcher{
		JSONToMatch: json,
	}
}

//BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice.
func BeEmpty() types.GomegaMatcher {
	return &matchers.BeEmptyMatcher{}
}

//HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice.
func HaveLen(count int) types.GomegaMatcher {
	return &matchers.HaveLenMatcher{
		Count: count,
	}
}

//BeZero succeeds if actual is the zero value for its type or if actual is nil.
func BeZero() types.GomegaMatcher {
	return &matchers.BeZeroMatcher{}
}

//ContainElement succeeds if actual contains the passed in element.
//By default ContainElement() uses Equal() to perform the match, however a
//matcher can be passed in instead:
//  Ω([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
//
//Actual must be an array, slice or map.
//For maps, ContainElement searches through the map's values.
func ContainElement(element interface{}) types.GomegaMatcher {
	return &matchers.ContainElementMatcher{
		Element: element,
	}
}

//ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
//
//  Ω([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
//  Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
//  Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
//
//Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
//
//You typically pass variadic arguments to ConsistOf (as in the examples above). However, you may also pass in a slice, provided that it
//is the only element passed in to ConsistOf:
//
//  Ω([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
//
//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
func ConsistOf(elements ...interface{}) types.GomegaMatcher {
	return &matchers.ConsistOfMatcher{
		Elements: elements,
	}
}

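A collection-matcher sketch (editorial) combining the length, containment, and ConsistOf matchers defined above. The package name is a placeholder; RegisterTestingT is assumed to be gomega's public hook for plain *testing.T tests.

package mypackage_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestCollectionMatchers(t *testing.T) {
	RegisterTestingT(t)

	fruits := []string{"apple", "banana", "cherry"}

	Ω(fruits).Should(HaveLen(3))
	Ω(fruits).ShouldNot(BeEmpty())
	Ω(fruits).Should(ContainElement("banana"))
	Ω(fruits).Should(ContainElement(HavePrefix("che"))) // sub-matchers work too

	// Order does not matter for ConsistOf.
	Ω(fruits).Should(ConsistOf("cherry", "apple", "banana"))
}
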
//HaveKey succeeds if actual is a map with the passed in key.
//By default HaveKey uses Equal() to perform the match, however a
//matcher can be passed in instead:
//  Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
func HaveKey(key interface{}) types.GomegaMatcher {
	return &matchers.HaveKeyMatcher{
		Key: key,
	}
}

//HaveKeyWithValue succeeds if actual is a map with the passed in key and value.
//By default HaveKeyWithValue uses Equal() to perform the match, however a
//matcher can be passed in instead:
//  Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
//  Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
	return &matchers.HaveKeyWithValueMatcher{
		Key:   key,
		Value: value,
	}
}

//BeNumerically performs numerical assertions in a type-agnostic way.
//Actual and expected should be numbers, though the specific type of
//number is irrelevant (float32, float64, uint8, etc...).
//
//There are six, self-explanatory, supported comparators:
//  Ω(1.0).Should(BeNumerically("==", 1))
//  Ω(1.0).Should(BeNumerically("~", 0.999, 0.01))
//  Ω(1.0).Should(BeNumerically(">", 0.9))
//  Ω(1.0).Should(BeNumerically(">=", 1.0))
//  Ω(1.0).Should(BeNumerically("<", 3))
//  Ω(1.0).Should(BeNumerically("<=", 1.0))
func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher {
	return &matchers.BeNumericallyMatcher{
		Comparator: comparator,
		CompareTo:  compareTo,
	}
}

//BeTemporally compares time.Time's like BeNumerically
//Actual and expected must be time.Time. The comparators are the same as for BeNumerically
//  Ω(time.Now()).Should(BeTemporally(">", time.Time{}))
//  Ω(time.Now()).Should(BeTemporally("~", time.Now(), time.Second))
func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher {
	return &matchers.BeTemporallyMatcher{
		Comparator: comparator,
		CompareTo:  compareTo,
		Threshold:  threshold,
	}
}

//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected.
//It will return an error when one of the values is nil.
//  Ω(0).Should(BeAssignableToTypeOf(0))         // Same values
//  Ω(5).Should(BeAssignableToTypeOf(-1))        // different values same type
//  Ω("foo").Should(BeAssignableToTypeOf("bar")) // different values same type
//  Ω(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher {
	return &matchers.AssignableToTypeOfMatcher{
		Expected: expected,
	}
}

//Panic succeeds if actual is a function that, when invoked, panics.
//Actual must be a function that takes no arguments and returns no results.
func Panic() types.GomegaMatcher {
	return &matchers.PanicMatcher{}
}

//BeAnExistingFile succeeds if a file exists.
//Actual must be a string representing the abs path to the file being checked.
func BeAnExistingFile() types.GomegaMatcher {
	return &matchers.BeAnExistingFileMatcher{}
}

//BeARegularFile succeeds iff a file exists and is a regular file.
//Actual must be a string representing the abs path to the file being checked.
func BeARegularFile() types.GomegaMatcher {
	return &matchers.BeARegularFileMatcher{}
}

//BeADirectory succeeds iff a file exists and is a directory.
//Actual must be a string representing the abs path to the file being checked.
func BeADirectory() types.GomegaMatcher {
	return &matchers.BeADirectoryMatcher{}
}
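A file-matcher sketch (editorial) exercising the three filesystem matchers above against a temporary directory. The package name is a placeholder; RegisterTestingT is assumed to be gomega's public hook for plain *testing.T tests.

package mypackage_test

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	. "github.com/onsi/gomega"
)

func TestFileMatchers(t *testing.T) {
	RegisterTestingT(t)

	dir, err := ioutil.TempDir("", "gomega-example")
	Ω(err).ShouldNot(HaveOccurred())
	defer os.RemoveAll(dir)

	file := filepath.Join(dir, "data.txt")
	Ω(ioutil.WriteFile(file, []byte("hi"), 0644)).Should(Succeed())

	// The matchers expect absolute paths, which TempDir provides.
	Ω(dir).Should(BeADirectory())
	Ω(file).Should(BeARegularFile())
	Ω(file).Should(BeAnExistingFile())
	Ω(filepath.Join(dir, "missing")).ShouldNot(BeAnExistingFile())
}
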
31
vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
generated
vendored
Normal file
@ -0,0 +1,31 @@
package matchers

import (
	"fmt"
	"reflect"

	"github.com/onsi/gomega/format"
)

type AssignableToTypeOfMatcher struct {
	Expected interface{}
}

func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) {
	if actual == nil || matcher.Expected == nil {
		return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
	}

	actualType := reflect.TypeOf(actual)
	expectedType := reflect.TypeOf(matcher.Expected)

	return actualType.AssignableTo(expectedType), nil
}

func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string {
	return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected))
}

func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string {
	return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected))
}
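AssignableToTypeOfMatcher shows the shape every matcher takes: Match plus a pair of failure-message methods. As an editorial sketch, a hypothetical custom matcher following the same pattern (the BeEven name and package are inventions, not part of gomega):

package mymatchers

import (
	"fmt"

	"github.com/onsi/gomega/format"
	"github.com/onsi/gomega/types"
)

// BeEvenMatcher is a hypothetical custom matcher mirroring the structure of
// AssignableToTypeOfMatcher above.
type BeEvenMatcher struct{}

// BeEven returns the matcher, following gomega's constructor convention.
func BeEven() types.GomegaMatcher {
	return &BeEvenMatcher{}
}

func (matcher *BeEvenMatcher) Match(actual interface{}) (bool, error) {
	n, ok := actual.(int)
	if !ok {
		return false, fmt.Errorf("BeEven matcher expects an int, got %T", actual)
	}
	return n%2 == 0, nil
}

func (matcher *BeEvenMatcher) FailureMessage(actual interface{}) string {
	return format.Message(actual, "to be even")
}

func (matcher *BeEvenMatcher) NegatedFailureMessage(actual interface{}) string {
	return format.Message(actual, "not to be even")
}
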
54
vendor/github.com/onsi/gomega/matchers/be_a_directory.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
package matchers

import (
	"fmt"
	"os"

	"github.com/onsi/gomega/format"
)

type notADirectoryError struct {
	os.FileInfo
}

func (t notADirectoryError) Error() string {
	fileInfo := os.FileInfo(t)
	switch {
	case fileInfo.Mode().IsRegular():
		return "file is a regular file"
	default:
		return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String())
	}
}

type BeADirectoryMatcher struct {
	expected interface{}
	err      error
}

func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err error) {
	actualFilename, ok := actual.(string)
	if !ok {
		return false, fmt.Errorf("BeADirectoryMatcher matcher expects a file path")
	}

	fileInfo, err := os.Stat(actualFilename)
	if err != nil {
		matcher.err = err
		return false, nil
	}

	if !fileInfo.Mode().IsDir() {
		matcher.err = notADirectoryError{fileInfo}
		return false, nil
	}
	return true, nil
}

func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, fmt.Sprintf("to be a directory: %s", matcher.err))
}

func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, fmt.Sprintf("not be a directory"))
}
Some files were not shown because too many files have changed in this diff.