Mirror of https://github.com/Luzifer/vault-openvpn.git (synced 2024-12-25 06:11:21 +00:00)
Update dependencies

Signed-off-by: Knut Ahlers <knut@ahlers.me>

parent b4cc42902e / commit 4d62cd0789
2163 changed files with 658080 additions and 50809 deletions
Gopkg.lock (generated): 77 lines changed
@@ -4,18 +4,26 @@

[[projects]]
  name = "github.com/Luzifer/rconfig"
  packages = ["."]
  revision = "c27bd3a64b5b19556914d9fec69922cf3852d585"
  version = "v1.1.0"
  revision = "7aef1d393c1e2d0758901853b59981c7adc67c7e"
  version = "v1.2.0"

[[projects]]
  name = "github.com/Sirupsen/logrus"
  packages = ["."]
  revision = "3ec0642a7fb6488f65b06f9040adc67e3990296a"
  revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
  version = "v1.0.3"

[[projects]]
  name = "github.com/fatih/structs"
  packages = ["."]
  revision = "3fe2facc32a7fbde4b29c0f85604dc1dd22836d2"
  revision = "a720dfa8df582c51dee1b36feabb906bde1588bd"
  version = "v1.0"

[[projects]]
  branch = "master"
  name = "github.com/golang/snappy"
  packages = ["."]
  revision = "553a641470496b2327abcac10b36396bd98e45c9"

[[projects]]
  branch = "master"

@@ -24,14 +32,16 @@

  revision = "7554cd9344cec97297fa6649b055a8c98c2a1e55"

[[projects]]
  branch = "master"
  name = "github.com/hashicorp/go-cleanhttp"
  packages = ["."]
  revision = "ad28ea4487f05916463e2423a55166280e8254b5"
  revision = "3573b8b52aa7b37b9358d966a898feb387f62437"

[[projects]]
  branch = "master"
  name = "github.com/hashicorp/go-multierror"
  packages = ["."]
  revision = "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"
  revision = "83588e72410abfbe4df460eeb6f30841ae47d4c4"

[[projects]]
  branch = "master"

@@ -40,59 +50,88 @@

  revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"

[[projects]]
  branch = "master"
  name = "github.com/hashicorp/hcl"
  packages = [".","hcl/ast","hcl/parser","hcl/scanner","hcl/strconv","hcl/token","json/parser","json/scanner","json/token"]
  revision = "ef8133da8cda503718a74741312bf50821e6de79"
  revision = "42e33e2d55a0ff1d6263f738896ea8c13571a8d0"

[[projects]]
  name = "github.com/hashicorp/vault"
  packages = ["api","helper/certutil","helper/compressutil","helper/errutil","helper/jsonutil"]
  revision = "4490e93395fb70c3a25ade1fe88f363561a7d584"
  packages = ["api","helper/certutil","helper/compressutil","helper/errutil","helper/jsonutil","helper/parseutil"]
  revision = "6b29fb2b7f70ed538ee2b3c057335d706b6d4e36"
  version = "v0.8.3"

[[projects]]
  name = "github.com/mattn/go-runewidth"
  packages = ["."]
  revision = "14207d285c6c197daabb5c9793d63e7af9ab2d50"
  revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
  version = "v0.0.2"

[[projects]]
  branch = "master"
  name = "github.com/mitchellh/go-homedir"
  packages = ["."]
  revision = "981ab348d865cf048eb7d17e78ac7192632d8415"
  revision = "b8bc1bf767474819792c23f32d8286a45736f1c6"

[[projects]]
  branch = "master"
  name = "github.com/mitchellh/mapstructure"
  packages = ["."]
  revision = "ca63d7c062ee3c9f34db231e352b60012b4fd0c1"
  revision = "d0303fe809921458f417bcf828397a65db30a7e4"

[[projects]]
  branch = "master"
  name = "github.com/olekukonko/tablewriter"
  packages = ["."]
  revision = "febf2d34b54a69ce7530036c7503b1c9fbfdf0bb"
  revision = "a7a4c189eb47ed33ce7b35f2880070a0c82a67d4"

[[projects]]
  branch = "master"
  name = "github.com/sethgrid/pester"
  packages = ["."]
  revision = "4f4c0a67b6496764028e1ab9fd8dfb630282ed2f"
  revision = "0af5bab1e1ea2860c5aef8e77427bab011d774d8"

[[projects]]
  name = "github.com/spf13/pflag"
  packages = ["."]
  revision = "c7e63cf4530bcd3ba943729cee0efeff2ebea63f"
  revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
  version = "v1.0.0"

[[projects]]
  branch = "master"
  name = "golang.org/x/crypto"
  packages = ["ssh/terminal"]
  revision = "9419663f5a44be8b34ca85f08abc5fe1be11f8a3"

[[projects]]
  branch = "master"
  name = "golang.org/x/net"
  packages = ["http2","http2/hpack","idna","lex/httplex"]
  revision = "f09c4662a0bd6bd8943ac7b4931e185df9471da4"
  revision = "a04bdaca5b32abe1c069418fb7088ae607de5bd0"

[[projects]]
  branch = "master"
  name = "golang.org/x/sys"
  packages = ["unix"]
  revision = "8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9"
  packages = ["unix","windows"]
  revision = "ebfc5b4631820b793c9010c87fd8fef0f39eb082"

[[projects]]
  branch = "master"
  name = "golang.org/x/text"
  packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
  revision = "825fc78a2fd6fa0a5447e300189e3219e05e1f25"

[[projects]]
  branch = "v2"
  name = "gopkg.in/validator.v2"
  packages = ["."]
  revision = "460c83432a98c35224a6fe352acf8b23e067ad06"

[[projects]]
  branch = "v2"
  name = "gopkg.in/yaml.v2"
  packages = ["."]
  revision = "31c299268d302dd0aa9a0dcf765a3d58971ac83f"
  revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"

[solve-meta]
  analyzer-name = "dep"
vendor/github.com/Luzifer/rconfig/.travis.yml (generated, vendored): 4 lines changed
@@ -1,8 +1,8 @@

language: go

go:
- 1.4
- 1.5
- 1.6
- 1.7
- tip

script: go test -v -race -cover ./...
vendor/github.com/Luzifer/rconfig/History.md (generated, vendored): 4 lines changed
@@ -1,3 +1,7 @@

# 1.2.0 / 2017-06-19

* Add ParseAndValidate method

# 1.1.0 / 2016-06-28

* Support time.Duration config parameters
vendor/github.com/Luzifer/rconfig/README.md (generated, vendored): 47 lines changed
|
@ -29,34 +29,31 @@ go test -v -race -cover github.com/Luzifer/rconfig
|
|||
|
||||
## Usage
|
||||
|
||||
As a first step define a struct holding your configuration:
|
||||
A very simple usecase is to just configure a struct inside the vars section of your `main.go` and to parse the commandline flags from the `main()` function:
|
||||
|
||||
```go
|
||||
type config struct {
|
||||
Username string `default:"unknown" flag:"user" description:"Your name"`
|
||||
Details struct {
|
||||
Age int `default:"25" flag:"age" env:"age" description:"Your age"`
|
||||
}
|
||||
}
|
||||
```
|
||||
package main
|
||||
|
||||
Next create an instance of that struct and let `rconfig` fill that config:
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/Luzifer/rconfig"
|
||||
)
|
||||
|
||||
```go
|
||||
var cfg config
|
||||
func init() {
|
||||
cfg = config{}
|
||||
rconfig.Parse(&cfg)
|
||||
}
|
||||
```
|
||||
var (
|
||||
cfg = struct {
|
||||
Username string `default:"unknown" flag:"user" description:"Your name"`
|
||||
Details struct {
|
||||
Age int `default:"25" flag:"age" env:"age" description:"Your age"`
|
||||
}
|
||||
}{}
|
||||
)
|
||||
|
||||
You're ready to access your configuration:
|
||||
|
||||
```go
|
||||
func main() {
|
||||
rconfig.Parse(&cfg)
|
||||
|
||||
fmt.Printf("Hello %s, happy birthday for your %dth birthday.",
|
||||
cfg.Username,
|
||||
cfg.Details.Age)
|
||||
cfg.Username,
|
||||
cfg.Details.Age)
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -72,18 +69,14 @@ The order of the directives (lower number = higher precedence):
|
|||
1. `default` tag in the struct
|
||||
|
||||
```go
|
||||
type config struct {
|
||||
var cfg = struct {
|
||||
Username string `vardefault:"username" flag:"username" description:"Your username"`
|
||||
}
|
||||
|
||||
var cfg = config{}
|
||||
|
||||
func init() {
|
||||
func main() {
|
||||
rconfig.SetVariableDefaults(rconfig.VarDefaultsFromYAMLFile("~/.myapp.yml"))
|
||||
rconfig.Parse(&cfg)
|
||||
}
|
||||
|
||||
func main() {
|
||||
fmt.Printf("Username = %s", cfg.Username)
|
||||
// Output: Username = luzifer
|
||||
}
|
||||
|
|
vendor/github.com/Luzifer/rconfig/config.go (generated, vendored): 18 lines changed
@@ -13,6 +13,7 @@ import (
	"time"

	"github.com/spf13/pflag"
	validator "gopkg.in/validator.v2"
)

var (

@@ -45,6 +46,15 @@ func Parse(config interface{}) error {
	return parse(config, nil)
}

// ParseAndValidate works exactly like Parse but implements an additional run of
// the go-validator package on the configuration struct. Therefore additonal struct
// tags are supported like described in the readme file of the go-validator package:
//
// https://github.com/go-validator/validator/tree/v2#usage
func ParseAndValidate(config interface{}) error {
	return parseAndValidate(config, nil)
}

// Args returns the non-flag command-line arguments.
func Args() []string {
	return fs.Args()

@@ -65,6 +75,14 @@ func SetVariableDefaults(defaults map[string]string) {
	variableDefaults = defaults
}

func parseAndValidate(in interface{}, args []string) error {
	if err := parse(in, args); err != nil {
		return err
	}

	return validator.Validate(in)
}

func parse(in interface{}, args []string) error {
	if args == nil {
		args = os.Args
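The config.go hunk above brings in rconfig v1.2.0's new `ParseAndValidate`, which runs gopkg.in/validator.v2 over the parsed struct. A minimal usage sketch; the struct, flag name, and default value are hypothetical, and only `ParseAndValidate` and the `validate:"nonzero"` tag come from the vendored code:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Luzifer/rconfig"
)

// cfg is a hypothetical configuration struct; the validate tag is
// checked by gopkg.in/validator.v2 when ParseAndValidate runs.
var cfg = struct {
	ListenAddr string `flag:"listen" default:":8080" description:"Listen address" validate:"nonzero"`
}{}

func main() {
	// Parse flags/env as usual, then run the validator over the struct.
	if err := rconfig.ParseAndValidate(&cfg); err != nil {
		log.Fatalf("config error: %s", err)
	}
	fmt.Println("listening on", cfg.ListenAddr)
}
```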
vendor/github.com/Luzifer/rconfig/general_test.go (generated, vendored): 19 lines changed
|
@ -15,6 +15,10 @@ var _ = Describe("Testing general parsing", func() {
|
|||
SadFlag string
|
||||
}
|
||||
|
||||
type tValidated struct {
|
||||
Test string `flag:"test" default:"" validate:"nonzero"`
|
||||
}
|
||||
|
||||
var (
|
||||
err error
|
||||
args []string
|
||||
|
@ -106,4 +110,19 @@ var _ = Describe("Testing general parsing", func() {
|
|||
})
|
||||
})
|
||||
|
||||
Context("making use of the validator package", func() {
|
||||
var cfgValidated tValidated
|
||||
|
||||
BeforeEach(func() {
|
||||
cfgValidated = tValidated{}
|
||||
args = []string{}
|
||||
})
|
||||
|
||||
JustBeforeEach(func() {
|
||||
err = parseAndValidate(&cfgValidated, args)
|
||||
})
|
||||
|
||||
It("should have errored", func() { Expect(err).To(HaveOccurred()) })
|
||||
})
|
||||
|
||||
})
|
||||
|
|
vendor/github.com/Sirupsen/logrus/.travis.yml (generated, vendored): 17 lines changed
|
@ -1,10 +1,15 @@
|
|||
language: go
|
||||
go:
|
||||
- 1.3
|
||||
- 1.4
|
||||
- 1.5
|
||||
- 1.6
|
||||
- 1.6.x
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- tip
|
||||
env:
|
||||
- GOMAXPROCS=4 GORACE=halt_on_error=1
|
||||
install:
|
||||
- go get -t ./...
|
||||
script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./...
|
||||
- go get github.com/stretchr/testify/assert
|
||||
- go get gopkg.in/gemnasium/logrus-airbrake-hook.v2
|
||||
- go get golang.org/x/sys/unix
|
||||
- go get golang.org/x/sys/windows
|
||||
script:
|
||||
- go test -race -v ./...
|
||||
|
|
vendor/github.com/Sirupsen/logrus/CHANGELOG.md (generated, vendored): 47 lines changed
|
@ -1,3 +1,50 @@
|
|||
# 1.0.3
|
||||
|
||||
* Replace example files with testable examples
|
||||
|
||||
# 1.0.2
|
||||
|
||||
* bug: quote non-string values in text formatter (#583)
|
||||
* Make (*Logger) SetLevel a public method
|
||||
|
||||
# 1.0.1
|
||||
|
||||
* bug: fix escaping in text formatter (#575)
|
||||
|
||||
# 1.0.0
|
||||
|
||||
* Officially changed name to lower-case
|
||||
* bug: colors on Windows 10 (#541)
|
||||
* bug: fix race in accessing level (#512)
|
||||
|
||||
# 0.11.5
|
||||
|
||||
* feature: add writer and writerlevel to entry (#372)
|
||||
|
||||
# 0.11.4
|
||||
|
||||
* bug: fix undefined variable on solaris (#493)
|
||||
|
||||
# 0.11.3
|
||||
|
||||
* formatter: configure quoting of empty values (#484)
|
||||
* formatter: configure quoting character (default is `"`) (#484)
|
||||
* bug: fix not importing io correctly in non-linux environments (#481)
|
||||
|
||||
# 0.11.2
|
||||
|
||||
* bug: fix windows terminal detection (#476)
|
||||
|
||||
# 0.11.1
|
||||
|
||||
* bug: fix tty detection with custom out (#471)
|
||||
|
||||
# 0.11.0
|
||||
|
||||
* performance: Use bufferpool to allocate (#370)
|
||||
* terminal: terminal detection for app-engine (#343)
|
||||
* feature: exit handler (#375)
|
||||
|
||||
# 0.10.0
|
||||
|
||||
* feature: Add a test hook (#180)
|
||||
|
|
vendor/github.com/Sirupsen/logrus/README.md (generated, vendored): 190 lines changed
|
@ -1,11 +1,24 @@
|
|||
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
|
||||
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
|
||||
|
||||
Logrus is a structured logger for Go (golang), completely API compatible with
|
||||
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
|
||||
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
|
||||
many large deployments. The core API is unlikely to change much but please
|
||||
version control your Logrus to make sure you aren't fetching latest `master` on
|
||||
every build.**
|
||||
the standard library logger.
|
||||
|
||||
**Seeing weird case-sensitive problems?** It's in the past been possible to
|
||||
import Logrus as both upper- and lower-case. Due to the Go package environment,
|
||||
this caused issues in the community and we needed a standard. Some environments
|
||||
experienced problems with the upper-case variant, so the lower-case was decided.
|
||||
Everything using `logrus` will need to use the lower-case:
|
||||
`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
|
||||
|
||||
To fix Glide, see [these
|
||||
comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
|
||||
For an in-depth explanation of the casing issue, see [this
|
||||
comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
|
||||
|
||||
**Are you interested in assisting in maintaining Logrus?** Currently I have a
|
||||
lot of obligations, and I am unable to provide Logrus with the maintainership it
|
||||
needs. If you'd like to help, please reach out to me at `simon at author's
|
||||
username dot com`.
|
||||
|
||||
Nicely color-coded in development (when a TTY is attached, otherwise just
|
||||
plain text):
|
||||
|
@ -46,6 +59,12 @@ time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x20822
|
|||
exit status 1
|
||||
```
|
||||
|
||||
#### Case-sensitivity
|
||||
|
||||
The organization's name was changed to lower-case--and this will not be changed
|
||||
back. If you are getting import conflicts due to case sensitivity, please use
|
||||
the lower-case import: `github.com/sirupsen/logrus`.
|
||||
|
||||
#### Example
|
||||
|
||||
The simplest way to use Logrus is simply the package-level exported logger:
|
||||
|
@ -54,7 +73,7 @@ The simplest way to use Logrus is simply the package-level exported logger:
|
|||
package main
|
||||
|
||||
import (
|
||||
log "github.com/Sirupsen/logrus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
@ -65,7 +84,7 @@ func main() {
|
|||
```
|
||||
|
||||
Note that it's completely api-compatible with the stdlib logger, so you can
|
||||
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
|
||||
replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
|
||||
and you'll now have the flexibility of Logrus. You can customize it all you
|
||||
want:
|
||||
|
||||
|
@ -74,15 +93,16 @@ package main
|
|||
|
||||
import (
|
||||
"os"
|
||||
log "github.com/Sirupsen/logrus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Log as JSON instead of the default ASCII formatter.
|
||||
log.SetFormatter(&log.JSONFormatter{})
|
||||
|
||||
// Output to stderr instead of stdout, could also be a file.
|
||||
log.SetOutput(os.Stderr)
|
||||
// Output to stdout instead of the default stderr
|
||||
// Can be any io.Writer, see below for File example
|
||||
log.SetOutput(os.Stdout)
|
||||
|
||||
// Only log the warning severity or above.
|
||||
log.SetLevel(log.WarnLevel)
|
||||
|
@ -123,7 +143,8 @@ application, you can also create an instance of the `logrus` Logger:
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
"os"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Create a new instance of the logger. You can have any number of instances.
|
||||
|
@ -132,7 +153,15 @@ var log = logrus.New()
|
|||
func main() {
|
||||
// The API for setting attributes is a little different than the package level
|
||||
// exported logger. See Godoc.
|
||||
log.Out = os.Stderr
|
||||
log.Out = os.Stdout
|
||||
|
||||
// You could set this to any `io.Writer` such as a file
|
||||
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
|
||||
// if err == nil {
|
||||
// log.Out = file
|
||||
// } else {
|
||||
// log.Info("Failed to log to file, using default stderr")
|
||||
// }
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
|
@ -143,7 +172,7 @@ func main() {
|
|||
|
||||
#### Fields
|
||||
|
||||
Logrus encourages careful, structured logging though logging fields instead of
|
||||
Logrus encourages careful, structured logging through logging fields instead of
|
||||
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
|
||||
to send event %s to topic %s with key %d")`, you should log the much more
|
||||
discoverable:
|
||||
|
@ -165,6 +194,20 @@ In general, with Logrus using any of the `printf`-family functions should be
|
|||
seen as a hint you should add a field, however, you can still use the
|
||||
`printf`-family functions with Logrus.
|
||||
|
||||
#### Default Fields
|
||||
|
||||
Often it's helpful to have fields _always_ attached to log statements in an
|
||||
application or parts of one. For example, you may want to always log the
|
||||
`request_id` and `user_ip` in the context of a request. Instead of writing
|
||||
`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
|
||||
every line, you can create a `logrus.Entry` to pass around instead:
|
||||
|
||||
```go
|
||||
requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
|
||||
requestLogger.Info("something happened on that request") # will log request_id and user_ip
|
||||
requestLogger.Warn("something not great happened")
|
||||
```
|
||||
|
||||
#### Hooks
|
||||
|
||||
You can add hooks for logging levels. For example to send errors to an exception
|
||||
|
@ -176,9 +219,9 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
|
|||
|
||||
```go
|
||||
import (
|
||||
log "github.com/Sirupsen/logrus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
|
||||
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
|
||||
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
|
||||
"log/syslog"
|
||||
)
|
||||
|
||||
|
@ -200,37 +243,52 @@ Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/v
|
|||
|
||||
| Hook | Description |
|
||||
| ----- | ----------- |
|
||||
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
|
||||
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
|
||||
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
|
||||
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
|
||||
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
|
||||
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
|
||||
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
|
||||
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
|
||||
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
|
||||
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
|
||||
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
|
||||
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
|
||||
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
|
||||
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
|
||||
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
|
||||
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
|
||||
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
|
||||
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
|
||||
| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) |
|
||||
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
|
||||
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
|
||||
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
|
||||
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
|
||||
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
|
||||
| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
|
||||
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
|
||||
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
|
||||
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
|
||||
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
|
||||
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
|
||||
| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
|
||||
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
|
||||
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
|
||||
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
|
||||
| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
|
||||
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
|
||||
| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
|
||||
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
|
||||
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
|
||||
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
|
||||
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
|
||||
| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
|
||||
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
|
||||
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
|
||||
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
|
||||
| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
|
||||
| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
|
||||
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
|
||||
|
||||
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
|
||||
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
|
||||
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
|
||||
| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) |
|
||||
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
|
||||
| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
|
||||
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
|
||||
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
|
||||
| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
|
||||
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
|
||||
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
|
||||
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
|
||||
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
|
||||
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
|
||||
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
|
||||
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
|
||||
| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
|
||||
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
|
||||
| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
|
||||
| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
|
||||
| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
|
||||
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
|
||||
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
|
||||
| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
|
||||
|
||||
#### Level logging
|
||||
|
||||
|
@ -279,7 +337,7 @@ could do:
|
|||
|
||||
```go
|
||||
import (
|
||||
log "github.com/Sirupsen/logrus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
init() {
|
||||
|
@ -306,11 +364,15 @@ The built-in logging formatters are:
|
|||
without colors.
|
||||
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
|
||||
field to `true`. To force no colored output even if there is a TTY set the
|
||||
`DisableColors` field to `true`
|
||||
`DisableColors` field to `true`. For Windows, see
|
||||
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
|
||||
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
|
||||
* `logrus.JSONFormatter`. Logs fields as JSON.
|
||||
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
|
||||
|
||||
Third party logging formatters:
|
||||
|
||||
* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can by parsed by Kubernetes and Google Container Engine.
|
||||
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
|
||||
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
|
||||
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
|
||||
|
@ -356,6 +418,18 @@ srv := http.Server{
|
|||
Each line written to that writer will be printed the usual way, using formatters
|
||||
and hooks. The level for those entries is `info`.
|
||||
|
||||
This means that we can override the standard library logger easily:
|
||||
|
||||
```go
|
||||
logger := logrus.New()
|
||||
logger.Formatter = &logrus.JSONFormatter{}
|
||||
|
||||
// Use logrus for standard log output
|
||||
// Note that `log` here references stdlib's log
|
||||
// Not logrus imported under the name `log`.
|
||||
log.SetOutput(logger.Writer())
|
||||
```
|
||||
|
||||
#### Rotation
|
||||
|
||||
Log rotation is not provided with Logrus. Log rotation should be done by an
|
||||
|
@ -367,6 +441,7 @@ entries. It should not be a feature of the application-level logger.
|
|||
| Tool | Description |
|
||||
| ---- | ----------- |
|
||||
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
|
||||
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
|
||||
|
||||
#### Testing
|
||||
|
||||
|
@ -376,15 +451,24 @@ Logrus has a built in facility for asserting the presence of log messages. This
|
|||
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
|
||||
|
||||
```go
|
||||
logger, hook := NewNullLogger()
|
||||
logger.Error("Hello error")
|
||||
import(
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
assert.Equal(1, len(hook.Entries))
|
||||
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
|
||||
assert.Equal("Hello error", hook.LastEntry().Message)
|
||||
func TestSomething(t*testing.T){
|
||||
logger, hook := test.NewNullLogger()
|
||||
logger.Error("Helloerror")
|
||||
|
||||
hook.Reset()
|
||||
assert.Nil(hook.LastEntry())
|
||||
assert.Equal(t, 1, len(hook.Entries))
|
||||
assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
|
||||
assert.Equal(t, "Helloerror", hook.LastEntry().Message)
|
||||
|
||||
hook.Reset()
|
||||
assert.Nil(t, hook.LastEntry())
|
||||
}
|
||||
```
|
||||
|
||||
#### Fatal handlers
|
||||
|
@ -403,7 +487,7 @@ logrus.RegisterExitHandler(handler)
|
|||
...
|
||||
```
|
||||
|
||||
#### Thread safty
|
||||
#### Thread safety
|
||||
|
||||
By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs.
|
||||
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
|
||||
|
|
vendor/github.com/Sirupsen/logrus/alt_exit.go (generated, vendored): 2 lines changed
|
@ -1,7 +1,7 @@
|
|||
package logrus
|
||||
|
||||
// The following code was sourced and modified from the
|
||||
// https://bitbucket.org/tebeka/atexit package governed by the following license:
|
||||
// https://github.com/tebeka/atexit package governed by the following license:
|
||||
//
|
||||
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
|
||||
//
|
||||
|
|
vendor/github.com/Sirupsen/logrus/alt_exit_test.go (generated, vendored): 25 lines changed
|
@ -2,7 +2,10 @@ package logrus
|
|||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
@ -11,30 +14,36 @@ func TestRegister(t *testing.T) {
|
|||
current := len(handlers)
|
||||
RegisterExitHandler(func() {})
|
||||
if len(handlers) != current+1 {
|
||||
t.Fatalf("can't add handler")
|
||||
t.Fatalf("expected %d handlers, got %d", current+1, len(handlers))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandler(t *testing.T) {
|
||||
gofile := "/tmp/testprog.go"
|
||||
tempDir, err := ioutil.TempDir("", "test_handler")
|
||||
if err != nil {
|
||||
log.Fatalf("can't create temp dir. %q", err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
gofile := filepath.Join(tempDir, "gofile.go")
|
||||
if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil {
|
||||
t.Fatalf("can't create go file")
|
||||
t.Fatalf("can't create go file. %q", err)
|
||||
}
|
||||
|
||||
outfile := "/tmp/testprog.out"
|
||||
outfile := filepath.Join(tempDir, "outfile.out")
|
||||
arg := time.Now().UTC().String()
|
||||
err := exec.Command("go", "run", gofile, outfile, arg).Run()
|
||||
err = exec.Command("go", "run", gofile, outfile, arg).Run()
|
||||
if err == nil {
|
||||
t.Fatalf("completed normally, should have failed")
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadFile(outfile)
|
||||
if err != nil {
|
||||
t.Fatalf("can't read output file %s", outfile)
|
||||
t.Fatalf("can't read output file %s. %q", outfile, err)
|
||||
}
|
||||
|
||||
if string(data) != arg {
|
||||
t.Fatalf("bad data")
|
||||
t.Fatalf("bad data. Expected %q, got %q", data, arg)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -44,7 +53,7 @@ var testprog = []byte(`
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
|
vendor/github.com/Sirupsen/logrus/appveyor.yml (generated, vendored, new file): 14 lines changed
|
@ -0,0 +1,14 @@
|
|||
version: "{build}"
|
||||
platform: x64
|
||||
clone_folder: c:\gopath\src\github.com\sirupsen\logrus
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
install:
|
||||
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
|
||||
- go version
|
||||
build_script:
|
||||
- go get -t
|
||||
- go test
|
vendor/github.com/Sirupsen/logrus/doc.go (generated, vendored): 4 lines changed
|
@ -7,7 +7,7 @@ The simplest way to use Logrus is simply the package-level exported logger:
|
|||
package main
|
||||
|
||||
import (
|
||||
log "github.com/Sirupsen/logrus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
@ -21,6 +21,6 @@ The simplest way to use Logrus is simply the package-level exported logger:
|
|||
Output:
|
||||
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
|
||||
|
||||
For a full guide visit https://github.com/Sirupsen/logrus
|
||||
For a full guide visit https://github.com/sirupsen/logrus
|
||||
*/
|
||||
package logrus
|
||||
|
|
vendor/github.com/Sirupsen/logrus/entry.go (generated, vendored): 37 lines changed
|
@ -35,6 +35,7 @@ type Entry struct {
|
|||
Time time.Time
|
||||
|
||||
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
|
||||
// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
|
||||
Level Level
|
||||
|
||||
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
|
||||
|
@ -126,7 +127,7 @@ func (entry Entry) log(level Level, msg string) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Debug(args ...interface{}) {
|
||||
if entry.Logger.Level >= DebugLevel {
|
||||
if entry.Logger.level() >= DebugLevel {
|
||||
entry.log(DebugLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
@ -136,13 +137,13 @@ func (entry *Entry) Print(args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Info(args ...interface{}) {
|
||||
if entry.Logger.Level >= InfoLevel {
|
||||
if entry.Logger.level() >= InfoLevel {
|
||||
entry.log(InfoLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warn(args ...interface{}) {
|
||||
if entry.Logger.Level >= WarnLevel {
|
||||
if entry.Logger.level() >= WarnLevel {
|
||||
entry.log(WarnLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
@ -152,20 +153,20 @@ func (entry *Entry) Warning(args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Error(args ...interface{}) {
|
||||
if entry.Logger.Level >= ErrorLevel {
|
||||
if entry.Logger.level() >= ErrorLevel {
|
||||
entry.log(ErrorLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatal(args ...interface{}) {
|
||||
if entry.Logger.Level >= FatalLevel {
|
||||
if entry.Logger.level() >= FatalLevel {
|
||||
entry.log(FatalLevel, fmt.Sprint(args...))
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panic(args ...interface{}) {
|
||||
if entry.Logger.Level >= PanicLevel {
|
||||
if entry.Logger.level() >= PanicLevel {
|
||||
entry.log(PanicLevel, fmt.Sprint(args...))
|
||||
}
|
||||
panic(fmt.Sprint(args...))
|
||||
|
@ -174,13 +175,13 @@ func (entry *Entry) Panic(args ...interface{}) {
|
|||
// Entry Printf family functions
|
||||
|
||||
func (entry *Entry) Debugf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= DebugLevel {
|
||||
if entry.Logger.level() >= DebugLevel {
|
||||
entry.Debug(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Infof(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= InfoLevel {
|
||||
if entry.Logger.level() >= InfoLevel {
|
||||
entry.Info(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
@ -190,7 +191,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Warnf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= WarnLevel {
|
||||
if entry.Logger.level() >= WarnLevel {
|
||||
entry.Warn(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
@ -200,20 +201,20 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Errorf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= ErrorLevel {
|
||||
if entry.Logger.level() >= ErrorLevel {
|
||||
entry.Error(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= FatalLevel {
|
||||
if entry.Logger.level() >= FatalLevel {
|
||||
entry.Fatal(fmt.Sprintf(format, args...))
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= PanicLevel {
|
||||
if entry.Logger.level() >= PanicLevel {
|
||||
entry.Panic(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
@ -221,13 +222,13 @@ func (entry *Entry) Panicf(format string, args ...interface{}) {
|
|||
// Entry Println family functions
|
||||
|
||||
func (entry *Entry) Debugln(args ...interface{}) {
|
||||
if entry.Logger.Level >= DebugLevel {
|
||||
if entry.Logger.level() >= DebugLevel {
|
||||
entry.Debug(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Infoln(args ...interface{}) {
|
||||
if entry.Logger.Level >= InfoLevel {
|
||||
if entry.Logger.level() >= InfoLevel {
|
||||
entry.Info(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
@ -237,7 +238,7 @@ func (entry *Entry) Println(args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Warnln(args ...interface{}) {
|
||||
if entry.Logger.Level >= WarnLevel {
|
||||
if entry.Logger.level() >= WarnLevel {
|
||||
entry.Warn(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
@ -247,20 +248,20 @@ func (entry *Entry) Warningln(args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Errorln(args ...interface{}) {
|
||||
if entry.Logger.Level >= ErrorLevel {
|
||||
if entry.Logger.level() >= ErrorLevel {
|
||||
entry.Error(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalln(args ...interface{}) {
|
||||
if entry.Logger.Level >= FatalLevel {
|
||||
if entry.Logger.level() >= FatalLevel {
|
||||
entry.Fatal(entry.sprintlnn(args...))
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicln(args ...interface{}) {
|
||||
if entry.Logger.Level >= PanicLevel {
|
||||
if entry.Logger.level() >= PanicLevel {
|
||||
entry.Panic(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
|
vendor/github.com/Sirupsen/logrus/example_basic_test.go (generated, vendored, new file): 69 lines changed
|
@ -0,0 +1,69 @@
|
|||
package logrus_test
|
||||
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"os"
|
||||
)
|
||||
|
||||
func Example_basic() {
|
||||
var log = logrus.New()
|
||||
log.Formatter = new(logrus.JSONFormatter)
|
||||
log.Formatter = new(logrus.TextFormatter) //default
|
||||
log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output
|
||||
log.Level = logrus.DebugLevel
|
||||
log.Out = os.Stdout
|
||||
|
||||
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
|
||||
// if err == nil {
|
||||
// log.Out = file
|
||||
// } else {
|
||||
// log.Info("Failed to log to file, using default stderr")
|
||||
// }
|
||||
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err != nil {
|
||||
entry := err.(*logrus.Entry)
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"err_animal": entry.Data["animal"],
|
||||
"err_size": entry.Data["size"],
|
||||
"err_level": entry.Level,
|
||||
"err_message": entry.Message,
|
||||
"number": 100,
|
||||
}).Error("The ice breaks!") // or use Fatal() to force the process to exit with a nonzero code
|
||||
}
|
||||
}()
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"number": 8,
|
||||
}).Debug("Started observing beach")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"temperature": -4,
|
||||
}).Debug("Temperature changes")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "orca",
|
||||
"size": 9009,
|
||||
}).Panic("It's over 9000!")
|
||||
|
||||
// Output:
|
||||
// level=debug msg="Started observing beach" animal=walrus number=8
|
||||
// level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
||||
// level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
||||
// level=debug msg="Temperature changes" temperature=-4
|
||||
// level=panic msg="It's over 9000!" animal=orca size=9009
|
||||
// level=error msg="The ice breaks!" err_animal=orca err_level=panic err_message="It's over 9000!" err_size=9009 number=100 omg=true
|
||||
}
|
vendor/github.com/Sirupsen/logrus/example_hook_test.go (generated, vendored, new file): 35 lines changed
|
@ -0,0 +1,35 @@
|
|||
package logrus_test
|
||||
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"gopkg.in/gemnasium/logrus-airbrake-hook.v2"
|
||||
"os"
|
||||
)
|
||||
|
||||
func Example_hook() {
|
||||
var log = logrus.New()
|
||||
log.Formatter = new(logrus.TextFormatter) // default
|
||||
log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output
|
||||
log.Hooks.Add(airbrake.NewHook(123, "xyz", "development"))
|
||||
log.Out = os.Stdout
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 100,
|
||||
}).Error("The ice breaks!")
|
||||
|
||||
// Output:
|
||||
// level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
||||
// level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
||||
// level=error msg="The ice breaks!" number=100 omg=true
|
||||
}
|
vendor/github.com/Sirupsen/logrus/examples/basic/basic.go (generated, vendored): 50 lines changed
|
@ -1,50 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
var log = logrus.New()
|
||||
|
||||
func init() {
|
||||
log.Formatter = new(logrus.JSONFormatter)
|
||||
log.Formatter = new(logrus.TextFormatter) // default
|
||||
log.Level = logrus.DebugLevel
|
||||
}
|
||||
|
||||
func main() {
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"err": err,
|
||||
"number": 100,
|
||||
}).Fatal("The ice breaks!")
|
||||
}
|
||||
}()
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"number": 8,
|
||||
}).Debug("Started observing beach")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"temperature": -4,
|
||||
}).Debug("Temperature changes")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "orca",
|
||||
"size": 9009,
|
||||
}).Panic("It's over 9000!")
|
||||
}
|
vendor/github.com/Sirupsen/logrus/examples/hook/hook.go (generated, vendored): 30 lines changed
|
@ -1,30 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
"gopkg.in/gemnasium/logrus-airbrake-hook.v2"
|
||||
)
|
||||
|
||||
var log = logrus.New()
|
||||
|
||||
func init() {
|
||||
log.Formatter = new(logrus.TextFormatter) // default
|
||||
log.Hooks.Add(airbrake.NewHook(123, "xyz", "development"))
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 100,
|
||||
}).Fatal("The ice breaks!")
|
||||
}
|
vendor/github.com/Sirupsen/logrus/exported.go (generated, vendored): 4 lines changed
|
@ -31,14 +31,14 @@ func SetFormatter(formatter Formatter) {
|
|||
func SetLevel(level Level) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Level = level
|
||||
std.SetLevel(level)
|
||||
}
|
||||
|
||||
// GetLevel returns the standard logger level.
|
||||
func GetLevel() Level {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
return std.Level
|
||||
return std.level()
|
||||
}
|
||||
|
||||
// AddHook adds a hook to the standard logger hooks.
|
||||
|
|
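The exported.go hunk above routes the package-level `SetLevel` and `GetLevel` through the logger's own level accessors, matching the changelog entry about a race in level access (#512). A small sketch of the pattern that change targets; the goroutine, loop bounds, and sleep durations are illustrative only:

```go
package main

import (
	"time"

	"github.com/sirupsen/logrus"
)

func main() {
	// Log from a background goroutine while the main goroutine
	// changes the level via the updated accessors.
	go func() {
		for i := 0; i < 100; i++ {
			logrus.Info("background work")
			time.Sleep(time.Millisecond)
		}
	}()

	logrus.SetLevel(logrus.WarnLevel)
	time.Sleep(50 * time.Millisecond)
}
```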
vendor/github.com/Sirupsen/logrus/formatter.go (generated, vendored): 2 lines changed
|
@ -2,7 +2,7 @@ package logrus
|
|||
|
||||
import "time"
|
||||
|
||||
const DefaultTimestampFormat = time.RFC3339
|
||||
const defaultTimestampFormat = time.RFC3339
|
||||
|
||||
// The Formatter interface is used to implement a custom Formatter. It takes an
|
||||
// `Entry`. It exposes all the fields, including the default ones:
|
||||
|
|
vendor/github.com/Sirupsen/logrus/formatter_bench_test.go (generated, vendored): 3 lines changed
|
@ -80,11 +80,14 @@ func BenchmarkLargeJSONFormatter(b *testing.B) {
|
|||
}
|
||||
|
||||
func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
|
||||
logger := New()
|
||||
|
||||
entry := &Entry{
|
||||
Time: time.Time{},
|
||||
Level: InfoLevel,
|
||||
Message: "message",
|
||||
Data: fields,
|
||||
Logger: logger,
|
||||
}
|
||||
var d []byte
|
||||
var err error
|
||||
|
|
vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md (generated, vendored): 12 lines changed
|
@ -5,13 +5,13 @@
|
|||
```go
|
||||
import (
|
||||
"log/syslog"
|
||||
"github.com/Sirupsen/logrus"
|
||||
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
|
||||
"github.com/sirupsen/logrus"
|
||||
lSyslog "github.com/sirupsen/logrus/hooks/syslog"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log := logrus.New()
|
||||
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
|
||||
hook, err := lSyslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
|
||||
|
||||
if err == nil {
|
||||
log.Hooks.Add(hook)
|
||||
|
@ -24,13 +24,13 @@ If you want to connect to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "
|
|||
```go
|
||||
import (
|
||||
"log/syslog"
|
||||
"github.com/Sirupsen/logrus"
|
||||
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
|
||||
"github.com/sirupsen/logrus"
|
||||
lSyslog "github.com/sirupsen/logrus/hooks/syslog"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log := logrus.New()
|
||||
hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
|
||||
hook, err := lSyslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
|
||||
|
||||
if err == nil {
|
||||
log.Hooks.Add(hook)
|
||||
|
|
vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go (generated, vendored): 5 lines changed
|
@ -1,12 +1,13 @@
|
|||
// +build !windows,!nacl,!plan9
|
||||
|
||||
package logrus_syslog
|
||||
package syslog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"log/syslog"
|
||||
"os"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// SyslogHook to send logs via syslog.
|
||||
|
|
vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go (generated, vendored): 5 lines changed
|
@ -1,9 +1,10 @@
|
|||
package logrus_syslog
|
||||
package syslog
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
"log/syslog"
|
||||
"testing"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func TestLocalhostAddAndPrint(t *testing.T) {
|
||||
|
|
vendor/github.com/Sirupsen/logrus/hooks/test/test.go (generated, vendored): 48 lines changed
|
@ -1,17 +1,25 @@
|
|||
// The Test package is used for testing logrus. It is here for backwards
|
||||
// compatibility from when logrus' organization was upper-case. Please use
|
||||
// lower-case logrus and the `null` package instead of this one.
|
||||
package test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// test.Hook is a hook designed for dealing with logs in test scenarios.
|
||||
// Hook is a hook designed for dealing with logs in test scenarios.
|
||||
type Hook struct {
|
||||
// Entries is an array of all entries that have been received by this hook.
|
||||
// For safe access, use the AllEntries() method, rather than reading this
|
||||
// value directly.
|
||||
Entries []*logrus.Entry
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// Installs a test hook for the global logger.
|
||||
// NewGlobal installs a test hook for the global logger.
|
||||
func NewGlobal() *Hook {
|
||||
|
||||
hook := new(Hook)
|
||||
|
@ -21,7 +29,7 @@ func NewGlobal() *Hook {
|
|||
|
||||
}
|
||||
|
||||
// Installs a test hook for a given local logger.
|
||||
// NewLocal installs a test hook for a given local logger.
|
||||
func NewLocal(logger *logrus.Logger) *Hook {
|
||||
|
||||
hook := new(Hook)
|
||||
|
@ -31,7 +39,7 @@ func NewLocal(logger *logrus.Logger) *Hook {
|
|||
|
||||
}
|
||||
|
||||
// Creates a discarding logger and installs the test hook.
|
||||
// NewNullLogger creates a discarding logger and installs the test hook.
|
||||
func NewNullLogger() (*logrus.Logger, *Hook) {
|
||||
|
||||
logger := logrus.New()
|
||||
|
@ -42,6 +50,8 @@ func NewNullLogger() (*logrus.Logger, *Hook) {
|
|||
}
|
||||
|
||||
func (t *Hook) Fire(e *logrus.Entry) error {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
t.Entries = append(t.Entries, e)
|
||||
return nil
|
||||
}
|
||||
|
@ -51,17 +61,35 @@ func (t *Hook) Levels() []logrus.Level {
|
|||
}
|
||||
|
||||
// LastEntry returns the last entry that was logged or nil.
|
||||
func (t *Hook) LastEntry() (l *logrus.Entry) {
|
||||
|
||||
if i := len(t.Entries) - 1; i < 0 {
|
||||
func (t *Hook) LastEntry() *logrus.Entry {
|
||||
t.mu.RLock()
|
||||
defer t.mu.RUnlock()
|
||||
i := len(t.Entries) - 1
|
||||
if i < 0 {
|
||||
return nil
|
||||
} else {
|
||||
return t.Entries[i]
|
||||
}
|
||||
// Make a copy, for safety
|
||||
e := *t.Entries[i]
|
||||
return &e
|
||||
}
|
||||
|
||||
// AllEntries returns all entries that were logged.
|
||||
func (t *Hook) AllEntries() []*logrus.Entry {
|
||||
t.mu.RLock()
|
||||
defer t.mu.RUnlock()
|
||||
// Make a copy so the returned value won't race with future log requests
|
||||
entries := make([]*logrus.Entry, len(t.Entries))
|
||||
for i, entry := range t.Entries {
|
||||
// Make a copy, for safety
|
||||
e := *entry
|
||||
entries[i] = &e
|
||||
}
|
||||
return entries
|
||||
}
|
||||
|
||||
// Reset removes all Entries from this test hook.
|
||||
func (t *Hook) Reset() {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
t.Entries = make([]*logrus.Entry, 0)
|
||||
}
|
||||
|
|
2 vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go (generated, vendored)
@@ -3,7 +3,7 @@ package test
import (
"testing"

"github.com/Sirupsen/logrus"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
)

48 vendor/github.com/Sirupsen/logrus/json_formatter.go (generated, vendored)
@@ -5,18 +5,54 @@ import (
"fmt"
)

type fieldKey string

// FieldMap allows customization of the key names for default fields.
type FieldMap map[fieldKey]string

// Default key names for the default fields
const (
FieldKeyMsg = "msg"
FieldKeyLevel = "level"
FieldKeyTime = "time"
)

func (f FieldMap) resolve(key fieldKey) string {
if k, ok := f[key]; ok {
return k
}

return string(key)
}

// JSONFormatter formats logs into parsable json
type JSONFormatter struct {
// TimestampFormat sets the format used for marshaling timestamps.
TimestampFormat string

// DisableTimestamp allows disabling automatic timestamps in output
DisableTimestamp bool

// FieldMap allows users to customize the names of keys for default fields.
// As an example:
// formatter := &JSONFormatter{
//   FieldMap: FieldMap{
//      FieldKeyTime: "@timestamp",
//      FieldKeyLevel: "@level",
//      FieldKeyMsg: "@message",
//  },
// }
FieldMap FieldMap
}

// Format renders a single log entry
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
for k, v := range entry.Data {
switch v := v.(type) {
case error:
// Otherwise errors are ignored by `encoding/json`
// https://github.com/Sirupsen/logrus/issues/137
// https://github.com/sirupsen/logrus/issues/137
data[k] = v.Error()
default:
data[k] = v
@@ -26,12 +62,14 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = DefaultTimestampFormat
timestampFormat = defaultTimestampFormat
}

data["time"] = entry.Time.Format(timestampFormat)
data["msg"] = entry.Message
data["level"] = entry.Level.String()
if !f.DisableTimestamp {
data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
}
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()

serialized, err := json.Marshal(data)
if err != nil {

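The hunks above add FieldMap, the FieldKey* constants and DisableTimestamp to JSONFormatter. For reference, a minimal sketch of how a consumer of this vendored package might use the new options; the field names and message are illustrative, not taken from this repository:

package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	log := logrus.New()
	log.Out = os.Stdout
	// Rename the default "time"/"level"/"msg" keys; both options were added in this update.
	log.Formatter = &logrus.JSONFormatter{
		DisableTimestamp: false,
		FieldMap: logrus.FieldMap{
			logrus.FieldKeyTime:  "@timestamp",
			logrus.FieldKeyLevel: "@level",
			logrus.FieldKeyMsg:   "@message",
		},
	}
	log.WithField("component", "example").Info("formatted as JSON with custom keys")
}
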
81 vendor/github.com/Sirupsen/logrus/json_formatter_test.go (generated, vendored)
@@ -3,7 +3,7 @@ package logrus
import (
"encoding/json"
"errors"

"strings"
"testing"
)

@@ -118,3 +118,82 @@ func TestJSONEntryEndsWithNewline(t *testing.T) {
t.Fatal("Expected JSON log entry to end with a newline")
}
}

func TestJSONMessageKey(t *testing.T) {
formatter := &JSONFormatter{
FieldMap: FieldMap{
FieldKeyMsg: "message",
},
}

b, err := formatter.Format(&Entry{Message: "oh hai"})
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
s := string(b)
if !(strings.Contains(s, "message") && strings.Contains(s, "oh hai")) {
t.Fatal("Expected JSON to format message key")
}
}

func TestJSONLevelKey(t *testing.T) {
formatter := &JSONFormatter{
FieldMap: FieldMap{
FieldKeyLevel: "somelevel",
},
}

b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
s := string(b)
if !strings.Contains(s, "somelevel") {
t.Fatal("Expected JSON to format level key")
}
}

func TestJSONTimeKey(t *testing.T) {
formatter := &JSONFormatter{
FieldMap: FieldMap{
FieldKeyTime: "timeywimey",
},
}

b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
s := string(b)
if !strings.Contains(s, "timeywimey") {
t.Fatal("Expected JSON to format time key")
}
}

func TestJSONDisableTimestamp(t *testing.T) {
formatter := &JSONFormatter{
DisableTimestamp: true,
}

b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
s := string(b)
if strings.Contains(s, FieldKeyTime) {
t.Error("Did not prevent timestamp", s)
}
}

func TestJSONEnableTimestamp(t *testing.T) {
formatter := &JSONFormatter{}

b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
s := string(b)
if !strings.Contains(s, FieldKeyTime) {
t.Error("Timestamp not present", s)
}
}

53 vendor/github.com/Sirupsen/logrus/logger.go (generated, vendored)
@@ -4,6 +4,7 @@ import (
"io"
"os"
"sync"
"sync/atomic"
)

type Logger struct {
@@ -24,7 +25,7 @@ type Logger struct {
Formatter Formatter
// The logging level the logger should log at. This is typically (and defaults
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged. `logrus.Debug` is useful in
// logged.
Level Level
// Used to sync writing to the log. Locking is enabled by Default
mu MutexWrap
@@ -112,7 +113,7 @@ func (logger *Logger) WithError(err error) *Entry {
}

func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.Level >= DebugLevel {
if logger.level() >= DebugLevel {
entry := logger.newEntry()
entry.Debugf(format, args...)
logger.releaseEntry(entry)
@@ -120,7 +121,7 @@ func (logger *Logger) Debugf(format string, args ...interface{}) {
}

func (logger *Logger) Infof(format string, args ...interface{}) {
if logger.Level >= InfoLevel {
if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Infof(format, args...)
logger.releaseEntry(entry)
@@ -134,7 +135,7 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
}

func (logger *Logger) Warnf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
@@ -142,7 +143,7 @@ func (logger *Logger) Warnf(format string, args ...interface{}) {
}

func (logger *Logger) Warningf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
@@ -150,7 +151,7 @@ func (logger *Logger) Warningf(format string, args ...interface{}) {
}

func (logger *Logger) Errorf(format string, args ...interface{}) {
if logger.Level >= ErrorLevel {
if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Errorf(format, args...)
logger.releaseEntry(entry)
@@ -158,7 +159,7 @@ func (logger *Logger) Errorf(format string, args ...interface{}) {
}

func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.Level >= FatalLevel {
if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatalf(format, args...)
logger.releaseEntry(entry)
@@ -167,7 +168,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) {
}

func (logger *Logger) Panicf(format string, args ...interface{}) {
if logger.Level >= PanicLevel {
if logger.level() >= PanicLevel {
entry := logger.newEntry()
entry.Panicf(format, args...)
logger.releaseEntry(entry)
@@ -175,7 +176,7 @@ func (logger *Logger) Panicf(format string, args ...interface{}) {
}

func (logger *Logger) Debug(args ...interface{}) {
if logger.Level >= DebugLevel {
if logger.level() >= DebugLevel {
entry := logger.newEntry()
entry.Debug(args...)
logger.releaseEntry(entry)
@@ -183,7 +184,7 @@ func (logger *Logger) Debug(args ...interface{}) {
}

func (logger *Logger) Info(args ...interface{}) {
if logger.Level >= InfoLevel {
if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
@@ -197,7 +198,7 @@ func (logger *Logger) Print(args ...interface{}) {
}

func (logger *Logger) Warn(args ...interface{}) {
if logger.Level >= WarnLevel {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
@@ -205,7 +206,7 @@ func (logger *Logger) Warn(args ...interface{}) {
}

func (logger *Logger) Warning(args ...interface{}) {
if logger.Level >= WarnLevel {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
@@ -213,7 +214,7 @@ func (logger *Logger) Warning(args ...interface{}) {
}

func (logger *Logger) Error(args ...interface{}) {
if logger.Level >= ErrorLevel {
if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Error(args...)
logger.releaseEntry(entry)
@@ -221,7 +222,7 @@ func (logger *Logger) Error(args ...interface{}) {
}

func (logger *Logger) Fatal(args ...interface{}) {
if logger.Level >= FatalLevel {
if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatal(args...)
logger.releaseEntry(entry)
@@ -230,7 +231,7 @@ func (logger *Logger) Fatal(args ...interface{}) {
}

func (logger *Logger) Panic(args ...interface{}) {
if logger.Level >= PanicLevel {
if logger.level() >= PanicLevel {
entry := logger.newEntry()
entry.Panic(args...)
logger.releaseEntry(entry)
@@ -238,7 +239,7 @@ func (logger *Logger) Panic(args ...interface{}) {
}

func (logger *Logger) Debugln(args ...interface{}) {
if logger.Level >= DebugLevel {
if logger.level() >= DebugLevel {
entry := logger.newEntry()
entry.Debugln(args...)
logger.releaseEntry(entry)
@@ -246,7 +247,7 @@ func (logger *Logger) Debugln(args ...interface{}) {
}

func (logger *Logger) Infoln(args ...interface{}) {
if logger.Level >= InfoLevel {
if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Infoln(args...)
logger.releaseEntry(entry)
@@ -260,7 +261,7 @@ func (logger *Logger) Println(args ...interface{}) {
}

func (logger *Logger) Warnln(args ...interface{}) {
if logger.Level >= WarnLevel {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
@@ -268,7 +269,7 @@ func (logger *Logger) Warnln(args ...interface{}) {
}

func (logger *Logger) Warningln(args ...interface{}) {
if logger.Level >= WarnLevel {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
@@ -276,7 +277,7 @@ func (logger *Logger) Warningln(args ...interface{}) {
}

func (logger *Logger) Errorln(args ...interface{}) {
if logger.Level >= ErrorLevel {
if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Errorln(args...)
logger.releaseEntry(entry)
@@ -284,7 +285,7 @@ func (logger *Logger) Errorln(args ...interface{}) {
}

func (logger *Logger) Fatalln(args ...interface{}) {
if logger.Level >= FatalLevel {
if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatalln(args...)
logger.releaseEntry(entry)
@@ -293,7 +294,7 @@ func (logger *Logger) Fatalln(args ...interface{}) {
}

func (logger *Logger) Panicln(args ...interface{}) {
if logger.Level >= PanicLevel {
if logger.level() >= PanicLevel {
entry := logger.newEntry()
entry.Panicln(args...)
logger.releaseEntry(entry)
@@ -306,3 +307,11 @@ func (logger *Logger) Panicln(args ...interface{}) {
func (logger *Logger) SetNoLock() {
logger.mu.Disable()
}

func (logger *Logger) level() Level {
return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
}

func (logger *Logger) SetLevel(level Level) {
atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
}

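The logger.go hunks replace direct reads of logger.Level with an atomic level() accessor and add SetLevel(), so the level can be changed while other goroutines are logging. A minimal sketch of the intended call pattern; illustrative only, not code from this commit:

package main

import "github.com/sirupsen/logrus"

func main() {
	log := logrus.New()

	// SetLevel stores the level atomically; the Debug/Info/... helpers read it via level().
	log.SetLevel(logrus.DebugLevel)

	log.Debug("debug output enabled at runtime")
}
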
2 vendor/github.com/Sirupsen/logrus/logrus.go (generated, vendored)
@@ -10,7 +10,7 @@ import (
type Fields map[string]interface{}

// Level type
type Level uint8
type Level uint32

// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {

25 vendor/github.com/Sirupsen/logrus/logrus_test.go (generated, vendored)
@@ -359,3 +359,28 @@ func TestLogrusInterface(t *testing.T) {
e := logger.WithField("another", "value")
fn(e)
}

// Implements io.Writer using channels for synchronization, so we can wait on
// the Entry.Writer goroutine to write in a non-racey way. This does assume that
// there is a single call to Logger.Out for each message.
type channelWriter chan []byte

func (cw channelWriter) Write(p []byte) (int, error) {
cw <- p
return len(p), nil
}

func TestEntryWriter(t *testing.T) {
cw := channelWriter(make(chan []byte, 1))
log := New()
log.Out = cw
log.Formatter = new(JSONFormatter)
log.WithField("foo", "bar").WriterLevel(WarnLevel).Write([]byte("hello\n"))

bs := <-cw
var fields Fields
err := json.Unmarshal(bs, &fields)
assert.Nil(t, err)
assert.Equal(t, fields["foo"], "bar")
assert.Equal(t, fields["level"], "warning")
}

8 vendor/github.com/Sirupsen/logrus/terminal_appengine.go (generated, vendored, deleted)
@@ -1,8 +0,0 @@
// +build appengine

package logrus

// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
return true
}

6 vendor/github.com/Sirupsen/logrus/terminal_bsd.go (generated, vendored)
@@ -3,8 +3,8 @@
package logrus

import "syscall"
import "golang.org/x/sys/unix"

const ioctlReadTermios = syscall.TIOCGETA
const ioctlReadTermios = unix.TIOCGETA

type Termios syscall.Termios
type Termios unix.Termios

6 vendor/github.com/Sirupsen/logrus/terminal_linux.go (generated, vendored)
@@ -7,8 +7,8 @@
package logrus

import "syscall"
import "golang.org/x/sys/unix"

const ioctlReadTermios = syscall.TCGETS
const ioctlReadTermios = unix.TCGETS

type Termios syscall.Termios
type Termios unix.Termios

22 vendor/github.com/Sirupsen/logrus/terminal_notwindows.go (generated, vendored, deleted)
@@ -1,22 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux darwin freebsd openbsd netbsd dragonfly
// +build !appengine

package logrus

import (
"syscall"
"unsafe"
)

// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stderr
var termios Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
}

15 vendor/github.com/Sirupsen/logrus/terminal_solaris.go (generated, vendored, deleted)
@@ -1,15 +0,0 @@
// +build solaris,!appengine

package logrus

import (
"os"

"golang.org/x/sys/unix"
)

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
return err == nil
}

27 vendor/github.com/Sirupsen/logrus/terminal_windows.go (generated, vendored, deleted)
@@ -1,27 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build windows,!appengine

package logrus

import (
"syscall"
"unsafe"
)

var kernel32 = syscall.NewLazyDLL("kernel32.dll")

var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)

// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stderr
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
}

96 vendor/github.com/Sirupsen/logrus/text_formatter.go (generated, vendored)
@@ -3,10 +3,14 @@ package logrus
import (
"bytes"
"fmt"
"runtime"
"io"
"os"
"sort"
"strings"
"sync"
"time"

"golang.org/x/crypto/ssh/terminal"
)

const (
@@ -14,24 +18,19 @@ const (
red = 31
green = 32
yellow = 33
blue = 34
blue = 36
gray = 37
)

var (
baseTimestamp time.Time
isTerminal bool
)

func init() {
baseTimestamp = time.Now()
isTerminal = IsTerminal()
}

func miniTS() int {
return int(time.Since(baseTimestamp) / time.Second)
}

// TextFormatter formats logs into text
type TextFormatter struct {
// Set to true to bypass checking for a TTY before outputting colors.
ForceColors bool
@@ -54,11 +53,35 @@ type TextFormatter struct {
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool

// QuoteEmptyFields will wrap empty fields in quotes if true
QuoteEmptyFields bool

// Whether the logger's out is to a terminal
isTerminal bool

sync.Once
}

func (f *TextFormatter) init(entry *Entry) {
if entry.Logger != nil {
f.isTerminal = f.checkIfTerminal(entry.Logger.Out)
}
}

func (f *TextFormatter) checkIfTerminal(w io.Writer) bool {
switch v := w.(type) {
case *os.File:
return terminal.IsTerminal(int(v.Fd()))
default:
return false
}
}

// Format renders a single log entry
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
var b *bytes.Buffer
var keys []string = make([]string, 0, len(entry.Data))
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
@@ -74,12 +97,13 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
prefixFieldClashes(entry.Data)

isColorTerminal := isTerminal && (runtime.GOOS != "windows")
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
f.Do(func() { f.init(entry) })

isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors

timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = DefaultTimestampFormat
timestampFormat = defaultTimestampFormat
}
if isColored {
f.printColored(b, entry, keys, timestampFormat)
@@ -115,23 +139,29 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
levelText := strings.ToUpper(entry.Level.String())[0:4]

if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
if f.DisableTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
} else if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
f.appendValue(b, v)
}
}

func needsQuoting(text string) bool {
func (f *TextFormatter) needsQuoting(text string) bool {
if f.QuoteEmptyFields && len(text) == 0 {
return true
}
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.') {
ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
return true
}
}
@@ -139,27 +169,23 @@ func needsQuoting(text string) bool {
}

func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
if b.Len() > 0 {
b.WriteByte(' ')
}
b.WriteString(key)
b.WriteByte('=')
f.appendValue(b, value)
}

switch value := value.(type) {
case string:
if !needsQuoting(value) {
b.WriteString(value)
} else {
fmt.Fprintf(b, "%q", value)
}
case error:
errmsg := value.Error()
if !needsQuoting(errmsg) {
b.WriteString(errmsg)
} else {
fmt.Fprintf(b, "%q", value)
}
default:
fmt.Fprint(b, value)
func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
stringVal, ok := value.(string)
if !ok {
stringVal = fmt.Sprint(value)
}

b.WriteByte(' ')
if !f.needsQuoting(stringVal) {
b.WriteString(stringVal)
} else {
b.WriteString(fmt.Sprintf("%q", stringVal))
}
}

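TextFormatter gains QuoteEmptyFields, per-formatter terminal detection and DisableTimestamp handling in colored output. A minimal sketch of configuring those options from calling code; values are illustrative, not part of the commit:

package main

import "github.com/sirupsen/logrus"

func main() {
	log := logrus.New()
	log.Formatter = &logrus.TextFormatter{
		DisableColors:    true,
		FullTimestamp:    true,
		QuoteEmptyFields: true, // new in this update: empty values are rendered as ""
	}
	log.WithField("remote_ip", "").Warn("client connected without a remote address")
}
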
92 vendor/github.com/Sirupsen/logrus/text_formatter_test.go (generated, vendored)
@@ -3,17 +3,38 @@ package logrus
import (
"bytes"
"errors"
"fmt"
"strings"
"testing"
"time"
)

func TestFormatting(t *testing.T) {
tf := &TextFormatter{DisableColors: true}

testCases := []struct {
value string
expected string
}{
{`foo`, "time=\"0001-01-01T00:00:00Z\" level=panic test=foo\n"},
}

for _, tc := range testCases {
b, _ := tf.Format(WithField("test", tc.value))

if string(b) != tc.expected {
t.Errorf("formatting expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
}
}
}

func TestQuoting(t *testing.T) {
tf := &TextFormatter{DisableColors: true}

checkQuoting := func(q bool, value interface{}) {
b, _ := tf.Format(WithField("test", value))
idx := bytes.Index(b, ([]byte)("test="))
cont := bytes.Contains(b[idx+5:], []byte{'"'})
cont := bytes.Contains(b[idx+5:], []byte("\""))
if cont != q {
if q {
t.Errorf("quoting expected for: %#v", value)
@@ -23,14 +44,67 @@ func TestQuoting(t *testing.T) {
}
}

checkQuoting(false, "")
checkQuoting(false, "abcd")
checkQuoting(false, "v1.0")
checkQuoting(false, "1234567890")
checkQuoting(true, "/foobar")
checkQuoting(false, "/foobar")
checkQuoting(false, "foo_bar")
checkQuoting(false, "foo@bar")
checkQuoting(false, "foobar^")
checkQuoting(false, "+/-_^@f.oobar")
checkQuoting(true, "foobar$")
checkQuoting(true, "&foobar")
checkQuoting(true, "x y")
checkQuoting(true, "x,y")
checkQuoting(false, errors.New("invalid"))
checkQuoting(true, errors.New("invalid argument"))

// Test for quoting empty fields.
tf.QuoteEmptyFields = true
checkQuoting(true, "")
checkQuoting(false, "abcd")
checkQuoting(true, errors.New("invalid argument"))
}

func TestEscaping(t *testing.T) {
tf := &TextFormatter{DisableColors: true}

testCases := []struct {
value string
expected string
}{
{`ba"r`, `ba\"r`},
{`ba'r`, `ba'r`},
}

for _, tc := range testCases {
b, _ := tf.Format(WithField("test", tc.value))
if !bytes.Contains(b, []byte(tc.expected)) {
t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
}
}
}

func TestEscaping_Interface(t *testing.T) {
tf := &TextFormatter{DisableColors: true}

ts := time.Now()

testCases := []struct {
value interface{}
expected string
}{
{ts, fmt.Sprintf("\"%s\"", ts.String())},
{errors.New("error: something went wrong"), "\"error: something went wrong\""},
}

for _, tc := range testCases {
b, _ := tf.Format(WithField("test", tc.value))
if !bytes.Contains(b, []byte(tc.expected)) {
t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
}
}
}

func TestTimestampFormat(t *testing.T) {
@@ -39,10 +113,7 @@ func TestTimestampFormat(t *testing.T) {
customStr, _ := customFormatter.Format(WithField("test", "test"))
timeStart := bytes.Index(customStr, ([]byte)("time="))
timeEnd := bytes.Index(customStr, ([]byte)("level="))
timeStr := customStr[timeStart+5 : timeEnd-1]
if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' {
timeStr = timeStr[1 : len(timeStr)-1]
}
timeStr := customStr[timeStart+5+len("\"") : timeEnd-1-len("\"")]
if format == "" {
format = time.RFC3339
}
@@ -57,5 +128,14 @@ func TestTimestampFormat(t *testing.T) {
checkTimeStr("")
}

func TestDisableTimestampWithColoredOutput(t *testing.T) {
tf := &TextFormatter{DisableTimestamp: true, ForceColors: true}

b, _ := tf.Format(WithField("test", "test"))
if strings.Contains(string(b), "[0000]") {
t.Error("timestamp not expected when DisableTimestamp is true")
}
}

// TODO add tests for sorting etc., this requires a parser for the text
// formatter output.

29 vendor/github.com/Sirupsen/logrus/writer.go (generated, vendored)
@@ -11,39 +11,48 @@ func (logger *Logger) Writer() *io.PipeWriter {
}

func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
return NewEntry(logger).WriterLevel(level)
}

func (entry *Entry) Writer() *io.PipeWriter {
return entry.WriterLevel(InfoLevel)
}

func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
reader, writer := io.Pipe()

var printFunc func(args ...interface{})

switch level {
case DebugLevel:
printFunc = logger.Debug
printFunc = entry.Debug
case InfoLevel:
printFunc = logger.Info
printFunc = entry.Info
case WarnLevel:
printFunc = logger.Warn
printFunc = entry.Warn
case ErrorLevel:
printFunc = logger.Error
printFunc = entry.Error
case FatalLevel:
printFunc = logger.Fatal
printFunc = entry.Fatal
case PanicLevel:
printFunc = logger.Panic
printFunc = entry.Panic
default:
printFunc = logger.Print
printFunc = entry.Print
}

go logger.writerScanner(reader, printFunc)
go entry.writerScanner(reader, printFunc)
runtime.SetFinalizer(writer, writerFinalizer)

return writer
}

func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
printFunc(scanner.Text())
}
if err := scanner.Err(); err != nil {
logger.Errorf("Error while reading from Writer: %s", err)
entry.Errorf("Error while reading from Writer: %s", err)
}
reader.Close()
}

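writer.go moves WriterLevel from Logger to Entry, so a pipe writer can carry entry fields along with every forwarded line. A minimal sketch of routing the standard library logger through such a writer; this assumes the usual logrus API and is not code from this commit:

package main

import (
	stdlog "log"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// Entry.WriterLevel (introduced here) keeps the entry's fields on every forwarded line.
	w := logger.WithField("source", "stdlib").WriterLevel(logrus.WarnLevel)
	defer w.Close()

	stdlog.SetOutput(w)
	stdlog.Println("this line is re-emitted as a logrus warning")
}
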
2 vendor/github.com/fatih/structs/.travis.yml (generated, vendored)
@@ -1,6 +1,6 @@
language: go
go:
- 1.6
- 1.7.x
- tip
sudo: false
before_install:

11 vendor/github.com/fatih/structs/field.go (generated, vendored)
@@ -117,7 +117,16 @@ func (f *Field) Field(name string) *Field {
// FieldOk returns the field from a nested struct. The boolean returns whether
// the field was found (true) or not (false).
func (f *Field) FieldOk(name string) (*Field, bool) {
v := strctVal(f.value.Interface())
value := &f.value
// value must be settable so we need to make sure it holds the address of the
// variable and not a copy, so we can pass the pointer to strctVal instead of a
// copy (which is not assigned to any variable, hence not settable).
// see "https://blog.golang.org/laws-of-reflection#TOC_8."
if f.value.Kind() != reflect.Ptr {
a := f.value.Addr()
value = &a
}
v := strctVal(value.Interface())
t := v.Type()

field, ok := t.FieldByName(name)

14 vendor/github.com/fatih/structs/field_test.go (generated, vendored)
@@ -133,6 +133,20 @@ func TestField_Set(t *testing.T) {
}
}

func TestField_NotSettable(t *testing.T) {
a := map[int]Baz{
4: Baz{
A: "value",
},
}

s := New(a[4])

if err := s.Field("A").Set("newValue"); err != errNotSettable {
t.Errorf("Trying to set non-settable field should error with %q. Got %q instead.", errNotSettable, err)
}
}

func TestField_Zero(t *testing.T) {
s := newStruct()

105 vendor/github.com/fatih/structs/structs.go (generated, vendored)
@@ -56,7 +56,7 @@ func New(s interface{}) *Struct {
// in the output map. Example:
//
// // The FieldStruct's fields will be flattened into the output map.
// FieldStruct time.Time `structs:"flatten"`
// FieldStruct time.Time `structs:",flatten"`
//
// A tag value with the option of "omitnested" stops iterating further if the type
// is a struct. Example:
@@ -115,17 +115,17 @@ func (s *Struct) FillMap(out map[string]interface{}) {
}
}

if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
// look out for embedded structs, and convert them to a
// map[string]interface{} too
n := New(val.Interface())
n.TagName = s.TagName
m := n.Map()
isSubStruct = true
if len(m) == 0 {
finalVal = val.Interface()
} else {
finalVal = m
if !tagOpts.Has("omitnested") {
finalVal = s.nested(val)

v := reflect.ValueOf(val.Interface())
if v.Kind() == reflect.Ptr {
v = v.Elem()
}

switch v.Kind() {
case reflect.Map, reflect.Struct:
isSubStruct = true
}
} else {
finalVal = val.Interface()
@@ -431,7 +431,7 @@ func strctVal(s interface{}) reflect.Value {
v := reflect.ValueOf(s)

// if pointer get the underlying element≤
if v.Kind() == reflect.Ptr {
for v.Kind() == reflect.Ptr {
v = v.Elem()
}

@@ -505,3 +505,82 @@ func IsStruct(s interface{}) bool {
func Name(s interface{}) string {
return New(s).Name()
}

// nested retrieves recursively all types for the given value and returns the
// nested value.
func (s *Struct) nested(val reflect.Value) interface{} {
var finalVal interface{}

v := reflect.ValueOf(val.Interface())
if v.Kind() == reflect.Ptr {
v = v.Elem()
}

switch v.Kind() {
case reflect.Struct:
n := New(val.Interface())
n.TagName = s.TagName
m := n.Map()

// do not add the converted value if there are no exported fields, ie:
// time.Time
if len(m) == 0 {
finalVal = val.Interface()
} else {
finalVal = m
}
case reflect.Map:
// get the element type of the map
mapElem := val.Type()
switch val.Type().Kind() {
case reflect.Ptr, reflect.Array, reflect.Map,
reflect.Slice, reflect.Chan:
mapElem = val.Type().Elem()
if mapElem.Kind() == reflect.Ptr {
mapElem = mapElem.Elem()
}
}

// only iterate over struct types, ie: map[string]StructType,
// map[string][]StructType,
if mapElem.Kind() == reflect.Struct ||
(mapElem.Kind() == reflect.Slice &&
mapElem.Elem().Kind() == reflect.Struct) {
m := make(map[string]interface{}, val.Len())
for _, k := range val.MapKeys() {
m[k.String()] = s.nested(val.MapIndex(k))
}
finalVal = m
break
}

// TODO(arslan): should this be optional?
finalVal = val.Interface()
case reflect.Slice, reflect.Array:
if val.Type().Kind() == reflect.Interface {
finalVal = val.Interface()
break
}

// TODO(arslan): should this be optional?
// do not iterate of non struct types, just pass the value. Ie: []int,
// []string, co... We only iterate further if it's a struct.
// i.e []foo or []*foo
if val.Type().Elem().Kind() != reflect.Struct &&
!(val.Type().Elem().Kind() == reflect.Ptr &&
val.Type().Elem().Elem().Kind() == reflect.Struct) {
finalVal = val.Interface()
break
}

slices := make([]interface{}, val.Len(), val.Len())
for x := 0; x < val.Len(); x++ {
slices[x] = s.nested(val.Index(x))
}
finalVal = slices
default:
finalVal = val.Interface()
}

return finalVal
}

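The new nested() helper makes Map() recurse into map and slice values whose elements are structs, instead of passing them through untouched. A minimal sketch of the effect, using hypothetical types not taken from this repository:

package main

import (
	"fmt"

	"github.com/fatih/structs"
)

type Address struct {
	Country string `structs:"country"`
}

type Customer struct {
	Name      string               `structs:"name"`
	Addresses map[string][]Address `structs:"addresses"`
}

func main() {
	c := Customer{
		Name:      "test",
		Addresses: map[string][]Address{"home": {{Country: "Turkey"}}},
	}

	// With nested(), the map values and their struct elements are converted too,
	// printing something like: map[addresses:map[home:[map[country:Turkey]]] name:test]
	fmt.Println(structs.Map(c))
}
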
2 vendor/github.com/fatih/structs/structs_example_test.go (generated, vendored)
@@ -81,7 +81,7 @@ func ExampleMap_tags() {
}

func ExampleMap_nested() {
func ExampleMap_omitNested() {
// By default field with struct types are processed too. We can stop
// processing them via "omitnested" tag option.
type Server struct {

344 vendor/github.com/fatih/structs/structs_test.go (generated, vendored)
@@ -268,6 +268,268 @@ func TestMap_Nested(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestMap_NestedMapWithStructValues(t *testing.T) {
|
||||
type A struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
type B struct {
|
||||
A map[string]*A
|
||||
}
|
||||
|
||||
a := &A{Name: "example"}
|
||||
|
||||
b := &B{
|
||||
A: map[string]*A{
|
||||
"example_key": a,
|
||||
},
|
||||
}
|
||||
|
||||
m := Map(b)
|
||||
|
||||
if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
|
||||
t.Errorf("Map should return a map type, got: %v", typ)
|
||||
}
|
||||
|
||||
in, ok := m["A"].(map[string]interface{})
|
||||
if !ok {
|
||||
t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["A"])
|
||||
}
|
||||
|
||||
example := in["example_key"].(map[string]interface{})
|
||||
if name := example["Name"].(string); name != "example" {
|
||||
t.Errorf("Map nested struct's name field should give example, got: %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMap_NestedMapWithStringValues(t *testing.T) {
|
||||
type B struct {
|
||||
Foo map[string]string
|
||||
}
|
||||
|
||||
type A struct {
|
||||
B *B
|
||||
}
|
||||
|
||||
b := &B{
|
||||
Foo: map[string]string{
|
||||
"example_key": "example",
|
||||
},
|
||||
}
|
||||
|
||||
a := &A{B: b}
|
||||
|
||||
m := Map(a)
|
||||
|
||||
if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
|
||||
t.Errorf("Map should return a map type, got: %v", typ)
|
||||
}
|
||||
|
||||
in, ok := m["B"].(map[string]interface{})
|
||||
if !ok {
|
||||
t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"])
|
||||
}
|
||||
|
||||
foo := in["Foo"].(map[string]string)
|
||||
if name := foo["example_key"]; name != "example" {
|
||||
t.Errorf("Map nested struct's name field should give example, got: %s", name)
|
||||
}
|
||||
}
|
||||
func TestMap_NestedMapWithInterfaceValues(t *testing.T) {
|
||||
type B struct {
|
||||
Foo map[string]interface{}
|
||||
}
|
||||
|
||||
type A struct {
|
||||
B *B
|
||||
}
|
||||
|
||||
b := &B{
|
||||
Foo: map[string]interface{}{
|
||||
"example_key": "example",
|
||||
},
|
||||
}
|
||||
|
||||
a := &A{B: b}
|
||||
|
||||
m := Map(a)
|
||||
|
||||
if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
|
||||
t.Errorf("Map should return a map type, got: %v", typ)
|
||||
}
|
||||
|
||||
in, ok := m["B"].(map[string]interface{})
|
||||
if !ok {
|
||||
t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"])
|
||||
}
|
||||
|
||||
foo := in["Foo"].(map[string]interface{})
|
||||
if name := foo["example_key"]; name != "example" {
|
||||
t.Errorf("Map nested struct's name field should give example, got: %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMap_NestedMapWithSliceIntValues(t *testing.T) {
|
||||
type B struct {
|
||||
Foo map[string][]int
|
||||
}
|
||||
|
||||
type A struct {
|
||||
B *B
|
||||
}
|
||||
|
||||
b := &B{
|
||||
Foo: map[string][]int{
|
||||
"example_key": []int{80},
|
||||
},
|
||||
}
|
||||
|
||||
a := &A{B: b}
|
||||
|
||||
m := Map(a)
|
||||
|
||||
if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
|
||||
t.Errorf("Map should return a map type, got: %v", typ)
|
||||
}
|
||||
|
||||
in, ok := m["B"].(map[string]interface{})
|
||||
if !ok {
|
||||
t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"])
|
||||
}
|
||||
|
||||
foo := in["Foo"].(map[string][]int)
|
||||
if name := foo["example_key"]; name[0] != 80 {
|
||||
t.Errorf("Map nested struct's name field should give example, got: %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMap_NestedMapWithSliceStructValues(t *testing.T) {
|
||||
type address struct {
|
||||
Country string `structs:"country"`
|
||||
}
|
||||
|
||||
type B struct {
|
||||
Foo map[string][]address
|
||||
}
|
||||
|
||||
type A struct {
|
||||
B *B
|
||||
}
|
||||
|
||||
b := &B{
|
||||
Foo: map[string][]address{
|
||||
"example_key": []address{
|
||||
{Country: "Turkey"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
a := &A{B: b}
|
||||
m := Map(a)
|
||||
|
||||
if typ := reflect.TypeOf(m).Kind(); typ != reflect.Map {
|
||||
t.Errorf("Map should return a map type, got: %v", typ)
|
||||
}
|
||||
|
||||
in, ok := m["B"].(map[string]interface{})
|
||||
if !ok {
|
||||
t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"])
|
||||
}
|
||||
|
||||
foo := in["Foo"].(map[string]interface{})
|
||||
|
||||
addresses := foo["example_key"].([]interface{})
|
||||
|
||||
addr, ok := addresses[0].(map[string]interface{})
|
||||
if !ok {
|
||||
t.Errorf("Nested type of map should be of type map[string]interface{}, have %T", m["B"])
|
||||
}
|
||||
|
||||
if _, exists := addr["country"]; !exists {
|
||||
t.Errorf("Expecting country, but found Country")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMap_NestedSliceWithStructValues(t *testing.T) {
|
||||
type address struct {
|
||||
Country string `structs:"customCountryName"`
|
||||
}
|
||||
|
||||
type person struct {
|
||||
Name string `structs:"name"`
|
||||
Addresses []address `structs:"addresses"`
|
||||
}
|
||||
|
||||
p := person{
|
||||
Name: "test",
|
||||
Addresses: []address{
|
||||
address{Country: "England"},
|
||||
address{Country: "Italy"},
|
||||
},
|
||||
}
|
||||
mp := Map(p)
|
||||
|
||||
mpAddresses := mp["addresses"].([]interface{})
|
||||
if _, exists := mpAddresses[0].(map[string]interface{})["Country"]; exists {
|
||||
t.Errorf("Expecting customCountryName, but found Country")
|
||||
}
|
||||
|
||||
if _, exists := mpAddresses[0].(map[string]interface{})["customCountryName"]; !exists {
|
||||
t.Errorf("customCountryName key not found")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMap_NestedSliceWithPointerOfStructValues(t *testing.T) {
|
||||
type address struct {
|
||||
Country string `structs:"customCountryName"`
|
||||
}
|
||||
|
||||
type person struct {
|
||||
Name string `structs:"name"`
|
||||
Addresses []*address `structs:"addresses"`
|
||||
}
|
||||
|
||||
p := person{
|
||||
Name: "test",
|
||||
Addresses: []*address{
|
||||
&address{Country: "England"},
|
||||
&address{Country: "Italy"},
|
||||
},
|
||||
}
|
||||
mp := Map(p)
|
||||
|
||||
mpAddresses := mp["addresses"].([]interface{})
|
||||
if _, exists := mpAddresses[0].(map[string]interface{})["Country"]; exists {
|
||||
t.Errorf("Expecting customCountryName, but found Country")
|
||||
}
|
||||
|
||||
if _, exists := mpAddresses[0].(map[string]interface{})["customCountryName"]; !exists {
|
||||
t.Errorf("customCountryName key not found")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMap_NestedSliceWithIntValues(t *testing.T) {
|
||||
type person struct {
|
||||
Name string `structs:"name"`
|
||||
Ports []int `structs:"ports"`
|
||||
}
|
||||
|
||||
p := person{
|
||||
Name: "test",
|
||||
Ports: []int{80},
|
||||
}
|
||||
m := Map(p)
|
||||
|
||||
ports, ok := m["ports"].([]int)
|
||||
if !ok {
|
||||
t.Errorf("Nested type of map should be of type []int, have %T", m["ports"])
|
||||
}
|
||||
|
||||
if ports[0] != 80 {
|
||||
t.Errorf("Map nested struct's ports field should give 80, got: %v", ports)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMap_Anonymous(t *testing.T) {
|
||||
type A struct {
|
||||
Name string
|
||||
|
@@ -1022,6 +1284,28 @@ func TestNestedNilPointer(t *testing.T) {
|
|||
_ = Map(personWithDogWithCollar) // Doesn't panic
|
||||
}
|
||||
|
||||
func TestSetValueOnNestedField(t *testing.T) {
|
||||
type Base struct {
|
||||
ID int
|
||||
}
|
||||
|
||||
type User struct {
|
||||
Base
|
||||
Name string
|
||||
}
|
||||
|
||||
u := User{}
|
||||
s := New(&u)
|
||||
f := s.Field("Base").Field("ID")
|
||||
err := f.Set(10)
|
||||
if err != nil {
|
||||
t.Errorf("Error %v", err)
|
||||
}
|
||||
if f.Value().(int) != 10 {
|
||||
t.Errorf("Value should be equal to 10, got %v", f.Value())
|
||||
}
|
||||
}
|
||||
|
||||
type Person struct {
|
||||
Name string
|
||||
Age int
|
||||
|
@@ -1107,3 +1391,63 @@ func TestNonStringerTagWithStringOption(t *testing.T) {
|
|||
t.Errorf("Value for field Animal should not exist")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMap_InterfaceValue(t *testing.T) {
|
||||
type TestStruct struct {
|
||||
A interface{}
|
||||
}
|
||||
|
||||
expected := []byte("test value")
|
||||
|
||||
a := TestStruct{A: expected}
|
||||
s := Map(a)
|
||||
if !reflect.DeepEqual(s["A"], expected) {
|
||||
t.Errorf("Value does not match expected: %q != %q", s["A"], expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPointer2Pointer(t *testing.T) {
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err != nil {
|
||||
fmt.Printf("err %+v\n", err)
|
||||
t.Error("Internal nil pointer should not panic")
|
||||
}
|
||||
}()
|
||||
a := &Animal{
|
||||
Name: "Fluff",
|
||||
Age: 4,
|
||||
}
|
||||
_ = Map(&a)
|
||||
|
||||
b := &a
|
||||
_ = Map(&b)
|
||||
|
||||
c := &b
|
||||
_ = Map(&c)
|
||||
}
|
||||
|
||||
func TestMap_InterfaceTypeWithMapValue(t *testing.T) {
|
||||
type A struct {
|
||||
Name string `structs:"name"`
|
||||
Ip string `structs:"ip"`
|
||||
Query string `structs:"query"`
|
||||
Payload interface{} `structs:"payload"`
|
||||
}
|
||||
|
||||
a := A{
|
||||
Name: "test",
|
||||
Ip: "127.0.0.1",
|
||||
Query: "",
|
||||
Payload: map[string]string{"test_param": "test_param"},
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err != nil {
|
||||
t.Error("Converting Map with an interface{} type with map value should not panic")
|
||||
}
|
||||
}()
|
||||
|
||||
_ = Map(a)
|
||||
}
|
||||
|
|
16 vendor/github.com/golang/snappy/.gitignore (generated, vendored, new file)
@@ -0,0 +1,16 @@
|
|||
cmd/snappytool/snappytool
|
||||
testdata/bench
|
||||
|
||||
# These explicitly listed benchmark data files are for an obsolete version of
|
||||
# snappy_test.go.
|
||||
testdata/alice29.txt
|
||||
testdata/asyoulik.txt
|
||||
testdata/fireworks.jpeg
|
||||
testdata/geo.protodata
|
||||
testdata/html
|
||||
testdata/html_x_4
|
||||
testdata/kppkn.gtb
|
||||
testdata/lcet10.txt
|
||||
testdata/paper-100k.pdf
|
||||
testdata/plrabn12.txt
|
||||
testdata/urls.10K
|
15 vendor/github.com/golang/snappy/AUTHORS (generated, vendored, new file)
@@ -0,0 +1,15 @@
|
|||
# This is the official list of Snappy-Go authors for copyright purposes.
|
||||
# This file is distinct from the CONTRIBUTORS files.
|
||||
# See the latter for an explanation.
|
||||
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Damian Gryski <dgryski@gmail.com>
|
||||
Google Inc.
|
||||
Jan Mercl <0xjnml@gmail.com>
|
||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||
Sebastien Binet <seb.binet@gmail.com>
|
37 vendor/github.com/golang/snappy/CONTRIBUTORS (generated, vendored, new file)
@@ -0,0 +1,37 @@
|
|||
# This is the official list of people who can contribute
|
||||
# (and typically have contributed) code to the Snappy-Go repository.
|
||||
# The AUTHORS file lists the copyright holders; this file
|
||||
# lists people. For example, Google employees are listed here
|
||||
# but not in AUTHORS, because Google holds the copyright.
|
||||
#
|
||||
# The submission process automatically checks to make sure
|
||||
# that people submitting code are listed in this file (by email address).
|
||||
#
|
||||
# Names should be added to this file only after verifying that
|
||||
# the individual or the individual's organization has agreed to
|
||||
# the appropriate Contributor License Agreement, found here:
|
||||
#
|
||||
# http://code.google.com/legal/individual-cla-v1.0.html
|
||||
# http://code.google.com/legal/corporate-cla-v1.0.html
|
||||
#
|
||||
# The agreement for individuals can be filled out on the web.
|
||||
#
|
||||
# When adding J Random Contributor's name to this file,
|
||||
# either J's name or J's organization's name should be
|
||||
# added to the AUTHORS file, depending on whether the
|
||||
# individual or corporate CLA was used.
|
||||
|
||||
# Names should be added to this file like so:
|
||||
# Name <email address>
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Damian Gryski <dgryski@gmail.com>
|
||||
Jan Mercl <0xjnml@gmail.com>
|
||||
Kai Backman <kaib@golang.org>
|
||||
Marc-Antoine Ruel <maruel@chromium.org>
|
||||
Nigel Tao <nigeltao@golang.org>
|
||||
Rob Pike <r@golang.org>
|
||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||
Russ Cox <rsc@golang.org>
|
||||
Sebastien Binet <seb.binet@gmail.com>
|
27 vendor/github.com/golang/snappy/LICENSE (generated, vendored, new file)
@@ -0,0 +1,27 @@
|
|||
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
107 vendor/github.com/golang/snappy/README (generated, vendored, new file)
@@ -0,0 +1,107 @@
|
|||
The Snappy compression format in the Go programming language.
|
To download and install from source:

  $ go get github.com/golang/snappy

Unless otherwise noted, the Snappy-Go source files are distributed
under the BSD-style license found in the LICENSE file.



Benchmarks.

The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
or so files, the same set used by the C++ Snappy code (github.com/google/snappy
and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:

"go test -test.bench=."

_UFlat0-8   2.19GB/s ± 0%  html
_UFlat1-8   1.41GB/s ± 0%  urls
_UFlat2-8   23.5GB/s ± 2%  jpg
_UFlat3-8   1.91GB/s ± 0%  jpg_200
_UFlat4-8   14.0GB/s ± 1%  pdf
_UFlat5-8   1.97GB/s ± 0%  html4
_UFlat6-8    814MB/s ± 0%  txt1
_UFlat7-8    785MB/s ± 0%  txt2
_UFlat8-8    857MB/s ± 0%  txt3
_UFlat9-8    719MB/s ± 1%  txt4
_UFlat10-8  2.84GB/s ± 0%  pb
_UFlat11-8  1.05GB/s ± 0%  gaviota

_ZFlat0-8   1.04GB/s ± 0%  html
_ZFlat1-8    534MB/s ± 0%  urls
_ZFlat2-8   15.7GB/s ± 1%  jpg
_ZFlat3-8    740MB/s ± 3%  jpg_200
_ZFlat4-8   9.20GB/s ± 1%  pdf
_ZFlat5-8    991MB/s ± 0%  html4
_ZFlat6-8    379MB/s ± 0%  txt1
_ZFlat7-8    352MB/s ± 0%  txt2
_ZFlat8-8    396MB/s ± 1%  txt3
_ZFlat9-8    327MB/s ± 1%  txt4
_ZFlat10-8  1.33GB/s ± 1%  pb
_ZFlat11-8   605MB/s ± 1%  gaviota



"go test -test.bench=. -tags=noasm"

_UFlat0-8    621MB/s ± 2%  html
_UFlat1-8    494MB/s ± 1%  urls
_UFlat2-8   23.2GB/s ± 1%  jpg
_UFlat3-8   1.12GB/s ± 1%  jpg_200
_UFlat4-8   4.35GB/s ± 1%  pdf
_UFlat5-8    609MB/s ± 0%  html4
_UFlat6-8    296MB/s ± 0%  txt1
_UFlat7-8    288MB/s ± 0%  txt2
_UFlat8-8    309MB/s ± 1%  txt3
_UFlat9-8    280MB/s ± 1%  txt4
_UFlat10-8   753MB/s ± 0%  pb
_UFlat11-8   400MB/s ± 0%  gaviota

_ZFlat0-8    409MB/s ± 1%  html
_ZFlat1-8    250MB/s ± 1%  urls
_ZFlat2-8   12.3GB/s ± 1%  jpg
_ZFlat3-8    132MB/s ± 0%  jpg_200
_ZFlat4-8   2.92GB/s ± 0%  pdf
_ZFlat5-8    405MB/s ± 1%  html4
_ZFlat6-8    179MB/s ± 1%  txt1
_ZFlat7-8    170MB/s ± 1%  txt2
_ZFlat8-8    189MB/s ± 1%  txt3
_ZFlat9-8    164MB/s ± 1%  txt4
_ZFlat10-8   479MB/s ± 1%  pb
_ZFlat11-8   270MB/s ± 1%  gaviota



For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
are the numbers from C++ Snappy's

make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log

BM_UFlat/0     2.4GB/s  html
BM_UFlat/1     1.4GB/s  urls
BM_UFlat/2    21.8GB/s  jpg
BM_UFlat/3     1.5GB/s  jpg_200
BM_UFlat/4    13.3GB/s  pdf
BM_UFlat/5     2.1GB/s  html4
BM_UFlat/6     1.0GB/s  txt1
BM_UFlat/7   959.4MB/s  txt2
BM_UFlat/8     1.0GB/s  txt3
BM_UFlat/9   864.5MB/s  txt4
BM_UFlat/10    2.9GB/s  pb
BM_UFlat/11    1.2GB/s  gaviota

BM_ZFlat/0   944.3MB/s  html (22.31 %)
BM_ZFlat/1   501.6MB/s  urls (47.78 %)
BM_ZFlat/2    14.3GB/s  jpg (99.95 %)
BM_ZFlat/3   538.3MB/s  jpg_200 (73.00 %)
BM_ZFlat/4     8.3GB/s  pdf (83.30 %)
BM_ZFlat/5   903.5MB/s  html4 (22.52 %)
BM_ZFlat/6   336.0MB/s  txt1 (57.88 %)
BM_ZFlat/7   312.3MB/s  txt2 (61.91 %)
BM_ZFlat/8   353.1MB/s  txt3 (54.99 %)
BM_ZFlat/9   289.9MB/s  txt4 (66.26 %)
BM_ZFlat/10    1.2GB/s  pb (19.68 %)
BM_ZFlat/11  527.4MB/s  gaviota (37.72 %)
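
For orientation beyond the benchmarks, here is a minimal round-trip sketch of the block-level API exercised above. It is not part of the vendored files; the sample data and error handling are illustrative only.

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := bytes.Repeat([]byte("hello snappy "), 100)

	// Block format: Encode never returns an error; Decode reports corrupt input.
	enc := snappy.Encode(nil, src)
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes, round-trip ok: %v\n", len(src), len(enc), bytes.Equal(src, dec))
}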
77 vendor/github.com/golang/snappy/cmd/snappytool/main.cpp (generated, vendored, Normal file)
@@ -0,0 +1,77 @@
/*
To build the snappytool binary:
g++ main.cpp /usr/lib/libsnappy.a -o snappytool
or, if you have built the C++ snappy library from source:
g++ main.cpp /path/to/your/snappy/.libs/libsnappy.a -o snappytool
after running "make" from your snappy checkout directory.
*/

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include "snappy.h"

#define N 1000000

char dst[N];
char src[N];

int main(int argc, char** argv) {
  // Parse args.
  if (argc != 2) {
    fprintf(stderr, "exactly one of -d or -e must be given\n");
    return 1;
  }
  bool decode = strcmp(argv[1], "-d") == 0;
  bool encode = strcmp(argv[1], "-e") == 0;
  if (decode == encode) {
    fprintf(stderr, "exactly one of -d or -e must be given\n");
    return 1;
  }

  // Read all of stdin into src[:s].
  size_t s = 0;
  while (1) {
    if (s == N) {
      fprintf(stderr, "input too large\n");
      return 1;
    }
    ssize_t n = read(0, src+s, N-s);
    if (n == 0) {
      break;
    }
    if (n < 0) {
      fprintf(stderr, "read error: %s\n", strerror(errno));
      // TODO: handle EAGAIN, EINTR?
      return 1;
    }
    s += n;
  }

  // Encode or decode src[:s] to dst[:d], and write to stdout.
  size_t d = 0;
  if (encode) {
    if (N < snappy::MaxCompressedLength(s)) {
      fprintf(stderr, "input too large after encoding\n");
      return 1;
    }
    snappy::RawCompress(src, s, dst, &d);
  } else {
    if (!snappy::GetUncompressedLength(src, s, &d)) {
      fprintf(stderr, "could not get uncompressed length\n");
      return 1;
    }
    if (N < d) {
      fprintf(stderr, "input too large after decoding\n");
      return 1;
    }
    if (!snappy::RawUncompress(src, s, dst)) {
      fprintf(stderr, "input was not valid Snappy-compressed data\n");
      return 1;
    }
  }
  write(1, dst, d);
  return 0;
}
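
A hypothetical Go counterpart to the C++ tool above, built on this package's block Encode/Decode so the two implementations can cross-check each other's output. The flag handling mirrors snappytool and is illustrative only, not part of the vendored files.

package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/golang/snappy"
)

func main() {
	if len(os.Args) != 2 || (os.Args[1] != "-d" && os.Args[1] != "-e") {
		log.Fatal("exactly one of -d or -e must be given")
	}
	src, err := ioutil.ReadAll(os.Stdin) // read all of stdin, as the C++ tool does
	if err != nil {
		log.Fatalf("read error: %v", err)
	}
	var dst []byte
	if os.Args[1] == "-e" {
		dst = snappy.Encode(nil, src)
	} else if dst, err = snappy.Decode(nil, src); err != nil {
		log.Fatalf("decode error: %v", err)
	}
	if _, err := os.Stdout.Write(dst); err != nil {
		log.Fatalf("write error: %v", err)
	}
}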
237 vendor/github.com/golang/snappy/decode.go (generated, vendored, Normal file)
@@ -0,0 +1,237 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package snappy
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrCorrupt reports that the input is invalid.
|
||||
ErrCorrupt = errors.New("snappy: corrupt input")
|
||||
// ErrTooLarge reports that the uncompressed length is too large.
|
||||
ErrTooLarge = errors.New("snappy: decoded block is too large")
|
||||
// ErrUnsupported reports that the input isn't supported.
|
||||
ErrUnsupported = errors.New("snappy: unsupported input")
|
||||
|
||||
errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
|
||||
)
|
||||
|
||||
// DecodedLen returns the length of the decoded block.
|
||||
func DecodedLen(src []byte) (int, error) {
|
||||
v, _, err := decodedLen(src)
|
||||
return v, err
|
||||
}
|
||||
|
||||
// decodedLen returns the length of the decoded block and the number of bytes
|
||||
// that the length header occupied.
|
||||
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
|
||||
v, n := binary.Uvarint(src)
|
||||
if n <= 0 || v > 0xffffffff {
|
||||
return 0, 0, ErrCorrupt
|
||||
}
|
||||
|
||||
const wordSize = 32 << (^uint(0) >> 32 & 1)
|
||||
if wordSize == 32 && v > 0x7fffffff {
|
||||
return 0, 0, ErrTooLarge
|
||||
}
|
||||
return int(v), n, nil
|
||||
}
|
||||
|
||||
const (
|
||||
decodeErrCodeCorrupt = 1
|
||||
decodeErrCodeUnsupportedLiteralLength = 2
|
||||
)
|
||||
|
||||
// Decode returns the decoded form of src. The returned slice may be a sub-
|
||||
// slice of dst if dst was large enough to hold the entire decoded block.
|
||||
// Otherwise, a newly allocated slice will be returned.
|
||||
//
|
||||
// The dst and src must not overlap. It is valid to pass a nil dst.
|
||||
func Decode(dst, src []byte) ([]byte, error) {
|
||||
dLen, s, err := decodedLen(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if dLen <= len(dst) {
|
||||
dst = dst[:dLen]
|
||||
} else {
|
||||
dst = make([]byte, dLen)
|
||||
}
|
||||
switch decode(dst, src[s:]) {
|
||||
case 0:
|
||||
return dst, nil
|
||||
case decodeErrCodeUnsupportedLiteralLength:
|
||||
return nil, errUnsupportedLiteralLength
|
||||
}
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
// NewReader returns a new Reader that decompresses from r, using the framing
|
||||
// format described at
|
||||
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||
func NewReader(r io.Reader) *Reader {
|
||||
return &Reader{
|
||||
r: r,
|
||||
decoded: make([]byte, maxBlockSize),
|
||||
buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
|
||||
}
|
||||
}
|
||||
|
||||
// Reader is an io.Reader that can read Snappy-compressed bytes.
|
||||
type Reader struct {
|
||||
r io.Reader
|
||||
err error
|
||||
decoded []byte
|
||||
buf []byte
|
||||
// decoded[i:j] contains decoded bytes that have not yet been passed on.
|
||||
i, j int
|
||||
readHeader bool
|
||||
}
|
||||
|
||||
// Reset discards any buffered data, resets all state, and switches the Snappy
|
||||
// reader to read from r. This permits reusing a Reader rather than allocating
|
||||
// a new one.
|
||||
func (r *Reader) Reset(reader io.Reader) {
|
||||
r.r = reader
|
||||
r.err = nil
|
||||
r.i = 0
|
||||
r.j = 0
|
||||
r.readHeader = false
|
||||
}
|
||||
|
||||
func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
|
||||
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
|
||||
if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
|
||||
r.err = ErrCorrupt
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Read satisfies the io.Reader interface.
|
||||
func (r *Reader) Read(p []byte) (int, error) {
|
||||
if r.err != nil {
|
||||
return 0, r.err
|
||||
}
|
||||
for {
|
||||
if r.i < r.j {
|
||||
n := copy(p, r.decoded[r.i:r.j])
|
||||
r.i += n
|
||||
return n, nil
|
||||
}
|
||||
if !r.readFull(r.buf[:4], true) {
|
||||
return 0, r.err
|
||||
}
|
||||
chunkType := r.buf[0]
|
||||
if !r.readHeader {
|
||||
if chunkType != chunkTypeStreamIdentifier {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
r.readHeader = true
|
||||
}
|
||||
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
|
||||
if chunkLen > len(r.buf) {
|
||||
r.err = ErrUnsupported
|
||||
return 0, r.err
|
||||
}
|
||||
|
||||
// The chunk types are specified at
|
||||
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||
switch chunkType {
|
||||
case chunkTypeCompressedData:
|
||||
// Section 4.2. Compressed data (chunk type 0x00).
|
||||
if chunkLen < checksumSize {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
buf := r.buf[:chunkLen]
|
||||
if !r.readFull(buf, false) {
|
||||
return 0, r.err
|
||||
}
|
||||
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
|
||||
buf = buf[checksumSize:]
|
||||
|
||||
n, err := DecodedLen(buf)
|
||||
if err != nil {
|
||||
r.err = err
|
||||
return 0, r.err
|
||||
}
|
||||
if n > len(r.decoded) {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
if _, err := Decode(r.decoded, buf); err != nil {
|
||||
r.err = err
|
||||
return 0, r.err
|
||||
}
|
||||
if crc(r.decoded[:n]) != checksum {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
r.i, r.j = 0, n
|
||||
continue
|
||||
|
||||
case chunkTypeUncompressedData:
|
||||
// Section 4.3. Uncompressed data (chunk type 0x01).
|
||||
if chunkLen < checksumSize {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
buf := r.buf[:checksumSize]
|
||||
if !r.readFull(buf, false) {
|
||||
return 0, r.err
|
||||
}
|
||||
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
|
||||
// Read directly into r.decoded instead of via r.buf.
|
||||
n := chunkLen - checksumSize
|
||||
if n > len(r.decoded) {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
if !r.readFull(r.decoded[:n], false) {
|
||||
return 0, r.err
|
||||
}
|
||||
if crc(r.decoded[:n]) != checksum {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
r.i, r.j = 0, n
|
||||
continue
|
||||
|
||||
case chunkTypeStreamIdentifier:
|
||||
// Section 4.1. Stream identifier (chunk type 0xff).
|
||||
if chunkLen != len(magicBody) {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
if !r.readFull(r.buf[:len(magicBody)], false) {
|
||||
return 0, r.err
|
||||
}
|
||||
for i := 0; i < len(magicBody); i++ {
|
||||
if r.buf[i] != magicBody[i] {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if chunkType <= 0x7f {
|
||||
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
|
||||
r.err = ErrUnsupported
|
||||
return 0, r.err
|
||||
}
|
||||
// Section 4.4 Padding (chunk type 0xfe).
|
||||
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
|
||||
if !r.readFull(r.buf[:chunkLen], false) {
|
||||
return 0, r.err
|
||||
}
|
||||
}
|
||||
}
|
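
A minimal sketch of stream decompression through NewReader and the framing format documented in decode.go above. The input file name is an assumption for illustration; the sketch is not part of the vendored files.

package main

import (
	"io"
	"log"
	"os"

	"github.com/golang/snappy"
)

func main() {
	f, err := os.Open("payload.sz") // hypothetical input in the framing format
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// NewReader decodes the stream chunk by chunk and checks each chunk's CRC.
	if _, err := io.Copy(os.Stdout, snappy.NewReader(f)); err != nil {
		log.Fatal(err)
	}
}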
14 vendor/github.com/golang/snappy/decode_amd64.go (generated, vendored, Normal file)
@@ -0,0 +1,14 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm

package snappy

// decode has the same semantics as in decode_other.go.
//
//go:noescape
func decode(dst, src []byte) int
490 vendor/github.com/golang/snappy/decode_amd64.s (generated, vendored, Normal file)
@@ -0,0 +1,490 @@
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// The asm code generally follows the pure Go code in decode_other.go, except
|
||||
// where marked with a "!!!".
|
||||
|
||||
// func decode(dst, src []byte) int
|
||||
//
|
||||
// All local variables fit into registers. The non-zero stack size is only to
|
||||
// spill registers and push args when issuing a CALL. The register allocation:
|
||||
// - AX scratch
|
||||
// - BX scratch
|
||||
// - CX length or x
|
||||
// - DX offset
|
||||
// - SI &src[s]
|
||||
// - DI &dst[d]
|
||||
// + R8 dst_base
|
||||
// + R9 dst_len
|
||||
// + R10 dst_base + dst_len
|
||||
// + R11 src_base
|
||||
// + R12 src_len
|
||||
// + R13 src_base + src_len
|
||||
// - R14 used by doCopy
|
||||
// - R15 used by doCopy
|
||||
//
|
||||
// The registers R8-R13 (marked with a "+") are set at the start of the
|
||||
// function, and after a CALL returns, and are not otherwise modified.
|
||||
//
|
||||
// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
|
||||
// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
|
||||
TEXT ·decode(SB), NOSPLIT, $48-56
|
||||
// Initialize SI, DI and R8-R13.
|
||||
MOVQ dst_base+0(FP), R8
|
||||
MOVQ dst_len+8(FP), R9
|
||||
MOVQ R8, DI
|
||||
MOVQ R8, R10
|
||||
ADDQ R9, R10
|
||||
MOVQ src_base+24(FP), R11
|
||||
MOVQ src_len+32(FP), R12
|
||||
MOVQ R11, SI
|
||||
MOVQ R11, R13
|
||||
ADDQ R12, R13
|
||||
|
||||
loop:
|
||||
// for s < len(src)
|
||||
CMPQ SI, R13
|
||||
JEQ end
|
||||
|
||||
// CX = uint32(src[s])
|
||||
//
|
||||
// switch src[s] & 0x03
|
||||
MOVBLZX (SI), CX
|
||||
MOVL CX, BX
|
||||
ANDL $3, BX
|
||||
CMPL BX, $1
|
||||
JAE tagCopy
|
||||
|
||||
// ----------------------------------------
|
||||
// The code below handles literal tags.
|
||||
|
||||
// case tagLiteral:
|
||||
// x := uint32(src[s] >> 2)
|
||||
// switch
|
||||
SHRL $2, CX
|
||||
CMPL CX, $60
|
||||
JAE tagLit60Plus
|
||||
|
||||
// case x < 60:
|
||||
// s++
|
||||
INCQ SI
|
||||
|
||||
doLit:
|
||||
// This is the end of the inner "switch", when we have a literal tag.
|
||||
//
|
||||
// We assume that CX == x and x fits in a uint32, where x is the variable
|
||||
// used in the pure Go decode_other.go code.
|
||||
|
||||
// length = int(x) + 1
|
||||
//
|
||||
// Unlike the pure Go code, we don't need to check if length <= 0 because
|
||||
// CX can hold 64 bits, so the increment cannot overflow.
|
||||
INCQ CX
|
||||
|
||||
// Prepare to check if copying length bytes will run past the end of dst or
|
||||
// src.
|
||||
//
|
||||
// AX = len(dst) - d
|
||||
// BX = len(src) - s
|
||||
MOVQ R10, AX
|
||||
SUBQ DI, AX
|
||||
MOVQ R13, BX
|
||||
SUBQ SI, BX
|
||||
|
||||
// !!! Try a faster technique for short (16 or fewer bytes) copies.
|
||||
//
|
||||
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
|
||||
// goto callMemmove // Fall back on calling runtime·memmove.
|
||||
// }
|
||||
//
|
||||
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
|
||||
// against 21 instead of 16, because it cannot assume that all of its input
|
||||
// is contiguous in memory and so it needs to leave enough source bytes to
|
||||
// read the next tag without refilling buffers, but Go's Decode assumes
|
||||
// contiguousness (the src argument is a []byte).
|
||||
CMPQ CX, $16
|
||||
JGT callMemmove
|
||||
CMPQ AX, $16
|
||||
JLT callMemmove
|
||||
CMPQ BX, $16
|
||||
JLT callMemmove
|
||||
|
||||
// !!! Implement the copy from src to dst as a 16-byte load and store.
|
||||
// (Decode's documentation says that dst and src must not overlap.)
|
||||
//
|
||||
// This always copies 16 bytes, instead of only length bytes, but that's
|
||||
// OK. If the input is a valid Snappy encoding then subsequent iterations
|
||||
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
|
||||
// non-nil error), so the overrun will be ignored.
|
||||
//
|
||||
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
|
||||
// 16-byte loads and stores. This technique probably wouldn't be as
|
||||
// effective on architectures that are fussier about alignment.
|
||||
MOVOU 0(SI), X0
|
||||
MOVOU X0, 0(DI)
|
||||
|
||||
// d += length
|
||||
// s += length
|
||||
ADDQ CX, DI
|
||||
ADDQ CX, SI
|
||||
JMP loop
|
||||
|
||||
callMemmove:
|
||||
// if length > len(dst)-d || length > len(src)-s { etc }
|
||||
CMPQ CX, AX
|
||||
JGT errCorrupt
|
||||
CMPQ CX, BX
|
||||
JGT errCorrupt
|
||||
|
||||
// copy(dst[d:], src[s:s+length])
|
||||
//
|
||||
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
|
||||
// DI, SI and CX as arguments. Coincidentally, we also need to spill those
|
||||
// three registers to the stack, to save local variables across the CALL.
|
||||
MOVQ DI, 0(SP)
|
||||
MOVQ SI, 8(SP)
|
||||
MOVQ CX, 16(SP)
|
||||
MOVQ DI, 24(SP)
|
||||
MOVQ SI, 32(SP)
|
||||
MOVQ CX, 40(SP)
|
||||
CALL runtime·memmove(SB)
|
||||
|
||||
// Restore local variables: unspill registers from the stack and
|
||||
// re-calculate R8-R13.
|
||||
MOVQ 24(SP), DI
|
||||
MOVQ 32(SP), SI
|
||||
MOVQ 40(SP), CX
|
||||
MOVQ dst_base+0(FP), R8
|
||||
MOVQ dst_len+8(FP), R9
|
||||
MOVQ R8, R10
|
||||
ADDQ R9, R10
|
||||
MOVQ src_base+24(FP), R11
|
||||
MOVQ src_len+32(FP), R12
|
||||
MOVQ R11, R13
|
||||
ADDQ R12, R13
|
||||
|
||||
// d += length
|
||||
// s += length
|
||||
ADDQ CX, DI
|
||||
ADDQ CX, SI
|
||||
JMP loop
|
||||
|
||||
tagLit60Plus:
|
||||
// !!! This fragment does the
|
||||
//
|
||||
// s += x - 58; if uint(s) > uint(len(src)) { etc }
|
||||
//
|
||||
// checks. In the asm version, we code it once instead of once per switch case.
|
||||
ADDQ CX, SI
|
||||
SUBQ $58, SI
|
||||
MOVQ SI, BX
|
||||
SUBQ R11, BX
|
||||
CMPQ BX, R12
|
||||
JA errCorrupt
|
||||
|
||||
// case x == 60:
|
||||
CMPL CX, $61
|
||||
JEQ tagLit61
|
||||
JA tagLit62Plus
|
||||
|
||||
// x = uint32(src[s-1])
|
||||
MOVBLZX -1(SI), CX
|
||||
JMP doLit
|
||||
|
||||
tagLit61:
|
||||
// case x == 61:
|
||||
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
|
||||
MOVWLZX -2(SI), CX
|
||||
JMP doLit
|
||||
|
||||
tagLit62Plus:
|
||||
CMPL CX, $62
|
||||
JA tagLit63
|
||||
|
||||
// case x == 62:
|
||||
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
|
||||
MOVWLZX -3(SI), CX
|
||||
MOVBLZX -1(SI), BX
|
||||
SHLL $16, BX
|
||||
ORL BX, CX
|
||||
JMP doLit
|
||||
|
||||
tagLit63:
|
||||
// case x == 63:
|
||||
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
|
||||
MOVL -4(SI), CX
|
||||
JMP doLit
|
||||
|
||||
// The code above handles literal tags.
|
||||
// ----------------------------------------
|
||||
// The code below handles copy tags.
|
||||
|
||||
tagCopy4:
|
||||
// case tagCopy4:
|
||||
// s += 5
|
||||
ADDQ $5, SI
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
MOVQ SI, BX
|
||||
SUBQ R11, BX
|
||||
CMPQ BX, R12
|
||||
JA errCorrupt
|
||||
|
||||
// length = 1 + int(src[s-5])>>2
|
||||
SHRQ $2, CX
|
||||
INCQ CX
|
||||
|
||||
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
|
||||
MOVLQZX -4(SI), DX
|
||||
JMP doCopy
|
||||
|
||||
tagCopy2:
|
||||
// case tagCopy2:
|
||||
// s += 3
|
||||
ADDQ $3, SI
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
MOVQ SI, BX
|
||||
SUBQ R11, BX
|
||||
CMPQ BX, R12
|
||||
JA errCorrupt
|
||||
|
||||
// length = 1 + int(src[s-3])>>2
|
||||
SHRQ $2, CX
|
||||
INCQ CX
|
||||
|
||||
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
|
||||
MOVWQZX -2(SI), DX
|
||||
JMP doCopy
|
||||
|
||||
tagCopy:
|
||||
// We have a copy tag. We assume that:
|
||||
// - BX == src[s] & 0x03
|
||||
// - CX == src[s]
|
||||
CMPQ BX, $2
|
||||
JEQ tagCopy2
|
||||
JA tagCopy4
|
||||
|
||||
// case tagCopy1:
|
||||
// s += 2
|
||||
ADDQ $2, SI
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
MOVQ SI, BX
|
||||
SUBQ R11, BX
|
||||
CMPQ BX, R12
|
||||
JA errCorrupt
|
||||
|
||||
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
|
||||
MOVQ CX, DX
|
||||
ANDQ $0xe0, DX
|
||||
SHLQ $3, DX
|
||||
MOVBQZX -1(SI), BX
|
||||
ORQ BX, DX
|
||||
|
||||
// length = 4 + int(src[s-2])>>2&0x7
|
||||
SHRQ $2, CX
|
||||
ANDQ $7, CX
|
||||
ADDQ $4, CX
|
||||
|
||||
doCopy:
|
||||
// This is the end of the outer "switch", when we have a copy tag.
|
||||
//
|
||||
// We assume that:
|
||||
// - CX == length && CX > 0
|
||||
// - DX == offset
|
||||
|
||||
// if offset <= 0 { etc }
|
||||
CMPQ DX, $0
|
||||
JLE errCorrupt
|
||||
|
||||
// if d < offset { etc }
|
||||
MOVQ DI, BX
|
||||
SUBQ R8, BX
|
||||
CMPQ BX, DX
|
||||
JLT errCorrupt
|
||||
|
||||
// if length > len(dst)-d { etc }
|
||||
MOVQ R10, BX
|
||||
SUBQ DI, BX
|
||||
CMPQ CX, BX
|
||||
JGT errCorrupt
|
||||
|
||||
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
|
||||
//
|
||||
// Set:
|
||||
// - R14 = len(dst)-d
|
||||
// - R15 = &dst[d-offset]
|
||||
MOVQ R10, R14
|
||||
SUBQ DI, R14
|
||||
MOVQ DI, R15
|
||||
SUBQ DX, R15
|
||||
|
||||
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
|
||||
//
|
||||
// First, try using two 8-byte load/stores, similar to the doLit technique
|
||||
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
|
||||
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
|
||||
// and not one 16-byte load/store, and the first store has to be before the
|
||||
// second load, due to the overlap if offset is in the range [8, 16).
|
||||
//
|
||||
// if length > 16 || offset < 8 || len(dst)-d < 16 {
|
||||
// goto slowForwardCopy
|
||||
// }
|
||||
// copy 16 bytes
|
||||
// d += length
|
||||
CMPQ CX, $16
|
||||
JGT slowForwardCopy
|
||||
CMPQ DX, $8
|
||||
JLT slowForwardCopy
|
||||
CMPQ R14, $16
|
||||
JLT slowForwardCopy
|
||||
MOVQ 0(R15), AX
|
||||
MOVQ AX, 0(DI)
|
||||
MOVQ 8(R15), BX
|
||||
MOVQ BX, 8(DI)
|
||||
ADDQ CX, DI
|
||||
JMP loop
|
||||
|
||||
slowForwardCopy:
|
||||
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
|
||||
// can still try 8-byte load stores, provided we can overrun up to 10 extra
|
||||
// bytes. As above, the overrun will be fixed up by subsequent iterations
|
||||
// of the outermost loop.
|
||||
//
|
||||
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
|
||||
// commentary says:
|
||||
//
|
||||
// ----
|
||||
//
|
||||
// The main part of this loop is a simple copy of eight bytes at a time
|
||||
// until we've copied (at least) the requested amount of bytes. However,
|
||||
// if d and d-offset are less than eight bytes apart (indicating a
|
||||
// repeating pattern of length < 8), we first need to expand the pattern in
|
||||
// order to get the correct results. For instance, if the buffer looks like
|
||||
// this, with the eight-byte <d-offset> and <d> patterns marked as
|
||||
// intervals:
|
||||
//
|
||||
// abxxxxxxxxxxxx
|
||||
// [------] d-offset
|
||||
// [------] d
|
||||
//
|
||||
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
|
||||
// once, after which we can move <d> two bytes without moving <d-offset>:
|
||||
//
|
||||
// ababxxxxxxxxxx
|
||||
// [------] d-offset
|
||||
// [------] d
|
||||
//
|
||||
// and repeat the exercise until the two no longer overlap.
|
||||
//
|
||||
// This allows us to do very well in the special case of one single byte
|
||||
// repeated many times, without taking a big hit for more general cases.
|
||||
//
|
||||
// The worst case of extra writing past the end of the match occurs when
|
||||
// offset == 1 and length == 1; the last copy will read from byte positions
|
||||
// [0..7] and write to [4..11], whereas it was only supposed to write to
|
||||
// position 1. Thus, ten excess bytes.
|
||||
//
|
||||
// ----
|
||||
//
|
||||
// That "10 byte overrun" worst case is confirmed by Go's
|
||||
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
|
||||
// and finishSlowForwardCopy algorithm.
|
||||
//
|
||||
// if length > len(dst)-d-10 {
|
||||
// goto verySlowForwardCopy
|
||||
// }
|
||||
SUBQ $10, R14
|
||||
CMPQ CX, R14
|
||||
JGT verySlowForwardCopy
|
||||
|
||||
makeOffsetAtLeast8:
|
||||
// !!! As above, expand the pattern so that offset >= 8 and we can use
|
||||
// 8-byte load/stores.
|
||||
//
|
||||
// for offset < 8 {
|
||||
// copy 8 bytes from dst[d-offset:] to dst[d:]
|
||||
// length -= offset
|
||||
// d += offset
|
||||
// offset += offset
|
||||
// // The two previous lines together means that d-offset, and therefore
|
||||
// // R15, is unchanged.
|
||||
// }
|
||||
CMPQ DX, $8
|
||||
JGE fixUpSlowForwardCopy
|
||||
MOVQ (R15), BX
|
||||
MOVQ BX, (DI)
|
||||
SUBQ DX, CX
|
||||
ADDQ DX, DI
|
||||
ADDQ DX, DX
|
||||
JMP makeOffsetAtLeast8
|
||||
|
||||
fixUpSlowForwardCopy:
|
||||
// !!! Add length (which might be negative now) to d (implied by DI being
|
||||
// &dst[d]) so that d ends up at the right place when we jump back to the
|
||||
// top of the loop. Before we do that, though, we save DI to AX so that, if
|
||||
// length is positive, copying the remaining length bytes will write to the
|
||||
// right place.
|
||||
MOVQ DI, AX
|
||||
ADDQ CX, DI
|
||||
|
||||
finishSlowForwardCopy:
|
||||
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
|
||||
// length means that we overrun, but as above, that will be fixed up by
|
||||
// subsequent iterations of the outermost loop.
|
||||
CMPQ CX, $0
|
||||
JLE loop
|
||||
MOVQ (R15), BX
|
||||
MOVQ BX, (AX)
|
||||
ADDQ $8, R15
|
||||
ADDQ $8, AX
|
||||
SUBQ $8, CX
|
||||
JMP finishSlowForwardCopy
|
||||
|
||||
verySlowForwardCopy:
|
||||
// verySlowForwardCopy is a simple implementation of forward copy. In C
|
||||
// parlance, this is a do/while loop instead of a while loop, since we know
|
||||
// that length > 0. In Go syntax:
|
||||
//
|
||||
// for {
|
||||
// dst[d] = dst[d - offset]
|
||||
// d++
|
||||
// length--
|
||||
// if length == 0 {
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
MOVB (R15), BX
|
||||
MOVB BX, (DI)
|
||||
INCQ R15
|
||||
INCQ DI
|
||||
DECQ CX
|
||||
JNZ verySlowForwardCopy
|
||||
JMP loop
|
||||
|
||||
// The code above handles copy tags.
|
||||
// ----------------------------------------
|
||||
|
||||
end:
|
||||
// This is the end of the "for s < len(src)".
|
||||
//
|
||||
// if d != len(dst) { etc }
|
||||
CMPQ DI, R10
|
||||
JNE errCorrupt
|
||||
|
||||
// return 0
|
||||
MOVQ $0, ret+48(FP)
|
||||
RET
|
||||
|
||||
errCorrupt:
|
||||
// return decodeErrCodeCorrupt
|
||||
MOVQ $1, ret+48(FP)
|
||||
RET
|
101 vendor/github.com/golang/snappy/decode_other.go (generated, vendored, Normal file)
@@ -0,0 +1,101 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !amd64 appengine !gc noasm

package snappy

// decode writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func decode(dst, src []byte) int {
	var d, s, offset, length int
	for s < len(src) {
		switch src[s] & 0x03 {
		case tagLiteral:
			x := uint32(src[s] >> 2)
			switch {
			case x < 60:
				s++
			case x == 60:
				s += 2
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-1])
			case x == 61:
				s += 3
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-2]) | uint32(src[s-1])<<8
			case x == 62:
				s += 4
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
			case x == 63:
				s += 5
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
			}
			length = int(x) + 1
			if length <= 0 {
				return decodeErrCodeUnsupportedLiteralLength
			}
			if length > len(dst)-d || length > len(src)-s {
				return decodeErrCodeCorrupt
			}
			copy(dst[d:], src[s:s+length])
			d += length
			s += length
			continue

		case tagCopy1:
			s += 2
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 4 + int(src[s-2])>>2&0x7
			offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))

		case tagCopy2:
			s += 3
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-3])>>2
			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)

		case tagCopy4:
			s += 5
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-5])>>2
			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
		}

		if offset <= 0 || d < offset || length > len(dst)-d {
			return decodeErrCodeCorrupt
		}
		// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
		// the built-in copy function, this byte-by-byte copy always runs
		// forwards, even if the slices overlap. Conceptually, this is:
		//
		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
		for end := d + length; d != end; d++ {
			dst[d] = dst[d-offset]
		}
	}
	if d != len(dst) {
		return decodeErrCodeCorrupt
	}
	return 0
}
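
To make the comment about the forward, byte-by-byte copy concrete, here is a small standalone illustration (not part of the package) of how copying with an offset smaller than the length deliberately re-reads bytes written earlier in the same pass, repeating a short pattern.

package main

import "fmt"

func main() {
	// A 2-byte pattern "ab" followed by six bytes to be filled by a copy with
	// offset 2, the overlapping case the comment above describes.
	dst := []byte("ab??????")
	d, offset, length := 2, 2, 6

	for end := d + length; d != end; d++ {
		dst[d] = dst[d-offset] // always runs forwards, so the pattern repeats
	}
	fmt.Println(string(dst)) // prints "abababab"
}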
285 vendor/github.com/golang/snappy/encode.go (generated, vendored, Normal file)
@@ -0,0 +1,285 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package snappy
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Encode returns the encoded form of src. The returned slice may be a sub-
|
||||
// slice of dst if dst was large enough to hold the entire encoded block.
|
||||
// Otherwise, a newly allocated slice will be returned.
|
||||
//
|
||||
// The dst and src must not overlap. It is valid to pass a nil dst.
|
||||
func Encode(dst, src []byte) []byte {
|
||||
if n := MaxEncodedLen(len(src)); n < 0 {
|
||||
panic(ErrTooLarge)
|
||||
} else if len(dst) < n {
|
||||
dst = make([]byte, n)
|
||||
}
|
||||
|
||||
// The block starts with the varint-encoded length of the decompressed bytes.
|
||||
d := binary.PutUvarint(dst, uint64(len(src)))
|
||||
|
||||
for len(src) > 0 {
|
||||
p := src
|
||||
src = nil
|
||||
if len(p) > maxBlockSize {
|
||||
p, src = p[:maxBlockSize], p[maxBlockSize:]
|
||||
}
|
||||
if len(p) < minNonLiteralBlockSize {
|
||||
d += emitLiteral(dst[d:], p)
|
||||
} else {
|
||||
d += encodeBlock(dst[d:], p)
|
||||
}
|
||||
}
|
||||
return dst[:d]
|
||||
}
|
||||
|
||||
// inputMargin is the minimum number of extra input bytes to keep, inside
|
||||
// encodeBlock's inner loop. On some architectures, this margin lets us
|
||||
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
|
||||
// literals can be implemented as a single load to and store from a 16-byte
|
||||
// register. That literal's actual length can be as short as 1 byte, so this
|
||||
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
|
||||
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
|
||||
// that we don't overrun the dst and src buffers.
|
||||
const inputMargin = 16 - 1
|
||||
|
||||
// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
|
||||
// could be encoded with a copy tag. This is the minimum with respect to the
|
||||
// algorithm used by encodeBlock, not a minimum enforced by the file format.
|
||||
//
|
||||
// The encoded output must start with at least a 1 byte literal, as there are
|
||||
// no previous bytes to copy. A minimal (1 byte) copy after that, generated
|
||||
// from an emitCopy call in encodeBlock's main loop, would require at least
|
||||
// another inputMargin bytes, for the reason above: we want any emitLiteral
|
||||
// calls inside encodeBlock's main loop to use the fast path if possible, which
|
||||
// requires being able to overrun by inputMargin bytes. Thus,
|
||||
// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
|
||||
//
|
||||
// The C++ code doesn't use this exact threshold, but it could, as discussed at
|
||||
// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
|
||||
// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
|
||||
// optimization. It should not affect the encoded form. This is tested by
|
||||
// TestSameEncodingAsCppShortCopies.
|
||||
const minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
|
||||
// MaxEncodedLen returns the maximum length of a snappy block, given its
|
||||
// uncompressed length.
|
||||
//
|
||||
// It will return a negative value if srcLen is too large to encode.
|
||||
func MaxEncodedLen(srcLen int) int {
|
||||
n := uint64(srcLen)
|
||||
if n > 0xffffffff {
|
||||
return -1
|
||||
}
|
||||
// Compressed data can be defined as:
|
||||
// compressed := item* literal*
|
||||
// item := literal* copy
|
||||
//
|
||||
// The trailing literal sequence has a space blowup of at most 62/60
|
||||
// since a literal of length 60 needs one tag byte + one extra byte
|
||||
// for length information.
|
||||
//
|
||||
// Item blowup is trickier to measure. Suppose the "copy" op copies
|
||||
// 4 bytes of data. Because of a special check in the encoding code,
|
||||
// we produce a 4-byte copy only if the offset is < 65536. Therefore
|
||||
// the copy op takes 3 bytes to encode, and this type of item leads
|
||||
// to at most the 62/60 blowup for representing literals.
|
||||
//
|
||||
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
|
||||
// enough, it will take 5 bytes to encode the copy op. Therefore the
|
||||
// worst case here is a one-byte literal followed by a five-byte copy.
|
||||
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
|
||||
//
|
||||
// This last factor dominates the blowup, so the final estimate is:
|
||||
n = 32 + n + n/6
|
||||
if n > 0xffffffff {
|
||||
return -1
|
||||
}
|
||||
return int(n)
|
||||
}
|
||||
|
||||
var errClosed = errors.New("snappy: Writer is closed")
|
||||
|
||||
// NewWriter returns a new Writer that compresses to w.
|
||||
//
|
||||
// The Writer returned does not buffer writes. There is no need to Flush or
|
||||
// Close such a Writer.
|
||||
//
|
||||
// Deprecated: the Writer returned is not suitable for many small writes, only
|
||||
// for few large writes. Use NewBufferedWriter instead, which is efficient
|
||||
// regardless of the frequency and shape of the writes, and remember to Close
|
||||
// that Writer when done.
|
||||
func NewWriter(w io.Writer) *Writer {
|
||||
return &Writer{
|
||||
w: w,
|
||||
obuf: make([]byte, obufLen),
|
||||
}
|
||||
}
|
||||
|
||||
// NewBufferedWriter returns a new Writer that compresses to w, using the
|
||||
// framing format described at
|
||||
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||
//
|
||||
// The Writer returned buffers writes. Users must call Close to guarantee all
|
||||
// data has been forwarded to the underlying io.Writer. They may also call
|
||||
// Flush zero or more times before calling Close.
|
||||
func NewBufferedWriter(w io.Writer) *Writer {
|
||||
return &Writer{
|
||||
w: w,
|
||||
ibuf: make([]byte, 0, maxBlockSize),
|
||||
obuf: make([]byte, obufLen),
|
||||
}
|
||||
}
|
||||
|
||||
// Writer is an io.Writer that can write Snappy-compressed bytes.
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
err error
|
||||
|
||||
// ibuf is a buffer for the incoming (uncompressed) bytes.
|
||||
//
|
||||
// Its use is optional. For backwards compatibility, Writers created by the
|
||||
// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
|
||||
// therefore do not need to be Flush'ed or Close'd.
|
||||
ibuf []byte
|
||||
|
||||
// obuf is a buffer for the outgoing (compressed) bytes.
|
||||
obuf []byte
|
||||
|
||||
// wroteStreamHeader is whether we have written the stream header.
|
||||
wroteStreamHeader bool
|
||||
}
|
||||
|
||||
// Reset discards the writer's state and switches the Snappy writer to write to
|
||||
// w. This permits reusing a Writer rather than allocating a new one.
|
||||
func (w *Writer) Reset(writer io.Writer) {
|
||||
w.w = writer
|
||||
w.err = nil
|
||||
if w.ibuf != nil {
|
||||
w.ibuf = w.ibuf[:0]
|
||||
}
|
||||
w.wroteStreamHeader = false
|
||||
}
|
||||
|
||||
// Write satisfies the io.Writer interface.
|
||||
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
|
||||
if w.ibuf == nil {
|
||||
// Do not buffer incoming bytes. This does not perform or compress well
|
||||
// if the caller of Writer.Write writes many small slices. This
|
||||
// behavior is therefore deprecated, but still supported for backwards
|
||||
// compatibility with code that doesn't explicitly Flush or Close.
|
||||
return w.write(p)
|
||||
}
|
||||
|
||||
// The remainder of this method is based on bufio.Writer.Write from the
|
||||
// standard library.
|
||||
|
||||
for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
|
||||
var n int
|
||||
if len(w.ibuf) == 0 {
|
||||
// Large write, empty buffer.
|
||||
// Write directly from p to avoid copy.
|
||||
n, _ = w.write(p)
|
||||
} else {
|
||||
n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
|
||||
w.ibuf = w.ibuf[:len(w.ibuf)+n]
|
||||
w.Flush()
|
||||
}
|
||||
nRet += n
|
||||
p = p[n:]
|
||||
}
|
||||
if w.err != nil {
|
||||
return nRet, w.err
|
||||
}
|
||||
n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
|
||||
w.ibuf = w.ibuf[:len(w.ibuf)+n]
|
||||
nRet += n
|
||||
return nRet, nil
|
||||
}
|
||||
|
||||
func (w *Writer) write(p []byte) (nRet int, errRet error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
for len(p) > 0 {
|
||||
obufStart := len(magicChunk)
|
||||
if !w.wroteStreamHeader {
|
||||
w.wroteStreamHeader = true
|
||||
copy(w.obuf, magicChunk)
|
||||
obufStart = 0
|
||||
}
|
||||
|
||||
var uncompressed []byte
|
||||
if len(p) > maxBlockSize {
|
||||
uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
|
||||
} else {
|
||||
uncompressed, p = p, nil
|
||||
}
|
||||
checksum := crc(uncompressed)
|
||||
|
||||
// Compress the buffer, discarding the result if the improvement
|
||||
// isn't at least 12.5%.
|
||||
compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
|
||||
chunkType := uint8(chunkTypeCompressedData)
|
||||
chunkLen := 4 + len(compressed)
|
||||
obufEnd := obufHeaderLen + len(compressed)
|
||||
if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
|
||||
chunkType = chunkTypeUncompressedData
|
||||
chunkLen = 4 + len(uncompressed)
|
||||
obufEnd = obufHeaderLen
|
||||
}
|
||||
|
||||
// Fill in the per-chunk header that comes before the body.
|
||||
w.obuf[len(magicChunk)+0] = chunkType
|
||||
w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
|
||||
w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
|
||||
w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
|
||||
w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
|
||||
w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
|
||||
w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
|
||||
w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
|
||||
|
||||
if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
|
||||
w.err = err
|
||||
return nRet, err
|
||||
}
|
||||
if chunkType == chunkTypeUncompressedData {
|
||||
if _, err := w.w.Write(uncompressed); err != nil {
|
||||
w.err = err
|
||||
return nRet, err
|
||||
}
|
||||
}
|
||||
nRet += len(uncompressed)
|
||||
}
|
||||
return nRet, nil
|
||||
}
|
||||
|
||||
// Flush flushes the Writer to its underlying io.Writer.
|
||||
func (w *Writer) Flush() error {
|
||||
if w.err != nil {
|
||||
return w.err
|
||||
}
|
||||
if len(w.ibuf) == 0 {
|
||||
return nil
|
||||
}
|
||||
w.write(w.ibuf)
|
||||
w.ibuf = w.ibuf[:0]
|
||||
return w.err
|
||||
}
|
||||
|
||||
// Close calls Flush and then closes the Writer.
|
||||
func (w *Writer) Close() error {
|
||||
w.Flush()
|
||||
ret := w.err
|
||||
if w.err == nil {
|
||||
w.err = errClosed
|
||||
}
|
||||
return ret
|
||||
}
|
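
A minimal sketch of stream compression with NewBufferedWriter, which the comments in encode.go above recommend over the deprecated NewWriter. The output file name is an assumption for illustration; the sketch is not part of the vendored files.

package main

import (
	"log"
	"os"

	"github.com/golang/snappy"
)

func main() {
	f, err := os.Create("payload.sz") // hypothetical output file
	if err != nil {
		log.Fatal(err)
	}

	w := snappy.NewBufferedWriter(f)
	if _, err := w.Write([]byte("some data worth compressing")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil { // Close flushes the internal buffer
		log.Fatal(err)
	}
	if err := f.Close(); err != nil {
		log.Fatal(err)
	}
}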
29 vendor/github.com/golang/snappy/encode_amd64.go (generated, vendored, Normal file)
@@ -0,0 +1,29 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm

package snappy

// emitLiteral has the same semantics as in encode_other.go.
//
//go:noescape
func emitLiteral(dst, lit []byte) int

// emitCopy has the same semantics as in encode_other.go.
//
//go:noescape
func emitCopy(dst []byte, offset, length int) int

// extendMatch has the same semantics as in encode_other.go.
//
//go:noescape
func extendMatch(src []byte, i, j int) int

// encodeBlock has the same semantics as in encode_other.go.
//
//go:noescape
func encodeBlock(dst, src []byte) (d int)
730 vendor/github.com/golang/snappy/encode_amd64.s (generated, vendored, Normal file)
@@ -0,0 +1,730 @@
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
|
||||
// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
|
||||
// https://github.com/golang/snappy/issues/29
|
||||
//
|
||||
// As a workaround, the package was built with a known good assembler, and
|
||||
// those instructions were disassembled by "objdump -d" to yield the
|
||||
// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
|
||||
// style comments, in AT&T asm syntax. Note that rsp here is a physical
|
||||
// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
|
||||
// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
|
||||
// fine on Go 1.6.
|
||||
|
||||
// The asm code generally follows the pure Go code in encode_other.go, except
|
||||
// where marked with a "!!!".
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// func emitLiteral(dst, lit []byte) int
|
||||
//
|
||||
// All local variables fit into registers. The register allocation:
|
||||
// - AX len(lit)
|
||||
// - BX n
|
||||
// - DX return value
|
||||
// - DI &dst[i]
|
||||
// - R10 &lit[0]
|
||||
//
|
||||
// The 24 bytes of stack space is to call runtime·memmove.
|
||||
//
|
||||
// The unusual register allocation of local variables, such as R10 for the
|
||||
// source pointer, matches the allocation used at the call site in encodeBlock,
|
||||
// which makes it easier to manually inline this function.
|
||||
TEXT ·emitLiteral(SB), NOSPLIT, $24-56
|
||||
MOVQ dst_base+0(FP), DI
|
||||
MOVQ lit_base+24(FP), R10
|
||||
MOVQ lit_len+32(FP), AX
|
||||
MOVQ AX, DX
|
||||
MOVL AX, BX
|
||||
SUBL $1, BX
|
||||
|
||||
CMPL BX, $60
|
||||
JLT oneByte
|
||||
CMPL BX, $256
|
||||
JLT twoBytes
|
||||
|
||||
threeBytes:
|
||||
MOVB $0xf4, 0(DI)
|
||||
MOVW BX, 1(DI)
|
||||
ADDQ $3, DI
|
||||
ADDQ $3, DX
|
||||
JMP memmove
|
||||
|
||||
twoBytes:
|
||||
MOVB $0xf0, 0(DI)
|
||||
MOVB BX, 1(DI)
|
||||
ADDQ $2, DI
|
||||
ADDQ $2, DX
|
||||
JMP memmove
|
||||
|
||||
oneByte:
|
||||
SHLB $2, BX
|
||||
MOVB BX, 0(DI)
|
||||
ADDQ $1, DI
|
||||
ADDQ $1, DX
|
||||
|
||||
memmove:
|
||||
MOVQ DX, ret+48(FP)
|
||||
|
||||
// copy(dst[i:], lit)
|
||||
//
|
||||
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
|
||||
// DI, R10 and AX as arguments.
|
||||
MOVQ DI, 0(SP)
|
||||
MOVQ R10, 8(SP)
|
||||
MOVQ AX, 16(SP)
|
||||
CALL runtime·memmove(SB)
|
||||
RET
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// func emitCopy(dst []byte, offset, length int) int
|
||||
//
|
||||
// All local variables fit into registers. The register allocation:
|
||||
// - AX length
|
||||
// - SI &dst[0]
|
||||
// - DI &dst[i]
|
||||
// - R11 offset
|
||||
//
|
||||
// The unusual register allocation of local variables, such as R11 for the
|
||||
// offset, matches the allocation used at the call site in encodeBlock, which
|
||||
// makes it easier to manually inline this function.
|
||||
TEXT ·emitCopy(SB), NOSPLIT, $0-48
|
||||
MOVQ dst_base+0(FP), DI
|
||||
MOVQ DI, SI
|
||||
MOVQ offset+24(FP), R11
|
||||
MOVQ length+32(FP), AX
|
||||
|
||||
loop0:
|
||||
// for length >= 68 { etc }
|
||||
CMPL AX, $68
|
||||
JLT step1
|
||||
|
||||
// Emit a length 64 copy, encoded as 3 bytes.
|
||||
MOVB $0xfe, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
SUBL $64, AX
|
||||
JMP loop0
|
||||
|
||||
step1:
|
||||
// if length > 64 { etc }
|
||||
CMPL AX, $64
|
||||
JLE step2
|
||||
|
||||
// Emit a length 60 copy, encoded as 3 bytes.
|
||||
MOVB $0xee, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
SUBL $60, AX
|
||||
|
||||
step2:
|
||||
// if length >= 12 || offset >= 2048 { goto step3 }
|
||||
CMPL AX, $12
|
||||
JGE step3
|
||||
CMPL R11, $2048
|
||||
JGE step3
|
||||
|
||||
// Emit the remaining copy, encoded as 2 bytes.
|
||||
MOVB R11, 1(DI)
|
||||
SHRL $8, R11
|
||||
SHLB $5, R11
|
||||
SUBB $4, AX
|
||||
SHLB $2, AX
|
||||
ORB AX, R11
|
||||
ORB $1, R11
|
||||
MOVB R11, 0(DI)
|
||||
ADDQ $2, DI
|
||||
|
||||
// Return the number of bytes written.
|
||||
SUBQ SI, DI
|
||||
MOVQ DI, ret+40(FP)
|
||||
RET
|
||||
|
||||
step3:
|
||||
// Emit the remaining copy, encoded as 3 bytes.
|
||||
SUBL $1, AX
|
||||
SHLB $2, AX
|
||||
ORB $2, AX
|
||||
MOVB AX, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
|
||||
// Return the number of bytes written.
|
||||
SUBQ SI, DI
|
||||
MOVQ DI, ret+40(FP)
|
||||
RET
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// func extendMatch(src []byte, i, j int) int
|
||||
//
|
||||
// All local variables fit into registers. The register allocation:
|
||||
// - DX &src[0]
|
||||
// - SI &src[j]
|
||||
// - R13 &src[len(src) - 8]
|
||||
// - R14 &src[len(src)]
|
||||
// - R15 &src[i]
|
||||
//
|
||||
// The unusual register allocation of local variables, such as R15 for a source
|
||||
// pointer, matches the allocation used at the call site in encodeBlock, which
|
||||
// makes it easier to manually inline this function.
|
||||
TEXT ·extendMatch(SB), NOSPLIT, $0-48
|
||||
MOVQ src_base+0(FP), DX
|
||||
MOVQ src_len+8(FP), R14
|
||||
MOVQ i+24(FP), R15
|
||||
MOVQ j+32(FP), SI
|
||||
ADDQ DX, R14
|
||||
ADDQ DX, R15
|
||||
ADDQ DX, SI
|
||||
MOVQ R14, R13
|
||||
SUBQ $8, R13
|
||||
|
||||
cmp8:
|
||||
// As long as we are 8 or more bytes before the end of src, we can load and
|
||||
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
|
||||
CMPQ SI, R13
|
||||
JA cmp1
|
||||
MOVQ (R15), AX
|
||||
MOVQ (SI), BX
|
||||
CMPQ AX, BX
|
||||
JNE bsf
|
||||
ADDQ $8, R15
|
||||
ADDQ $8, SI
|
||||
JMP cmp8
|
||||
|
||||
bsf:
|
||||
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
|
||||
// the index of the first byte that differs. The BSF instruction finds the
|
||||
// least significant 1 bit, the amd64 architecture is little-endian, and
|
||||
// the shift by 3 converts a bit index to a byte index.
|
||||
XORQ AX, BX
|
||||
BSFQ BX, BX
|
||||
SHRQ $3, BX
|
||||
ADDQ BX, SI
|
||||
|
||||
// Convert from &src[ret] to ret.
|
||||
SUBQ DX, SI
|
||||
MOVQ SI, ret+40(FP)
|
||||
RET
|
||||
|
||||
cmp1:
|
||||
// In src's tail, compare 1 byte at a time.
|
||||
CMPQ SI, R14
|
||||
JAE extendMatchEnd
|
||||
MOVB (R15), AX
|
||||
MOVB (SI), BX
|
||||
CMPB AX, BX
|
||||
JNE extendMatchEnd
|
||||
ADDQ $1, R15
|
||||
ADDQ $1, SI
|
||||
JMP cmp1
|
||||
|
||||
extendMatchEnd:
|
||||
// Convert from &src[ret] to ret.
|
||||
SUBQ DX, SI
|
||||
MOVQ SI, ret+40(FP)
|
||||
RET
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// func encodeBlock(dst, src []byte) (d int)
|
||||
//
|
||||
// All local variables fit into registers, other than "var table". The register
|
||||
// allocation:
|
||||
// - AX . .
|
||||
// - BX . .
|
||||
// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
|
||||
// - DX 64 &src[0], tableSize
|
||||
// - SI 72 &src[s]
|
||||
// - DI 80 &dst[d]
|
||||
// - R9 88 sLimit
|
||||
// - R10 . &src[nextEmit]
|
||||
// - R11 96 prevHash, currHash, nextHash, offset
|
||||
// - R12 104 &src[base], skip
|
||||
// - R13 . &src[nextS], &src[len(src) - 8]
|
||||
// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
|
||||
// - R15 112 candidate
|
||||
//
|
||||
// The second column (56, 64, etc) is the stack offset to spill the registers
|
||||
// when calling other functions. We could pack this slightly tighter, but it's
|
||||
// simpler to have a dedicated spill map independent of the function called.
|
||||
//
|
||||
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
|
||||
// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
|
||||
// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
|
||||
TEXT ·encodeBlock(SB), 0, $32888-56
|
||||
MOVQ dst_base+0(FP), DI
|
||||
MOVQ src_base+24(FP), SI
|
||||
MOVQ src_len+32(FP), R14
|
||||
|
||||
// shift, tableSize := uint32(32-8), 1<<8
|
||||
MOVQ $24, CX
|
||||
MOVQ $256, DX
|
||||
|
||||
calcShift:
|
||||
// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
|
||||
// shift--
|
||||
// }
|
||||
CMPQ DX, $16384
|
||||
JGE varTable
|
||||
CMPQ DX, R14
|
||||
JGE varTable
|
||||
SUBQ $1, CX
|
||||
SHLQ $1, DX
|
||||
JMP calcShift
|
||||
|
||||
varTable:
|
||||
// var table [maxTableSize]uint16
|
||||
//
|
||||
// In the asm code, unlike the Go code, we can zero-initialize only the
|
||||
// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
|
||||
// writes 16 bytes, so we can do only tableSize/8 writes instead of the
|
||||
// 2048 writes that would zero-initialize all of table's 32768 bytes.
|
||||
SHRQ $3, DX
|
||||
LEAQ table-32768(SP), BX
|
||||
PXOR X0, X0
|
||||
|
||||
memclr:
|
||||
MOVOU X0, 0(BX)
|
||||
ADDQ $16, BX
|
||||
SUBQ $1, DX
|
||||
JNZ memclr
|
||||
|
||||
// !!! DX = &src[0]
|
||||
MOVQ SI, DX
|
||||
|
||||
// sLimit := len(src) - inputMargin
|
||||
MOVQ R14, R9
|
||||
SUBQ $15, R9
|
||||
|
||||
// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
|
||||
// change for the rest of the function.
|
||||
MOVQ CX, 56(SP)
|
||||
MOVQ DX, 64(SP)
|
||||
MOVQ R9, 88(SP)
|
||||
|
||||
// nextEmit := 0
|
||||
MOVQ DX, R10
|
||||
|
||||
// s := 1
|
||||
ADDQ $1, SI
|
||||
|
||||
// nextHash := hash(load32(src, s), shift)
|
||||
MOVL 0(SI), R11
|
||||
IMULL $0x1e35a7bd, R11
|
||||
SHRL CX, R11
|
||||
|
||||
outer:
|
||||
// for { etc }
|
||||
|
||||
// skip := 32
|
||||
MOVQ $32, R12
|
||||
|
||||
// nextS := s
|
||||
MOVQ SI, R13
|
||||
|
||||
// candidate := 0
|
||||
MOVQ $0, R15
|
||||
|
||||
inner0:
|
||||
// for { etc }
|
||||
|
||||
// s := nextS
|
||||
MOVQ R13, SI
|
||||
|
||||
// bytesBetweenHashLookups := skip >> 5
|
||||
MOVQ R12, R14
|
||||
SHRQ $5, R14
|
||||
|
||||
// nextS = s + bytesBetweenHashLookups
|
||||
ADDQ R14, R13
|
||||
|
||||
// skip += bytesBetweenHashLookups
|
||||
ADDQ R14, R12
|
||||
|
||||
// if nextS > sLimit { goto emitRemainder }
|
||||
MOVQ R13, AX
|
||||
SUBQ DX, AX
|
||||
CMPQ AX, R9
|
||||
JA emitRemainder
|
||||
|
||||
// candidate = int(table[nextHash])
|
||||
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
|
||||
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
|
||||
BYTE $0x4e
|
||||
BYTE $0x0f
|
||||
BYTE $0xb7
|
||||
BYTE $0x7c
|
||||
BYTE $0x5c
|
||||
BYTE $0x78
|
||||
|
||||
// table[nextHash] = uint16(s)
|
||||
MOVQ SI, AX
|
||||
SUBQ DX, AX
|
||||
|
||||
// XXX: MOVW AX, table-32768(SP)(R11*2)
|
||||
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
|
||||
BYTE $0x66
|
||||
BYTE $0x42
|
||||
BYTE $0x89
|
||||
BYTE $0x44
|
||||
BYTE $0x5c
|
||||
BYTE $0x78
|
||||
|
||||
// nextHash = hash(load32(src, nextS), shift)
|
||||
MOVL 0(R13), R11
|
||||
IMULL $0x1e35a7bd, R11
|
||||
SHRL CX, R11
|
||||
|
||||
// if load32(src, s) != load32(src, candidate) { continue } break
|
||||
MOVL 0(SI), AX
|
||||
MOVL (DX)(R15*1), BX
|
||||
CMPL AX, BX
|
||||
JNE inner0
|
||||
|
||||
fourByteMatch:
|
||||
// As per the encode_other.go code:
|
||||
//
|
||||
// A 4-byte match has been found. We'll later see etc.
|
||||
|
||||
// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
|
||||
// on inputMargin in encode.go.
|
||||
MOVQ SI, AX
|
||||
SUBQ R10, AX
|
||||
CMPQ AX, $16
|
||||
JLE emitLiteralFastPath
|
||||
|
||||
// ----------------------------------------
|
||||
// Begin inline of the emitLiteral call.
|
||||
//
|
||||
// d += emitLiteral(dst[d:], src[nextEmit:s])
|
||||
|
||||
MOVL AX, BX
|
||||
SUBL $1, BX
|
||||
|
||||
CMPL BX, $60
|
||||
JLT inlineEmitLiteralOneByte
|
||||
CMPL BX, $256
|
||||
JLT inlineEmitLiteralTwoBytes
|
||||
|
||||
inlineEmitLiteralThreeBytes:
|
||||
MOVB $0xf4, 0(DI)
|
||||
MOVW BX, 1(DI)
|
||||
ADDQ $3, DI
|
||||
JMP inlineEmitLiteralMemmove
|
||||
|
||||
inlineEmitLiteralTwoBytes:
|
||||
MOVB $0xf0, 0(DI)
|
||||
MOVB BX, 1(DI)
|
||||
ADDQ $2, DI
|
||||
JMP inlineEmitLiteralMemmove
|
||||
|
||||
inlineEmitLiteralOneByte:
|
||||
SHLB $2, BX
|
||||
MOVB BX, 0(DI)
|
||||
ADDQ $1, DI
|
||||
|
||||
inlineEmitLiteralMemmove:
|
||||
// Spill local variables (registers) onto the stack; call; unspill.
|
||||
//
|
||||
// copy(dst[i:], lit)
|
||||
//
|
||||
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
|
||||
// DI, R10 and AX as arguments.
|
||||
MOVQ DI, 0(SP)
|
||||
MOVQ R10, 8(SP)
|
||||
MOVQ AX, 16(SP)
|
||||
ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
|
||||
MOVQ SI, 72(SP)
|
||||
MOVQ DI, 80(SP)
|
||||
MOVQ R15, 112(SP)
|
||||
CALL runtime·memmove(SB)
|
||||
MOVQ 56(SP), CX
|
||||
MOVQ 64(SP), DX
|
||||
MOVQ 72(SP), SI
|
||||
MOVQ 80(SP), DI
|
||||
MOVQ 88(SP), R9
|
||||
MOVQ 112(SP), R15
|
||||
JMP inner1
|
||||
|
||||
inlineEmitLiteralEnd:
|
||||
// End inline of the emitLiteral call.
|
||||
// ----------------------------------------
|
||||
|
||||
emitLiteralFastPath:
|
||||
// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
|
||||
MOVB AX, BX
|
||||
SUBB $1, BX
|
||||
SHLB $2, BX
|
||||
MOVB BX, (DI)
|
||||
ADDQ $1, DI
|
||||
|
||||
// !!! Implement the copy from lit to dst as a 16-byte load and store.
|
||||
// (Encode's documentation says that dst and src must not overlap.)
|
||||
//
|
||||
// This always copies 16 bytes, instead of only len(lit) bytes, but that's
|
||||
// OK. Subsequent iterations will fix up the overrun.
|
||||
//
|
||||
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
|
||||
// 16-byte loads and stores. This technique probably wouldn't be as
|
||||
// effective on architectures that are fussier about alignment.
|
||||
MOVOU 0(R10), X0
|
||||
MOVOU X0, 0(DI)
|
||||
ADDQ AX, DI
|
||||
|
||||
inner1:
|
||||
// for { etc }
|
||||
|
||||
// base := s
|
||||
MOVQ SI, R12
|
||||
|
||||
// !!! offset := base - candidate
|
||||
MOVQ R12, R11
|
||||
SUBQ R15, R11
|
||||
SUBQ DX, R11
|
||||
|
||||
// ----------------------------------------
|
||||
// Begin inline of the extendMatch call.
|
||||
//
|
||||
// s = extendMatch(src, candidate+4, s+4)
|
||||
|
||||
// !!! R14 = &src[len(src)]
|
||||
MOVQ src_len+32(FP), R14
|
||||
ADDQ DX, R14
|
||||
|
||||
// !!! R13 = &src[len(src) - 8]
|
||||
MOVQ R14, R13
|
||||
SUBQ $8, R13
|
||||
|
||||
// !!! R15 = &src[candidate + 4]
|
||||
ADDQ $4, R15
|
||||
ADDQ DX, R15
|
||||
|
||||
// !!! s += 4
|
||||
ADDQ $4, SI
|
||||
|
||||
inlineExtendMatchCmp8:
|
||||
// As long as we are 8 or more bytes before the end of src, we can load and
|
||||
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
|
||||
CMPQ SI, R13
|
||||
JA inlineExtendMatchCmp1
|
||||
MOVQ (R15), AX
|
||||
MOVQ (SI), BX
|
||||
CMPQ AX, BX
|
||||
JNE inlineExtendMatchBSF
|
||||
ADDQ $8, R15
|
||||
ADDQ $8, SI
|
||||
JMP inlineExtendMatchCmp8
|
||||
|
||||
inlineExtendMatchBSF:
|
||||
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
|
||||
// the index of the first byte that differs. The BSF instruction finds the
|
||||
// least significant 1 bit, the amd64 architecture is little-endian, and
|
||||
// the shift by 3 converts a bit index to a byte index.
|
||||
XORQ AX, BX
|
||||
BSFQ BX, BX
|
||||
SHRQ $3, BX
|
||||
ADDQ BX, SI
|
||||
JMP inlineExtendMatchEnd
|
||||
|
||||
inlineExtendMatchCmp1:
|
||||
// In src's tail, compare 1 byte at a time.
|
||||
CMPQ SI, R14
|
||||
JAE inlineExtendMatchEnd
|
||||
MOVB (R15), AX
|
||||
MOVB (SI), BX
|
||||
CMPB AX, BX
|
||||
JNE inlineExtendMatchEnd
|
||||
ADDQ $1, R15
|
||||
ADDQ $1, SI
|
||||
JMP inlineExtendMatchCmp1
|
||||
|
||||
inlineExtendMatchEnd:
|
||||
// End inline of the extendMatch call.
|
||||
// ----------------------------------------
|
||||
|
||||
// ----------------------------------------
|
||||
// Begin inline of the emitCopy call.
|
||||
//
|
||||
// d += emitCopy(dst[d:], base-candidate, s-base)
|
||||
|
||||
// !!! length := s - base
|
||||
MOVQ SI, AX
|
||||
SUBQ R12, AX
|
||||
|
||||
inlineEmitCopyLoop0:
|
||||
// for length >= 68 { etc }
|
||||
CMPL AX, $68
|
||||
JLT inlineEmitCopyStep1
|
||||
|
||||
// Emit a length 64 copy, encoded as 3 bytes.
|
||||
MOVB $0xfe, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
SUBL $64, AX
|
||||
JMP inlineEmitCopyLoop0
|
||||
|
||||
inlineEmitCopyStep1:
|
||||
// if length > 64 { etc }
|
||||
CMPL AX, $64
|
||||
JLE inlineEmitCopyStep2
|
||||
|
||||
// Emit a length 60 copy, encoded as 3 bytes.
|
||||
MOVB $0xee, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
SUBL $60, AX
|
||||
|
||||
inlineEmitCopyStep2:
|
||||
// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
|
||||
CMPL AX, $12
|
||||
JGE inlineEmitCopyStep3
|
||||
CMPL R11, $2048
|
||||
JGE inlineEmitCopyStep3
|
||||
|
||||
// Emit the remaining copy, encoded as 2 bytes.
|
||||
MOVB R11, 1(DI)
|
||||
SHRL $8, R11
|
||||
SHLB $5, R11
|
||||
SUBB $4, AX
|
||||
SHLB $2, AX
|
||||
ORB AX, R11
|
||||
ORB $1, R11
|
||||
MOVB R11, 0(DI)
|
||||
ADDQ $2, DI
|
||||
JMP inlineEmitCopyEnd
|
||||
|
||||
inlineEmitCopyStep3:
|
||||
// Emit the remaining copy, encoded as 3 bytes.
|
||||
SUBL $1, AX
|
||||
SHLB $2, AX
|
||||
ORB $2, AX
|
||||
MOVB AX, 0(DI)
|
||||
MOVW R11, 1(DI)
|
||||
ADDQ $3, DI
|
||||
|
||||
inlineEmitCopyEnd:
|
||||
// End inline of the emitCopy call.
|
||||
// ----------------------------------------
|
||||
|
||||
// nextEmit = s
|
||||
MOVQ SI, R10
|
||||
|
||||
// if s >= sLimit { goto emitRemainder }
|
||||
MOVQ SI, AX
|
||||
SUBQ DX, AX
|
||||
CMPQ AX, R9
|
||||
JAE emitRemainder
|
||||
|
||||
// As per the encode_other.go code:
|
||||
//
|
||||
// We could immediately etc.
|
||||
|
||||
// x := load64(src, s-1)
|
||||
MOVQ -1(SI), R14
|
||||
|
||||
// prevHash := hash(uint32(x>>0), shift)
|
||||
MOVL R14, R11
|
||||
IMULL $0x1e35a7bd, R11
|
||||
SHRL CX, R11
|
||||
|
||||
// table[prevHash] = uint16(s-1)
|
||||
MOVQ SI, AX
|
||||
SUBQ DX, AX
|
||||
SUBQ $1, AX
|
||||
|
||||
// XXX: MOVW AX, table-32768(SP)(R11*2)
|
||||
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
|
||||
BYTE $0x66
|
||||
BYTE $0x42
|
||||
BYTE $0x89
|
||||
BYTE $0x44
|
||||
BYTE $0x5c
|
||||
BYTE $0x78
|
||||
|
||||
// currHash := hash(uint32(x>>8), shift)
|
||||
SHRQ $8, R14
|
||||
MOVL R14, R11
|
||||
IMULL $0x1e35a7bd, R11
|
||||
SHRL CX, R11
|
||||
|
||||
// candidate = int(table[currHash])
|
||||
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
|
||||
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
|
||||
BYTE $0x4e
|
||||
BYTE $0x0f
|
||||
BYTE $0xb7
|
||||
BYTE $0x7c
|
||||
BYTE $0x5c
|
||||
BYTE $0x78
|
||||
|
||||
// table[currHash] = uint16(s)
|
||||
ADDQ $1, AX
|
||||
|
||||
// XXX: MOVW AX, table-32768(SP)(R11*2)
|
||||
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
|
||||
BYTE $0x66
|
||||
BYTE $0x42
|
||||
BYTE $0x89
|
||||
BYTE $0x44
|
||||
BYTE $0x5c
|
||||
BYTE $0x78
|
||||
|
||||
// if uint32(x>>8) == load32(src, candidate) { continue }
|
||||
MOVL (DX)(R15*1), BX
|
||||
CMPL R14, BX
|
||||
JEQ inner1
|
||||
|
||||
// nextHash = hash(uint32(x>>16), shift)
|
||||
SHRQ $8, R14
|
||||
MOVL R14, R11
|
||||
IMULL $0x1e35a7bd, R11
|
||||
SHRL CX, R11
|
||||
|
||||
// s++
|
||||
ADDQ $1, SI
|
||||
|
||||
// break out of the inner1 for loop, i.e. continue the outer loop.
|
||||
JMP outer
|
||||
|
||||
emitRemainder:
|
||||
// if nextEmit < len(src) { etc }
|
||||
MOVQ src_len+32(FP), AX
|
||||
ADDQ DX, AX
|
||||
CMPQ R10, AX
|
||||
JEQ encodeBlockEnd
|
||||
|
||||
// d += emitLiteral(dst[d:], src[nextEmit:])
|
||||
//
|
||||
// Push args.
|
||||
MOVQ DI, 0(SP)
|
||||
MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
|
||||
MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
|
||||
MOVQ R10, 24(SP)
|
||||
SUBQ R10, AX
|
||||
MOVQ AX, 32(SP)
|
||||
MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
|
||||
|
||||
// Spill local variables (registers) onto the stack; call; unspill.
|
||||
MOVQ DI, 80(SP)
|
||||
CALL ·emitLiteral(SB)
|
||||
MOVQ 80(SP), DI
|
||||
|
||||
// Finish the "d +=" part of "d += emitLiteral(etc)".
|
||||
ADDQ 48(SP), DI
|
||||
|
||||
encodeBlockEnd:
|
||||
MOVQ dst_base+0(FP), AX
|
||||
SUBQ AX, DI
|
||||
MOVQ DI, d+48(FP)
|
||||
RET
|
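The inlineExtendMatchBSF block above leans on a classic trick: XOR the two 8-byte words, let BSF find the least significant set bit, and shift right by 3 to turn a bit index into a byte index on little-endian amd64. A minimal standalone Go sketch of the same idea (illustrative only, not part of the vendored sources), using math/bits instead of hand-written assembly:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// firstDiff returns the index of the first byte at which the 8-byte words
// loaded from a and b differ, or 8 if the words are equal.
func firstDiff(a, b []byte) int {
	x := binary.LittleEndian.Uint64(a)
	y := binary.LittleEndian.Uint64(b)
	if x == y {
		return 8
	}
	// TrailingZeros64 plays the role of BSF; >>3 converts a bit index
	// into a byte index, exactly as the SHRQ $3 above does.
	return bits.TrailingZeros64(x^y) >> 3
}

func main() {
	fmt.Println(firstDiff([]byte("snappy!!"), []byte("snapPY!!"))) // 4
}
```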
238
vendor/github.com/golang/snappy/encode_other.go
generated
vendored
Normal file
@@ -0,0 +1,238 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !amd64 appengine !gc noasm

package snappy

func load32(b []byte, i int) uint32 {
	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

func load64(b []byte, i int) uint64 {
	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}

// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
//	dst is long enough to hold the encoded bytes
//	1 <= len(lit) && len(lit) <= 65536
func emitLiteral(dst, lit []byte) int {
	i, n := 0, uint(len(lit)-1)
	switch {
	case n < 60:
		dst[0] = uint8(n)<<2 | tagLiteral
		i = 1
	case n < 1<<8:
		dst[0] = 60<<2 | tagLiteral
		dst[1] = uint8(n)
		i = 2
	default:
		dst[0] = 61<<2 | tagLiteral
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		i = 3
	}
	return i + copy(dst[i:], lit)
}

// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
//	dst is long enough to hold the encoded bytes
//	1 <= offset && offset <= 65535
//	4 <= length && length <= 65535
func emitCopy(dst []byte, offset, length int) int {
	i := 0
	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
	// length emitted down below is is a little lower (at 60 = 64 - 4), because
	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
	for length >= 68 {
		// Emit a length 64 copy, encoded as 3 bytes.
		dst[i+0] = 63<<2 | tagCopy2
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		i += 3
		length -= 64
	}
	if length > 64 {
		// Emit a length 60 copy, encoded as 3 bytes.
		dst[i+0] = 59<<2 | tagCopy2
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		i += 3
		length -= 60
	}
	if length >= 12 || offset >= 2048 {
		// Emit the remaining copy, encoded as 3 bytes.
		dst[i+0] = uint8(length-1)<<2 | tagCopy2
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		return i + 3
	}
	// Emit the remaining copy, encoded as 2 bytes.
	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
	dst[i+1] = uint8(offset)
	return i + 2
}

// extendMatch returns the largest k such that k <= len(src) and that
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
//	0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
	}
	return j
}

func hash(u, shift uint32) uint32 {
	return (u * 0x1e35a7bd) >> shift
}

// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
	// The table element type is uint16, as s < sLimit and sLimit < len(src)
	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
	const (
		maxTableSize = 1 << 14
		// tableMask is redundant, but helps the compiler eliminate bounds
		// checks.
		tableMask = maxTableSize - 1
	)
	shift := uint32(32 - 8)
	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
		shift--
	}
	// In Go, all array elements are zero-initialized, so there is no advantage
	// to a smaller tableSize per se. However, it matches the C++ algorithm,
	// and in the asm versions of this code, we can get away with zeroing only
	// the first tableSize elements.
	var table [maxTableSize]uint16

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	nextHash := hash(load32(src, s), shift)

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := 32

		nextS := s
		candidate := 0
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidate = int(table[nextHash&tableMask])
			table[nextHash&tableMask] = uint16(s)
			nextHash = hash(load32(src, nextS), shift)
			if load32(src, s) == load32(src, candidate) {
				break
			}
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		d += emitLiteral(dst[d:], src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s

			// Extend the 4-byte match as long as possible.
			//
			// This is an inlined version of:
			//	s = extendMatch(src, candidate+4, s+4)
			s += 4
			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
			}

			d += emitCopy(dst[d:], base-candidate, s-base)
			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-1 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load64(src, s-1)
			prevHash := hash(uint32(x>>0), shift)
			table[prevHash&tableMask] = uint16(s - 1)
			currHash := hash(uint32(x>>8), shift)
			candidate = int(table[currHash&tableMask])
			table[currHash&tableMask] = uint16(s)
			if uint32(x>>8) != load32(src, candidate) {
				nextHash = hash(uint32(x>>16), shift)
				s++
				break
			}
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
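encodeBlock above is the pure-Go fallback behind the package's exported Encode entry point on platforms without the assembly version. Callers never use it directly; a typical block-format round trip through the public API looks like this sketch (assuming the usual github.com/golang/snappy import path and its Encode/Decode signatures):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("snappy likes repetition: hello hello hello hello hello hello")

	// Encode reuses dst when it is large enough; passing nil lets it allocate.
	compressed := snappy.Encode(nil, src)

	// Decode reverses the block format that encodeBlock/emitLiteral/emitCopy produce.
	decompressed, err := snappy.Decode(nil, compressed)
	if err != nil {
		panic(err)
	}

	fmt.Printf("original=%d compressed=%d roundtrip_ok=%v\n",
		len(src), len(compressed), bytes.Equal(src, decompressed))
}
```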
1965
vendor/github.com/golang/snappy/golden_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
87
vendor/github.com/golang/snappy/snappy.go
generated
vendored
Normal file
@@ -0,0 +1,87 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package snappy implements the snappy block-based compression format.
// It aims for very high speeds and reasonable compression.
//
// The C++ snappy implementation is at https://github.com/google/snappy
package snappy // import "github.com/golang/snappy"

import (
	"hash/crc32"
)

/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.

For literal tags:
  - If m < 60, the next 1 + m bytes are literal bytes.
  - Otherwise, let n be the little-endian unsigned integer denoted by the next
    m - 59 bytes. The next 1 + n bytes after that are literal bytes.

For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
    of the offset. The next byte is bits 0-7 of the offset.
  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
    The length is 1 + m. The offset is the little-endian unsigned integer
    denoted by the next 2 bytes.
  - For l == 3, this tag is a legacy format that is no longer issued by most
    encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
    integer denoted by the next 4 bytes.
*/
const (
	tagLiteral = 0x00
	tagCopy1   = 0x01
	tagCopy2   = 0x02
	tagCopy4   = 0x03
)

const (
	checksumSize    = 4
	chunkHeaderSize = 4
	magicChunk      = "\xff\x06\x00\x00" + magicBody
	magicBody       = "sNaPpY"

	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
	// part of the wire format per se, but some parts of the encoder assume
	// that an offset fits into a uint16.
	//
	// Also, for the framing format (Writer type instead of Encode function),
	// https://github.com/google/snappy/blob/master/framing_format.txt says
	// that "the uncompressed data in a chunk must be no longer than 65536
	// bytes".
	maxBlockSize = 65536

	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
	// hard coded to be a const instead of a variable, so that obufLen can also
	// be a const. Their equivalence is confirmed by
	// TestMaxEncodedLenOfMaxBlockSize.
	maxEncodedLenOfMaxBlockSize = 76490

	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
)

const (
	chunkTypeCompressedData   = 0x00
	chunkTypeUncompressedData = 0x01
	chunkTypePadding          = 0xfe
	chunkTypeStreamIdentifier = 0xff
)

var crcTable = crc32.MakeTable(crc32.Castagnoli)

// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
	c := crc32.Update(0, crcTable, b)
	return uint32(c>>15|c<<17) + 0xa282ead8
}
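The comment block above packs the chunk tag l into the two low bits of a chunk's first byte and m into the remaining six. A small standalone sketch (not part of the vendored file) that splits a tag byte according to that description:

```go
package main

import "fmt"

const (
	tagLiteral = 0x00
	tagCopy1   = 0x01
	tagCopy2   = 0x02
	tagCopy4   = 0x03
)

// splitTag returns l (the low 2 bits, the chunk tag) and m (the high 6 bits)
// of a chunk's first byte, as described in the package comment above.
func splitTag(b byte) (l, m byte) {
	return b & 0x03, b >> 2
}

func main() {
	// emitLiteral encodes a 5-byte literal as uint8(5-1)<<2 | tagLiteral.
	l, m := splitTag(byte(4)<<2 | tagLiteral)
	fmt.Printf("l=%d m=%d -> literal of 1+m = %d bytes\n", l, m, 1+m)

	// emitCopy encodes a length-64 copy as 63<<2 | tagCopy2 (0xfe).
	l, m = splitTag(byte(63)<<2 | tagCopy2)
	fmt.Printf("l=%d m=%d -> copy of 1+m = %d bytes\n", l, m, 1+m)
}
```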
1353
vendor/github.com/golang/snappy/snappy_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
396
vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt
generated
vendored
Normal file
|
@ -0,0 +1,396 @@
|
|||
Produced by David Widger. The previous edition was updated by Jose
|
||||
Menendez.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
THE ADVENTURES OF TOM SAWYER
|
||||
BY
|
||||
MARK TWAIN
|
||||
(Samuel Langhorne Clemens)
|
||||
|
||||
|
||||
|
||||
|
||||
P R E F A C E
|
||||
|
||||
MOST of the adventures recorded in this book really occurred; one or
|
||||
two were experiences of my own, the rest those of boys who were
|
||||
schoolmates of mine. Huck Finn is drawn from life; Tom Sawyer also, but
|
||||
not from an individual--he is a combination of the characteristics of
|
||||
three boys whom I knew, and therefore belongs to the composite order of
|
||||
architecture.
|
||||
|
||||
The odd superstitions touched upon were all prevalent among children
|
||||
and slaves in the West at the period of this story--that is to say,
|
||||
thirty or forty years ago.
|
||||
|
||||
Although my book is intended mainly for the entertainment of boys and
|
||||
girls, I hope it will not be shunned by men and women on that account,
|
||||
for part of my plan has been to try to pleasantly remind adults of what
|
||||
they once were themselves, and of how they felt and thought and talked,
|
||||
and what queer enterprises they sometimes engaged in.
|
||||
|
||||
THE AUTHOR.
|
||||
|
||||
HARTFORD, 1876.
|
||||
|
||||
|
||||
|
||||
T O M S A W Y E R
|
||||
|
||||
|
||||
|
||||
CHAPTER I
|
||||
|
||||
"TOM!"
|
||||
|
||||
No answer.
|
||||
|
||||
"TOM!"
|
||||
|
||||
No answer.
|
||||
|
||||
"What's gone with that boy, I wonder? You TOM!"
|
||||
|
||||
No answer.
|
||||
|
||||
The old lady pulled her spectacles down and looked over them about the
|
||||
room; then she put them up and looked out under them. She seldom or
|
||||
never looked THROUGH them for so small a thing as a boy; they were her
|
||||
state pair, the pride of her heart, and were built for "style," not
|
||||
service--she could have seen through a pair of stove-lids just as well.
|
||||
She looked perplexed for a moment, and then said, not fiercely, but
|
||||
still loud enough for the furniture to hear:
|
||||
|
||||
"Well, I lay if I get hold of you I'll--"
|
||||
|
||||
She did not finish, for by this time she was bending down and punching
|
||||
under the bed with the broom, and so she needed breath to punctuate the
|
||||
punches with. She resurrected nothing but the cat.
|
||||
|
||||
"I never did see the beat of that boy!"
|
||||
|
||||
She went to the open door and stood in it and looked out among the
|
||||
tomato vines and "jimpson" weeds that constituted the garden. No Tom.
|
||||
So she lifted up her voice at an angle calculated for distance and
|
||||
shouted:
|
||||
|
||||
"Y-o-u-u TOM!"
|
||||
|
||||
There was a slight noise behind her and she turned just in time to
|
||||
seize a small boy by the slack of his roundabout and arrest his flight.
|
||||
|
||||
"There! I might 'a' thought of that closet. What you been doing in
|
||||
there?"
|
||||
|
||||
"Nothing."
|
||||
|
||||
"Nothing! Look at your hands. And look at your mouth. What IS that
|
||||
truck?"
|
||||
|
||||
"I don't know, aunt."
|
||||
|
||||
"Well, I know. It's jam--that's what it is. Forty times I've said if
|
||||
you didn't let that jam alone I'd skin you. Hand me that switch."
|
||||
|
||||
The switch hovered in the air--the peril was desperate--
|
||||
|
||||
"My! Look behind you, aunt!"
|
||||
|
||||
The old lady whirled round, and snatched her skirts out of danger. The
|
||||
lad fled on the instant, scrambled up the high board-fence, and
|
||||
disappeared over it.
|
||||
|
||||
His aunt Polly stood surprised a moment, and then broke into a gentle
|
||||
laugh.
|
||||
|
||||
"Hang the boy, can't I never learn anything? Ain't he played me tricks
|
||||
enough like that for me to be looking out for him by this time? But old
|
||||
fools is the biggest fools there is. Can't learn an old dog new tricks,
|
||||
as the saying is. But my goodness, he never plays them alike, two days,
|
||||
and how is a body to know what's coming? He 'pears to know just how
|
||||
long he can torment me before I get my dander up, and he knows if he
|
||||
can make out to put me off for a minute or make me laugh, it's all down
|
||||
again and I can't hit him a lick. I ain't doing my duty by that boy,
|
||||
and that's the Lord's truth, goodness knows. Spare the rod and spile
|
||||
the child, as the Good Book says. I'm a laying up sin and suffering for
|
||||
us both, I know. He's full of the Old Scratch, but laws-a-me! he's my
|
||||
own dead sister's boy, poor thing, and I ain't got the heart to lash
|
||||
him, somehow. Every time I let him off, my conscience does hurt me so,
|
||||
and every time I hit him my old heart most breaks. Well-a-well, man
|
||||
that is born of woman is of few days and full of trouble, as the
|
||||
Scripture says, and I reckon it's so. He'll play hookey this evening, *
|
||||
and [* Southwestern for "afternoon"] I'll just be obleeged to make him
|
||||
work, to-morrow, to punish him. It's mighty hard to make him work
|
||||
Saturdays, when all the boys is having holiday, but he hates work more
|
||||
than he hates anything else, and I've GOT to do some of my duty by him,
|
||||
or I'll be the ruination of the child."
|
||||
|
||||
Tom did play hookey, and he had a very good time. He got back home
|
||||
barely in season to help Jim, the small colored boy, saw next-day's
|
||||
wood and split the kindlings before supper--at least he was there in
|
||||
time to tell his adventures to Jim while Jim did three-fourths of the
|
||||
work. Tom's younger brother (or rather half-brother) Sid was already
|
||||
through with his part of the work (picking up chips), for he was a
|
||||
quiet boy, and had no adventurous, troublesome ways.
|
||||
|
||||
While Tom was eating his supper, and stealing sugar as opportunity
|
||||
offered, Aunt Polly asked him questions that were full of guile, and
|
||||
very deep--for she wanted to trap him into damaging revealments. Like
|
||||
many other simple-hearted souls, it was her pet vanity to believe she
|
||||
was endowed with a talent for dark and mysterious diplomacy, and she
|
||||
loved to contemplate her most transparent devices as marvels of low
|
||||
cunning. Said she:
|
||||
|
||||
"Tom, it was middling warm in school, warn't it?"
|
||||
|
||||
"Yes'm."
|
||||
|
||||
"Powerful warm, warn't it?"
|
||||
|
||||
"Yes'm."
|
||||
|
||||
"Didn't you want to go in a-swimming, Tom?"
|
||||
|
||||
A bit of a scare shot through Tom--a touch of uncomfortable suspicion.
|
||||
He searched Aunt Polly's face, but it told him nothing. So he said:
|
||||
|
||||
"No'm--well, not very much."
|
||||
|
||||
The old lady reached out her hand and felt Tom's shirt, and said:
|
||||
|
||||
"But you ain't too warm now, though." And it flattered her to reflect
|
||||
that she had discovered that the shirt was dry without anybody knowing
|
||||
that that was what she had in her mind. But in spite of her, Tom knew
|
||||
where the wind lay, now. So he forestalled what might be the next move:
|
||||
|
||||
"Some of us pumped on our heads--mine's damp yet. See?"
|
||||
|
||||
Aunt Polly was vexed to think she had overlooked that bit of
|
||||
circumstantial evidence, and missed a trick. Then she had a new
|
||||
inspiration:
|
||||
|
||||
"Tom, you didn't have to undo your shirt collar where I sewed it, to
|
||||
pump on your head, did you? Unbutton your jacket!"
|
||||
|
||||
The trouble vanished out of Tom's face. He opened his jacket. His
|
||||
shirt collar was securely sewed.
|
||||
|
||||
"Bother! Well, go 'long with you. I'd made sure you'd played hookey
|
||||
and been a-swimming. But I forgive ye, Tom. I reckon you're a kind of a
|
||||
singed cat, as the saying is--better'n you look. THIS time."
|
||||
|
||||
She was half sorry her sagacity had miscarried, and half glad that Tom
|
||||
had stumbled into obedient conduct for once.
|
||||
|
||||
But Sidney said:
|
||||
|
||||
"Well, now, if I didn't think you sewed his collar with white thread,
|
||||
but it's black."
|
||||
|
||||
"Why, I did sew it with white! Tom!"
|
||||
|
||||
But Tom did not wait for the rest. As he went out at the door he said:
|
||||
|
||||
"Siddy, I'll lick you for that."
|
||||
|
||||
In a safe place Tom examined two large needles which were thrust into
|
||||
the lapels of his jacket, and had thread bound about them--one needle
|
||||
carried white thread and the other black. He said:
|
||||
|
||||
"She'd never noticed if it hadn't been for Sid. Confound it! sometimes
|
||||
she sews it with white, and sometimes she sews it with black. I wish to
|
||||
geeminy she'd stick to one or t'other--I can't keep the run of 'em. But
|
||||
I bet you I'll lam Sid for that. I'll learn him!"
|
||||
|
||||
He was not the Model Boy of the village. He knew the model boy very
|
||||
well though--and loathed him.
|
||||
|
||||
Within two minutes, or even less, he had forgotten all his troubles.
|
||||
Not because his troubles were one whit less heavy and bitter to him
|
||||
than a man's are to a man, but because a new and powerful interest bore
|
||||
them down and drove them out of his mind for the time--just as men's
|
||||
misfortunes are forgotten in the excitement of new enterprises. This
|
||||
new interest was a valued novelty in whistling, which he had just
|
||||
acquired from a negro, and he was suffering to practise it undisturbed.
|
||||
It consisted in a peculiar bird-like turn, a sort of liquid warble,
|
||||
produced by touching the tongue to the roof of the mouth at short
|
||||
intervals in the midst of the music--the reader probably remembers how
|
||||
to do it, if he has ever been a boy. Diligence and attention soon gave
|
||||
him the knack of it, and he strode down the street with his mouth full
|
||||
of harmony and his soul full of gratitude. He felt much as an
|
||||
astronomer feels who has discovered a new planet--no doubt, as far as
|
||||
strong, deep, unalloyed pleasure is concerned, the advantage was with
|
||||
the boy, not the astronomer.
|
||||
|
||||
The summer evenings were long. It was not dark, yet. Presently Tom
|
||||
checked his whistle. A stranger was before him--a boy a shade larger
|
||||
than himself. A new-comer of any age or either sex was an impressive
|
||||
curiosity in the poor little shabby village of St. Petersburg. This boy
|
||||
was well dressed, too--well dressed on a week-day. This was simply
|
||||
astounding. His cap was a dainty thing, his close-buttoned blue cloth
|
||||
roundabout was new and natty, and so were his pantaloons. He had shoes
|
||||
on--and it was only Friday. He even wore a necktie, a bright bit of
|
||||
ribbon. He had a citified air about him that ate into Tom's vitals. The
|
||||
more Tom stared at the splendid marvel, the higher he turned up his
|
||||
nose at his finery and the shabbier and shabbier his own outfit seemed
|
||||
to him to grow. Neither boy spoke. If one moved, the other moved--but
|
||||
only sidewise, in a circle; they kept face to face and eye to eye all
|
||||
the time. Finally Tom said:
|
||||
|
||||
"I can lick you!"
|
||||
|
||||
"I'd like to see you try it."
|
||||
|
||||
"Well, I can do it."
|
||||
|
||||
"No you can't, either."
|
||||
|
||||
"Yes I can."
|
||||
|
||||
"No you can't."
|
||||
|
||||
"I can."
|
||||
|
||||
"You can't."
|
||||
|
||||
"Can!"
|
||||
|
||||
"Can't!"
|
||||
|
||||
An uncomfortable pause. Then Tom said:
|
||||
|
||||
"What's your name?"
|
||||
|
||||
"'Tisn't any of your business, maybe."
|
||||
|
||||
"Well I 'low I'll MAKE it my business."
|
||||
|
||||
"Well why don't you?"
|
||||
|
||||
"If you say much, I will."
|
||||
|
||||
"Much--much--MUCH. There now."
|
||||
|
||||
"Oh, you think you're mighty smart, DON'T you? I could lick you with
|
||||
one hand tied behind me, if I wanted to."
|
||||
|
||||
"Well why don't you DO it? You SAY you can do it."
|
||||
|
||||
"Well I WILL, if you fool with me."
|
||||
|
||||
"Oh yes--I've seen whole families in the same fix."
|
||||
|
||||
"Smarty! You think you're SOME, now, DON'T you? Oh, what a hat!"
|
||||
|
||||
"You can lump that hat if you don't like it. I dare you to knock it
|
||||
off--and anybody that'll take a dare will suck eggs."
|
||||
|
||||
"You're a liar!"
|
||||
|
||||
"You're another."
|
||||
|
||||
"You're a fighting liar and dasn't take it up."
|
||||
|
||||
"Aw--take a walk!"
|
||||
|
||||
"Say--if you give me much more of your sass I'll take and bounce a
|
||||
rock off'n your head."
|
||||
|
||||
"Oh, of COURSE you will."
|
||||
|
||||
"Well I WILL."
|
||||
|
||||
"Well why don't you DO it then? What do you keep SAYING you will for?
|
||||
Why don't you DO it? It's because you're afraid."
|
||||
|
||||
"I AIN'T afraid."
|
||||
|
||||
"You are."
|
||||
|
||||
"I ain't."
|
||||
|
||||
"You are."
|
||||
|
||||
Another pause, and more eying and sidling around each other. Presently
|
||||
they were shoulder to shoulder. Tom said:
|
||||
|
||||
"Get away from here!"
|
||||
|
||||
"Go away yourself!"
|
||||
|
||||
"I won't."
|
||||
|
||||
"I won't either."
|
||||
|
||||
So they stood, each with a foot placed at an angle as a brace, and
|
||||
both shoving with might and main, and glowering at each other with
|
||||
hate. But neither could get an advantage. After struggling till both
|
||||
were hot and flushed, each relaxed his strain with watchful caution,
|
||||
and Tom said:
|
||||
|
||||
"You're a coward and a pup. I'll tell my big brother on you, and he
|
||||
can thrash you with his little finger, and I'll make him do it, too."
|
||||
|
||||
"What do I care for your big brother? I've got a brother that's bigger
|
||||
than he is--and what's more, he can throw him over that fence, too."
|
||||
[Both brothers were imaginary.]
|
||||
|
||||
"That's a lie."
|
||||
|
||||
"YOUR saying so don't make it so."
|
||||
|
||||
Tom drew a line in the dust with his big toe, and said:
|
||||
|
||||
"I dare you to step over that, and I'll lick you till you can't stand
|
||||
up. Anybody that'll take a dare will steal sheep."
|
||||
|
||||
The new boy stepped over promptly, and said:
|
||||
|
||||
"Now you said you'd do it, now let's see you do it."
|
||||
|
||||
"Don't you crowd me now; you better look out."
|
||||
|
||||
"Well, you SAID you'd do it--why don't you do it?"
|
||||
|
||||
"By jingo! for two cents I WILL do it."
|
||||
|
||||
The new boy took two broad coppers out of his pocket and held them out
|
||||
with derision. Tom struck them to the ground. In an instant both boys
|
||||
were rolling and tumbling in the dirt, gripped together like cats; and
|
||||
for the space of a minute they tugged and tore at each other's hair and
|
||||
clothes, punched and scratched each other's nose, and covered
|
||||
themselves with dust and glory. Presently the confusion took form, and
|
||||
through the fog of battle Tom appeared, seated astride the new boy, and
|
||||
pounding him with his fists. "Holler 'nuff!" said he.
|
||||
|
||||
The boy only struggled to free himself. He was crying--mainly from rage.
|
||||
|
||||
"Holler 'nuff!"--and the pounding went on.
|
||||
|
||||
At last the stranger got out a smothered "'Nuff!" and Tom let him up
|
||||
and said:
|
||||
|
||||
"Now that'll learn you. Better look out who you're fooling with next
|
||||
time."
|
||||
|
||||
The new boy went off brushing the dust from his clothes, sobbing,
|
||||
snuffling, and occasionally looking back and shaking his head and
|
||||
threatening what he would do to Tom the "next time he caught him out."
|
||||
To which Tom responded with jeers, and started off in high feather, and
|
||||
as soon as his back was turned the new boy snatched up a stone, threw
|
||||
it and hit him between the shoulders and then turned tail and ran like
|
||||
an antelope. Tom chased the traitor home, and thus found out where he
|
||||
lived. He then held a position at the gate for some time, daring the
|
||||
enemy to come outside, but the enemy only made faces at him through the
|
||||
window and declined. At last the enemy's mother appeared, and called
|
||||
Tom a bad, vicious, vulgar child, and ordered him away. So he went
|
||||
away; but he said he "'lowed" to "lay" for that boy.
|
||||
|
||||
He got home pretty late that night, and when he climbed cautiously in
|
||||
at the window, he uncovered an ambuscade, in the person of his aunt;
|
||||
and when she saw the state his clothes were in her resolution to turn
|
||||
his Saturday holiday into captivity at hard labor became adamantine in
|
||||
its firmness.
|
BIN
vendor/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy
generated
vendored
Normal file
Binary file not shown.
25
vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
generated
vendored
@@ -3,11 +3,12 @@ package cleanhttp
import (
	"net"
	"net/http"
	"runtime"
	"time"
)

// DefaultTransport returns a new http.Transport with the same default values
// as http.DefaultTransport, but with idle connections and keepalives disabled.
// DefaultTransport returns a new http.Transport with similar default values to
// http.DefaultTransport, but with idle connections and keepalives disabled.
func DefaultTransport() *http.Transport {
	transport := DefaultPooledTransport()
	transport.DisableKeepAlives = true

@@ -22,13 +23,15 @@ func DefaultTransport() *http.Transport {
func DefaultPooledTransport() *http.Transport {
	transport := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		Dial: (&net.Dialer{
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).Dial,
		TLSHandshakeTimeout: 10 * time.Second,
		DisableKeepAlives:   false,
		MaxIdleConnsPerHost: 1,
		}).DialContext,
		MaxIdleConns:          100,
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
		MaxIdleConnsPerHost:   runtime.GOMAXPROCS(0) + 1,
	}
	return transport
}

@@ -42,10 +45,10 @@ func DefaultClient() *http.Client {
	}
}

// DefaultPooledClient returns a new http.Client with the same default values
// as http.Client, but with a shared Transport. Do not use this function
// for transient clients as it can leak file descriptors over time. Only use
// this for clients that will be re-used for the same host(s).
// DefaultPooledClient returns a new http.Client with similar default values to
// http.Client, but with a shared Transport. Do not use this function for
// transient clients as it can leak file descriptors over time. Only use this
// for clients that will be re-used for the same host(s).
func DefaultPooledClient() *http.Client {
	return &http.Client{
		Transport: DefaultPooledTransport(),
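As the reworded doc comments above note, DefaultTransport disables keep-alives entirely while DefaultPooledTransport keeps a bounded, shared connection pool. A hedged usage sketch of the pooled client (the URL is a placeholder):

```go
package main

import (
	"fmt"
	"io/ioutil"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
)

func main() {
	// The pooled client shares one Transport, so reuse it for repeated
	// requests to the same host(s) instead of creating one per call.
	client := cleanhttp.DefaultPooledClient()

	resp, err := client.Get("https://example.com/") // placeholder URL
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("got %d bytes, status %s\n", len(body), resp.Status)
}
```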
12
vendor/github.com/hashicorp/go-multierror/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
|||
sudo: false
|
||||
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.6
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
|
||||
script: make test testrace
|
31
vendor/github.com/hashicorp/go-multierror/Makefile
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
|||
TEST?=./...
|
||||
|
||||
default: test
|
||||
|
||||
# test runs the test suite and vets the code.
|
||||
test: generate
|
||||
@echo "==> Running tests..."
|
||||
@go list $(TEST) \
|
||||
| grep -v "/vendor/" \
|
||||
| xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS}
|
||||
|
||||
# testrace runs the race checker
|
||||
testrace: generate
|
||||
@echo "==> Running tests (race)..."
|
||||
@go list $(TEST) \
|
||||
| grep -v "/vendor/" \
|
||||
| xargs -n1 go test -timeout=60s -race ${TESTARGS}
|
||||
|
||||
# updatedeps installs all the dependencies needed to run and build.
|
||||
updatedeps:
|
||||
@sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'"
|
||||
|
||||
# generate runs `go generate` to build the dynamically generated source files.
|
||||
generate:
|
||||
@echo "==> Generating..."
|
||||
@find . -type f -name '.DS_Store' -delete
|
||||
@go list ./... \
|
||||
| grep -v "/vendor/" \
|
||||
| xargs -n1 go generate
|
||||
|
||||
.PHONY: default test testrace updatedeps generate
|
6
vendor/github.com/hashicorp/go-multierror/README.md
generated
vendored
@@ -1,5 +1,11 @@
# go-multierror

[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis]
[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]

[travis]: https://travis-ci.org/hashicorp/go-multierror
[godocs]: https://godoc.org/github.com/hashicorp/go-multierror

`go-multierror` is a package for Go that provides a mechanism for
representing a list of `error` values as a single `error`.
8
vendor/github.com/hashicorp/go-multierror/append.go
generated
vendored
@@ -18,9 +18,13 @@ func Append(err error, errs ...error) *Error {
	for _, e := range errs {
		switch e := e.(type) {
		case *Error:
			err.Errors = append(err.Errors, e.Errors...)
			if e != nil {
				err.Errors = append(err.Errors, e.Errors...)
			}
		default:
			err.Errors = append(err.Errors, e)
			if e != nil {
				err.Errors = append(err.Errors, e)
			}
		}
	}
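The change above makes Append skip nil *Error and nil error arguments instead of recording them. A short usage sketch of Append under that behaviour (the error messages are illustrative):

```go
package main

import (
	"errors"
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

func main() {
	var result *multierror.Error

	result = multierror.Append(result, errors.New("first failure"))
	result = multierror.Append(result, nil) // skipped by the change above
	result = multierror.Append(result, errors.New("second failure"))

	fmt.Println(len(result.Errors)) // 2, not 3
	fmt.Println(result.Error())     // formatted by ListFormatFunc ("2 errors occurred: ...")
}
```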
18
vendor/github.com/hashicorp/go-multierror/append_test.go
generated
vendored
|
@ -47,6 +47,24 @@ func TestAppend_NilError(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestAppend_NilErrorArg(t *testing.T) {
|
||||
var err error
|
||||
var nilErr *Error
|
||||
result := Append(err, nilErr)
|
||||
if len(result.Errors) != 0 {
|
||||
t.Fatalf("wrong len: %d", len(result.Errors))
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppend_NilErrorIfaceArg(t *testing.T) {
|
||||
var err error
|
||||
var nilErr error
|
||||
result := Append(err, nilErr)
|
||||
if len(result.Errors) != 0 {
|
||||
t.Fatalf("wrong len: %d", len(result.Errors))
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppend_NonError(t *testing.T) {
|
||||
original := errors.New("foo")
|
||||
result := Append(original, errors.New("bar"))
|
||||
|
|
2
vendor/github.com/hashicorp/go-multierror/flatten_test.go
generated
vendored
|
@ -26,7 +26,7 @@ func TestFlatten(t *testing.T) {
|
|||
}
|
||||
|
||||
expected := strings.TrimSpace(`
|
||||
3 error(s) occurred:
|
||||
3 errors occurred:
|
||||
|
||||
* one
|
||||
* two
|
||||
|
|
6
vendor/github.com/hashicorp/go-multierror/format.go
generated
vendored
@@ -12,12 +12,16 @@ type ErrorFormatFunc func([]error) string
// ListFormatFunc is a basic formatter that outputs the number of errors
// that occurred along with a bullet point list of the errors.
func ListFormatFunc(es []error) string {
	if len(es) == 1 {
		return fmt.Sprintf("1 error occurred:\n\n* %s", es[0])
	}

	points := make([]string, len(es))
	for i, err := range es {
		points[i] = fmt.Sprintf("* %s", err)
	}

	return fmt.Sprintf(
		"%d error(s) occurred:\n\n%s",
		"%d errors occurred:\n\n%s",
		len(es), strings.Join(points, "\n"))
}
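The reworded formatter above now distinguishes the singular and plural cases. Calling it directly, as the updated tests do, shows both forms:

```go
package main

import (
	"errors"
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

func main() {
	one := []error{errors.New("foo")}
	two := []error{errors.New("foo"), errors.New("bar")}

	fmt.Println(multierror.ListFormatFunc(one))
	// 1 error occurred:
	//
	// * foo

	fmt.Println(multierror.ListFormatFunc(two))
	// 2 errors occurred:
	//
	// * foo
	// * bar
}
```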
19
vendor/github.com/hashicorp/go-multierror/format_test.go
generated
vendored
|
@ -5,8 +5,23 @@ import (
|
|||
"testing"
|
||||
)
|
||||
|
||||
func TestListFormatFunc(t *testing.T) {
|
||||
expected := `2 error(s) occurred:
|
||||
func TestListFormatFuncSingle(t *testing.T) {
|
||||
expected := `1 error occurred:
|
||||
|
||||
* foo`
|
||||
|
||||
errors := []error{
|
||||
errors.New("foo"),
|
||||
}
|
||||
|
||||
actual := ListFormatFunc(errors)
|
||||
if actual != expected {
|
||||
t.Fatalf("bad: %#v", actual)
|
||||
}
|
||||
}
|
||||
|
||||
func TestListFormatFuncMultiple(t *testing.T) {
|
||||
expected := `2 errors occurred:
|
||||
|
||||
* foo
|
||||
* bar`
|
||||
|
|
4
vendor/github.com/hashicorp/go-multierror/multierror.go
generated
vendored
@@ -40,11 +40,11 @@ func (e *Error) GoString() string {
}

// WrappedErrors returns the list of errors that this Error is wrapping.
// It is an implementatin of the errwrap.Wrapper interface so that
// It is an implementation of the errwrap.Wrapper interface so that
// multierror.Error can be used with that library.
//
// This method is not safe to be called concurrently and is no different
// than accessing the Errors field directly. It is implementd only to
// than accessing the Errors field directly. It is implemented only to
// satisfy the errwrap.Wrapper interface.
func (e *Error) WrappedErrors() []error {
	return e.Errors
2
vendor/github.com/hashicorp/go-multierror/multierror_test.go
generated
vendored
|
@ -27,7 +27,7 @@ func TestErrorError_custom(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestErrorError_default(t *testing.T) {
|
||||
expected := `2 error(s) occurred:
|
||||
expected := `2 errors occurred:
|
||||
|
||||
* foo
|
||||
* bar`
|
||||
|
|
54
vendor/github.com/hashicorp/go-multierror/scripts/deps.sh
generated
vendored
Executable file
|
@ -0,0 +1,54 @@
|
|||
#!/usr/bin/env bash
|
||||
#
|
||||
# This script updates dependencies using a temporary directory. This is required
|
||||
# to avoid any auxillary dependencies that sneak into GOPATH.
|
||||
set -e
|
||||
|
||||
# Get the parent directory of where this script is.
|
||||
SOURCE="${BASH_SOURCE[0]}"
|
||||
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
|
||||
DIR="$(cd -P "$(dirname "$SOURCE")/.." && pwd)"
|
||||
|
||||
# Change into that directory
|
||||
cd "$DIR"
|
||||
|
||||
# Get the name from the directory
|
||||
NAME=${NAME:-"$(basename $(pwd))"}
|
||||
|
||||
# Announce
|
||||
echo "==> Updating dependencies..."
|
||||
|
||||
echo "--> Making tmpdir..."
|
||||
tmpdir=$(mktemp -d)
|
||||
function cleanup {
|
||||
rm -rf "${tmpdir}"
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
export GOPATH="${tmpdir}"
|
||||
export PATH="${tmpdir}/bin:$PATH"
|
||||
|
||||
mkdir -p "${tmpdir}/src/github.com/hashicorp"
|
||||
pushd "${tmpdir}/src/github.com/hashicorp" &>/dev/null
|
||||
|
||||
echo "--> Copying ${NAME}..."
|
||||
cp -R "$DIR" "${tmpdir}/src/github.com/hashicorp/${NAME}"
|
||||
pushd "${tmpdir}/src/github.com/hashicorp/${NAME}" &>/dev/null
|
||||
rm -rf vendor/
|
||||
|
||||
echo "--> Installing dependency manager..."
|
||||
go get -u github.com/kardianos/govendor
|
||||
govendor init
|
||||
|
||||
echo "--> Installing all dependencies (may take some time)..."
|
||||
govendor fetch -v +outside
|
||||
|
||||
echo "--> Vendoring..."
|
||||
govendor add +external
|
||||
|
||||
echo "--> Moving into place..."
|
||||
vpath="${tmpdir}/src/github.com/hashicorp/${NAME}/vendor"
|
||||
popd &>/dev/null
|
||||
popd &>/dev/null
|
||||
rm -rf vendor/
|
||||
cp -R "${vpath}" .
|
21
vendor/github.com/hashicorp/hcl/.github/ISSUE_TEMPLATE.md
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
### HCL Template
|
||||
```hcl
|
||||
# Place your HCL configuration file here
|
||||
```
|
||||
|
||||
### Expected behavior
|
||||
What should have happened?
|
||||
|
||||
### Actual behavior
|
||||
What actually happened?
|
||||
|
||||
### Steps to reproduce
|
||||
1.
|
||||
2.
|
||||
3.
|
||||
|
||||
### References
|
||||
Are there any other GitHub issues (open or closed) that should
|
||||
be linked here? For example:
|
||||
- GH-1234
|
||||
- ...
|
12
vendor/github.com/hashicorp/hcl/.travis.yml
generated
vendored
|
@ -1,3 +1,13 @@
|
|||
sudo: false
|
||||
|
||||
language: go
|
||||
go: 1.7
|
||||
|
||||
go:
|
||||
- 1.x
|
||||
- tip
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
|
||||
script: make test
|
||||
|
|
2
vendor/github.com/hashicorp/hcl/appveyor.yml
generated
vendored
|
@ -4,7 +4,7 @@ clone_folder: c:\gopath\src\github.com\hashicorp\hcl
|
|||
environment:
|
||||
GOPATH: c:\gopath
|
||||
init:
|
||||
- git config --global core.autocrlf true
|
||||
- git config --global core.autocrlf false
|
||||
install:
|
||||
- cmd: >-
|
||||
echo %Path%
|
||||
|
|
20
vendor/github.com/hashicorp/hcl/decoder.go
generated
vendored
@@ -89,9 +89,9 @@ func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error
	switch k.Kind() {
	case reflect.Bool:
		return d.decodeBool(name, node, result)
	case reflect.Float64:
	case reflect.Float32, reflect.Float64:
		return d.decodeFloat(name, node, result)
	case reflect.Int:
	case reflect.Int, reflect.Int32, reflect.Int64:
		return d.decodeInt(name, node, result)
	case reflect.Interface:
		// When we see an interface, we make our own thing

@@ -137,13 +137,13 @@ func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) e
func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
	switch n := node.(type) {
	case *ast.LiteralType:
		if n.Token.Type == token.FLOAT {
		if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
			v, err := strconv.ParseFloat(n.Token.Text, 64)
			if err != nil {
				return err
			}

			result.Set(reflect.ValueOf(v))
			result.Set(reflect.ValueOf(v).Convert(result.Type()))
			return nil
		}
	}

@@ -164,7 +164,11 @@ func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) er
			return err
		}

		result.Set(reflect.ValueOf(int(v)))
		if result.Kind() == reflect.Interface {
			result.Set(reflect.ValueOf(int(v)))
		} else {
			result.SetInt(v)
		}
		return nil
	case token.STRING:
		v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)

@@ -172,7 +176,11 @@ func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) er
			return err
		}

		result.Set(reflect.ValueOf(int(v)))
		if result.Kind() == reflect.Interface {
			result.Set(reflect.ValueOf(int(v)))
		} else {
			result.SetInt(v)
		}
		return nil
	}
}
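With the decoder now accepting reflect.Float32, reflect.Int32 and reflect.Int64 targets (and using SetInt for concrete integer kinds), struct fields of those types decode directly. A sketch mirroring the new float tests; the field names and input are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

type Config struct {
	Ratio float32 `hcl:"ratio"`
	Count int64   `hcl:"count"`
}

func main() {
	const input = `
ratio = 1.02
count = 3
`
	var cfg Config
	if err := hcl.Decode(&cfg, input); err != nil {
		panic(err)
	}
	fmt.Printf("ratio=%v count=%v\n", cfg.Ratio, cfg.Count)
}
```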
172
vendor/github.com/hashicorp/hcl/decoder_test.go
generated
vendored
|
@ -5,10 +5,10 @@ import (
|
|||
"path/filepath"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
"github.com/hashicorp/hcl/testhelper"
|
||||
)
|
||||
|
||||
func TestDecode_interface(t *testing.T) {
|
||||
|
@ -64,7 +64,7 @@ func TestDecode_interface(t *testing.T) {
|
|||
"qux": "back\\slash",
|
||||
"bar": "new\nline",
|
||||
"qax": `slash\:colon`,
|
||||
"nested": `${HH\:mm\:ss}`,
|
||||
"nested": `${HH\\:mm\\:ss}`,
|
||||
"nestedquotes": `${"\"stringwrappedinquotes\""}`,
|
||||
},
|
||||
},
|
||||
|
@ -73,6 +73,7 @@ func TestDecode_interface(t *testing.T) {
|
|||
false,
|
||||
map[string]interface{}{
|
||||
"a": 1.02,
|
||||
"b": 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -82,9 +83,13 @@ func TestDecode_interface(t *testing.T) {
|
|||
},
|
||||
{
|
||||
"multiline_literal.hcl",
|
||||
true,
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"multiline_literal_with_hil.hcl",
|
||||
false,
|
||||
map[string]interface{}{"multiline_literal": testhelper.Unix2dos(`hello
|
||||
world`)},
|
||||
map[string]interface{}{"multiline_literal_with_hil": "${hello\n world}"},
|
||||
},
|
||||
{
|
||||
"multiline_no_marker.hcl",
|
||||
|
@ -94,22 +99,22 @@ func TestDecode_interface(t *testing.T) {
|
|||
{
|
||||
"multiline.hcl",
|
||||
false,
|
||||
map[string]interface{}{"foo": testhelper.Unix2dos("bar\nbaz\n")},
|
||||
map[string]interface{}{"foo": "bar\nbaz\n"},
|
||||
},
|
||||
{
|
||||
"multiline_indented.hcl",
|
||||
false,
|
||||
map[string]interface{}{"foo": testhelper.Unix2dos(" bar\n baz\n")},
|
||||
map[string]interface{}{"foo": " bar\n baz\n"},
|
||||
},
|
||||
{
|
||||
"multiline_no_hanging_indent.hcl",
|
||||
false,
|
||||
map[string]interface{}{"foo": testhelper.Unix2dos(" baz\n bar\n foo\n")},
|
||||
map[string]interface{}{"foo": " baz\n bar\n foo\n"},
|
||||
},
|
||||
{
|
||||
"multiline_no_eof.hcl",
|
||||
false,
|
||||
map[string]interface{}{"foo": testhelper.Unix2dos("bar\nbaz\n"), "key": "value"},
|
||||
map[string]interface{}{"foo": "bar\nbaz\n", "key": "value"},
|
||||
},
|
||||
{
|
||||
"multiline.json",
|
||||
|
@ -201,6 +206,16 @@ func TestDecode_interface(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"list_of_lists.hcl",
|
||||
false,
|
||||
map[string]interface{}{
|
||||
"foo": []interface{}{
|
||||
[]interface{}{"foo"},
|
||||
[]interface{}{"bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"list_of_maps.hcl",
|
||||
false,
|
||||
|
@ -274,6 +289,14 @@ func TestDecode_interface(t *testing.T) {
|
|||
},
|
||||
},
|
||||
|
||||
{
|
||||
"structure_list_empty.json",
|
||||
false,
|
||||
map[string]interface{}{
|
||||
"foo": []interface{}{},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
"nested_block_comment.hcl",
|
||||
false,
|
||||
|
@ -357,34 +380,72 @@ func TestDecode_interface(t *testing.T) {
|
|||
true,
|
||||
nil,
|
||||
},
|
||||
|
||||
{
|
||||
"escape_backslash.hcl",
|
||||
false,
|
||||
map[string]interface{}{
|
||||
"output": []map[string]interface{}{
|
||||
map[string]interface{}{
|
||||
"one": `${replace(var.sub_domain, ".", "\\.")}`,
|
||||
"two": `${replace(var.sub_domain, ".", "\\\\.")}`,
|
||||
"many": `${replace(var.sub_domain, ".", "\\\\\\\\.")}`,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
"git_crypt.hcl",
|
||||
true,
|
||||
nil,
|
||||
},
|
||||
|
||||
{
|
||||
"object_with_bool.hcl",
|
||||
false,
|
||||
map[string]interface{}{
|
||||
"path": []map[string]interface{}{
|
||||
map[string]interface{}{
|
||||
"policy": "write",
|
||||
"permissions": []map[string]interface{}{
|
||||
map[string]interface{}{
|
||||
"bool": []interface{}{false},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Logf("Testing: %s", tc.File)
|
||||
d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.File))
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
t.Run(tc.File, func(t *testing.T) {
|
||||
d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.File))
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
var out interface{}
|
||||
err = Decode(&out, string(d))
|
||||
if (err != nil) != tc.Err {
|
||||
t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
|
||||
}
|
||||
var out interface{}
|
||||
err = Decode(&out, string(d))
|
||||
if (err != nil) != tc.Err {
|
||||
t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(out, tc.Out) {
|
||||
t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out)
|
||||
}
|
||||
if !reflect.DeepEqual(out, tc.Out) {
|
||||
t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out)
|
||||
}
|
||||
|
||||
var v interface{}
|
||||
err = Unmarshal(d, &v)
|
||||
if (err != nil) != tc.Err {
|
||||
t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
|
||||
}
|
||||
var v interface{}
|
||||
err = Unmarshal(d, &v)
|
||||
if (err != nil) != tc.Err {
|
||||
t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(v, tc.Out) {
|
||||
t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out)
|
||||
}
|
||||
if !reflect.DeepEqual(v, tc.Out) {
|
||||
t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -748,6 +809,59 @@ func TestDecode_intString(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestDecode_float32(t *testing.T) {
|
||||
var value struct {
|
||||
A float32 `hcl:"a"`
|
||||
B float32 `hcl:"b"`
|
||||
}
|
||||
|
||||
err := Decode(&value, testReadFile(t, "float.hcl"))
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
if got, want := value.A, float32(1.02); got != want {
|
||||
t.Fatalf("wrong result %#v; want %#v", got, want)
|
||||
}
|
||||
if got, want := value.B, float32(2); got != want {
|
||||
t.Fatalf("wrong result %#v; want %#v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecode_float64(t *testing.T) {
|
||||
var value struct {
|
||||
A float64 `hcl:"a"`
|
||||
B float64 `hcl:"b"`
|
||||
}
|
||||
|
||||
err := Decode(&value, testReadFile(t, "float.hcl"))
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
if got, want := value.A, float64(1.02); got != want {
|
||||
t.Fatalf("wrong result %#v; want %#v", got, want)
|
||||
}
|
||||
if got, want := value.B, float64(2); got != want {
|
||||
t.Fatalf("wrong result %#v; want %#v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecode_intStringAliased(t *testing.T) {
|
||||
var value struct {
|
||||
Count time.Duration
|
||||
}
|
||||
|
||||
err := Decode(&value, testReadFile(t, "basic_int_string.hcl"))
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
if value.Count != time.Duration(3) {
|
||||
t.Fatalf("bad: %#v", value.Count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecode_Node(t *testing.T) {
|
||||
// given
|
||||
var value struct {
|
||||
|
|
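For reference, a minimal sketch of the decoding pattern the new float tests above exercise, assuming the vendored github.com/hashicorp/hcl API; the inline HCL source is a hypothetical stand-in for the float.hcl fixture:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// floats mirrors the struct used in TestDecode_float32/TestDecode_float64:
// struct fields are bound to HCL keys via `hcl` tags.
type floats struct {
	A float32 `hcl:"a"`
	B float64 `hcl:"b"`
}

func main() {
	// Hypothetical input in the same shape as the float.hcl fixture.
	src := "a = 1.02\nb = 2"

	var out floats
	if err := hcl.Decode(&out, src); err != nil {
		log.Fatalf("decode: %s", err)
	}
	fmt.Printf("a=%v b=%v\n", out.A, out.B)
}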
3
vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
generated
vendored
|
@ -156,7 +156,8 @@ func (o *ObjectKey) Pos() token.Pos {
|
|||
type LiteralType struct {
|
||||
Token token.Token
|
||||
|
||||
// associated line comment, only when used in a list
|
||||
// comment types, only used when in a list
|
||||
LeadComment *CommentGroup
|
||||
LineComment *CommentGroup
|
||||
}
|
||||
|
||||
|
|
2
vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go
generated
vendored
|
@ -58,7 +58,7 @@ func processFile(filename string, in io.Reader, out io.Writer, stdin bool, opts
|
|||
|
||||
res, err := printer.Format(src)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("In %s: %s", filename, err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(src, res) {
|
||||
|
|
69
vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
generated
vendored
|
@ -3,6 +3,7 @@
|
|||
package parser
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
@ -36,6 +37,11 @@ func newParser(src []byte) *Parser {
|
|||
|
||||
// Parse parses the given source and returns the abstract syntax tree.
|
||||
func Parse(src []byte) (*ast.File, error) {
|
||||
// normalize all line endings
|
||||
// since the scanner and output only work with "\n" line endings, we may
|
||||
// end up with dangling "\r" characters in the parsed data.
|
||||
src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
|
||||
|
||||
p := newParser(src)
|
||||
return p.Parse()
|
||||
}
|
||||
|
@ -50,7 +56,7 @@ func (p *Parser) Parse() (*ast.File, error) {
|
|||
scerr = &PosError{Pos: pos, Err: errors.New(msg)}
|
||||
}
|
||||
|
||||
f.Node, err = p.objectList()
|
||||
f.Node, err = p.objectList(false)
|
||||
if scerr != nil {
|
||||
return nil, scerr
|
||||
}
|
||||
|
@ -62,11 +68,23 @@ func (p *Parser) Parse() (*ast.File, error) {
|
|||
return f, nil
|
||||
}
|
||||
|
||||
func (p *Parser) objectList() (*ast.ObjectList, error) {
|
||||
// objectList parses a list of items within an object (generally k/v pairs).
|
||||
// The parameter" obj" tells this whether to we are within an object (braces:
|
||||
// '{', '}') or just at the top level. If we're within an object, we end
|
||||
// at an RBRACE.
|
||||
func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
|
||||
defer un(trace(p, "ParseObjectList"))
|
||||
node := &ast.ObjectList{}
|
||||
|
||||
for {
|
||||
if obj {
|
||||
tok := p.scan()
|
||||
p.unscan()
|
||||
if tok.Type == token.RBRACE {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
n, err := p.objectItem()
|
||||
if err == errEofToken {
|
||||
break // we are finished
|
||||
|
@ -179,9 +197,12 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
|
|||
keyStr = append(keyStr, k.Token.Text)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf(
|
||||
"key '%s' expected start of object ('{') or assignment ('=')",
|
||||
strings.Join(keyStr, " "))
|
||||
return nil, &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: fmt.Errorf(
|
||||
"key '%s' expected start of object ('{') or assignment ('=')",
|
||||
strings.Join(keyStr, " ")),
|
||||
}
|
||||
}
|
||||
|
||||
// do a look-ahead for line comment
|
||||
|
@ -244,7 +265,10 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
|
|||
keyCount++
|
||||
keys = append(keys, &ast.ObjectKey{Token: p.tok})
|
||||
case token.ILLEGAL:
|
||||
fmt.Println("illegal")
|
||||
return keys, &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: fmt.Errorf("illegal character"),
|
||||
}
|
||||
default:
|
||||
return keys, &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
|
@ -288,7 +312,7 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
|
|||
Lbrace: p.tok.Pos,
|
||||
}
|
||||
|
||||
l, err := p.objectList()
|
||||
l, err := p.objectList(true)
|
||||
|
||||
// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
|
||||
// not an RBRACE, it's a syntax error and we just return it.
|
||||
|
@ -296,9 +320,12 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// If there is no error, we should be at a RBRACE to end the object
|
||||
if p.tok.Type != token.RBRACE {
|
||||
return nil, fmt.Errorf("object expected closing RBRACE got: %s", p.tok.Type)
|
||||
// No error, scan and expect the ending to be a brace
|
||||
if tok := p.scan(); tok.Type != token.RBRACE {
|
||||
return nil, &PosError{
|
||||
Pos: tok.Pos,
|
||||
Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
|
||||
}
|
||||
}
|
||||
|
||||
o.List = l
|
||||
|
@ -331,12 +358,18 @@ func (p *Parser) listType() (*ast.ListType, error) {
|
|||
}
|
||||
}
|
||||
switch tok.Type {
|
||||
case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
|
||||
case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
|
||||
node, err := p.literalType()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If there is a lead comment, apply it
|
||||
if p.leadComment != nil {
|
||||
node.LeadComment = p.leadComment
|
||||
p.leadComment = nil
|
||||
}
|
||||
|
||||
l.Add(node)
|
||||
needComma = true
|
||||
case token.COMMA:
|
||||
|
@ -367,12 +400,16 @@ func (p *Parser) listType() (*ast.ListType, error) {
|
|||
}
|
||||
l.Add(node)
|
||||
needComma = true
|
||||
case token.BOOL:
|
||||
// TODO(arslan) should we support? not supported by HCL yet
|
||||
case token.LBRACK:
|
||||
// TODO(arslan) should we support nested lists? Even though it's
|
||||
// written in README of HCL, it's not a part of the grammar
|
||||
// (not defined in parse.y)
|
||||
node, err := p.listType()
|
||||
if err != nil {
|
||||
return nil, &PosError{
|
||||
Pos: tok.Pos,
|
||||
Err: fmt.Errorf(
|
||||
"error while trying to parse list within list: %s", err),
|
||||
}
|
||||
}
|
||||
l.Add(node)
|
||||
case token.RBRACK:
|
||||
// finished
|
||||
l.Rbrack = p.tok.Pos
|
||||
|
|
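For reference, a minimal sketch of exercising the parser changes above (CRLF normalization in Parse and token.BOOL accepted inside lists) through the vendored parser package; the input literal is hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// Windows-style line endings and a boolean inside a list, both of
	// which the updated parser handles.
	src := []byte("enabled = [true, false]\r\nname = \"web\"\r\n")

	file, err := parser.Parse(src)
	if err != nil {
		log.Fatalf("parse: %s", err)
	}

	// The top-level node of a parsed file is an *ast.ObjectList.
	if list, ok := file.Node.(*ast.ObjectList); ok {
		fmt.Printf("parsed %d top-level items\n", len(list.Items))
	}
}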
179
vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go
generated
vendored
|
@ -59,12 +59,12 @@ func TestListType(t *testing.T) {
|
|||
[]token.Type{token.NUMBER, token.STRING},
|
||||
},
|
||||
{
|
||||
`foo = []`,
|
||||
[]token.Type{},
|
||||
`foo = [false]`,
|
||||
[]token.Type{token.BOOL},
|
||||
},
|
||||
{
|
||||
`foo = ["123", 123]`,
|
||||
[]token.Type{token.STRING, token.NUMBER},
|
||||
`foo = []`,
|
||||
[]token.Type{},
|
||||
},
|
||||
{
|
||||
`foo = [1,
|
||||
|
@ -152,6 +152,109 @@ func TestListOfMaps_requiresComma(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestListType_leadComment(t *testing.T) {
|
||||
var literals = []struct {
|
||||
src string
|
||||
comment []string
|
||||
}{
|
||||
{
|
||||
`foo = [
|
||||
1,
|
||||
# bar
|
||||
2,
|
||||
3,
|
||||
]`,
|
||||
[]string{"", "# bar", ""},
|
||||
},
|
||||
}
|
||||
|
||||
for _, l := range literals {
|
||||
p := newParser([]byte(l.src))
|
||||
item, err := p.objectItem()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
list, ok := item.Val.(*ast.ListType)
|
||||
if !ok {
|
||||
t.Fatalf("node should be of type LiteralType, got: %T", item.Val)
|
||||
}
|
||||
|
||||
if len(list.List) != len(l.comment) {
|
||||
t.Fatalf("bad: %d", len(list.List))
|
||||
}
|
||||
|
||||
for i, li := range list.List {
|
||||
lt := li.(*ast.LiteralType)
|
||||
comment := l.comment[i]
|
||||
|
||||
if (lt.LeadComment == nil) != (comment == "") {
|
||||
t.Fatalf("bad: %#v", lt)
|
||||
}
|
||||
|
||||
if comment == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
actual := lt.LeadComment.List[0].Text
|
||||
if actual != comment {
|
||||
t.Fatalf("bad: %q %q", actual, comment)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestListType_lineComment(t *testing.T) {
|
||||
var literals = []struct {
|
||||
src string
|
||||
comment []string
|
||||
}{
|
||||
{
|
||||
`foo = [
|
||||
1,
|
||||
2, # bar
|
||||
3,
|
||||
]`,
|
||||
[]string{"", "# bar", ""},
|
||||
},
|
||||
}
|
||||
|
||||
for _, l := range literals {
|
||||
p := newParser([]byte(l.src))
|
||||
item, err := p.objectItem()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
list, ok := item.Val.(*ast.ListType)
|
||||
if !ok {
|
||||
t.Fatalf("node should be of type LiteralType, got: %T", item.Val)
|
||||
}
|
||||
|
||||
if len(list.List) != len(l.comment) {
|
||||
t.Fatalf("bad: %d", len(list.List))
|
||||
}
|
||||
|
||||
for i, li := range list.List {
|
||||
lt := li.(*ast.LiteralType)
|
||||
comment := l.comment[i]
|
||||
|
||||
if (lt.LineComment == nil) != (comment == "") {
|
||||
t.Fatalf("bad: %s", lt)
|
||||
}
|
||||
|
||||
if comment == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
actual := lt.LineComment.List[0].Text
|
||||
if actual != comment {
|
||||
t.Fatalf("bad: %q %q", actual, comment)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestObjectType(t *testing.T) {
|
||||
var literals = []struct {
|
||||
src string
|
||||
|
@ -204,6 +307,8 @@ func TestObjectType(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, l := range literals {
|
||||
t.Logf("Source: %s", l.src)
|
||||
|
||||
p := newParser([]byte(l.src))
|
||||
// p.enableTrace = true
|
||||
item, err := p.objectItem()
|
||||
|
@ -282,6 +387,30 @@ func TestObjectKey(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCommentGroup(t *testing.T) {
|
||||
var cases = []struct {
|
||||
src string
|
||||
groups int
|
||||
}{
|
||||
{"# Hello\n# World", 1},
|
||||
{"# Hello\r\n# Windows", 1},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.src, func(t *testing.T) {
|
||||
p := newParser([]byte(tc.src))
|
||||
file, err := p.Parse()
|
||||
if err != nil {
|
||||
t.Fatalf("parse error: %s", err)
|
||||
}
|
||||
|
||||
if len(file.Comments) != tc.groups {
|
||||
t.Fatalf("bad: %#v", file.Comments)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Official HCL tests
|
||||
func TestParse(t *testing.T) {
|
||||
cases := []struct {
|
||||
|
@ -296,6 +425,10 @@ func TestParse(t *testing.T) {
|
|||
"comment.hcl",
|
||||
false,
|
||||
},
|
||||
{
|
||||
"comment_crlf.hcl",
|
||||
false,
|
||||
},
|
||||
{
|
||||
"comment_lastline.hcl",
|
||||
false,
|
||||
|
@ -336,6 +469,10 @@ func TestParse(t *testing.T) {
|
|||
"complex.hcl",
|
||||
false,
|
||||
},
|
||||
{
|
||||
"complex_crlf.hcl",
|
||||
false,
|
||||
},
|
||||
{
|
||||
"types.hcl",
|
||||
false,
|
||||
|
@ -368,20 +505,38 @@ func TestParse(t *testing.T) {
|
|||
"object_key_without_value.hcl",
|
||||
true,
|
||||
},
|
||||
{
|
||||
"object_key_assign_without_value.hcl",
|
||||
true,
|
||||
},
|
||||
{
|
||||
"object_key_assign_without_value2.hcl",
|
||||
true,
|
||||
},
|
||||
{
|
||||
"object_key_assign_without_value3.hcl",
|
||||
true,
|
||||
},
|
||||
{
|
||||
"git_crypt.hcl",
|
||||
true,
|
||||
},
|
||||
}
|
||||
|
||||
const fixtureDir = "./test-fixtures"
|
||||
|
||||
for _, tc := range cases {
|
||||
d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name))
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name))
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
_, err = Parse(d)
|
||||
if (err != nil) != tc.Err {
|
||||
t.Fatalf("Input: %s\n\nError: %s", tc.Name, err)
|
||||
}
|
||||
v, err := Parse(d)
|
||||
if (err != nil) != tc.Err {
|
||||
t.Fatalf("Input: %s\n\nError: %s\n\nAST: %#v", tc.Name, err, v)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
|
15
vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/comment_crlf.hcl
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
// Foo
|
||||
|
||||
/* Bar */
|
||||
|
||||
/*
|
||||
/*
|
||||
Baz
|
||||
*/
|
||||
|
||||
# Another
|
||||
|
||||
# Multiple
|
||||
# Lines
|
||||
|
||||
foo = "bar"
|
42
vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/complex_crlf.hcl
generated
vendored
Normal file
|
@ -0,0 +1,42 @@
|
|||
variable "foo" {
|
||||
default = "bar"
|
||||
description = "bar"
|
||||
}
|
||||
|
||||
variable "groups" { }
|
||||
|
||||
provider "aws" {
|
||||
access_key = "foo"
|
||||
secret_key = "bar"
|
||||
}
|
||||
|
||||
provider "do" {
|
||||
api_key = "${var.foo}"
|
||||
}
|
||||
|
||||
resource "aws_security_group" "firewall" {
|
||||
count = 5
|
||||
}
|
||||
|
||||
resource aws_instance "web" {
|
||||
ami = "${var.foo}"
|
||||
security_groups = [
|
||||
"foo",
|
||||
"${aws_security_group.firewall.foo}",
|
||||
"${element(split(\",\", var.groups)}",
|
||||
]
|
||||
network_interface = {
|
||||
device_index = 0
|
||||
description = "Main network interface"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_instance" "db" {
|
||||
security_groups = "${aws_security_group.firewall.*.id}"
|
||||
VPC = "foo"
|
||||
depends_on = ["aws_instance.web"]
|
||||
}
|
||||
|
||||
output "web_ip" {
|
||||
value = "${aws_instance.web.private_ip}"
|
||||
}
|
BIN
vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/git_crypt.hcl
generated
vendored
Normal file
Binary file not shown.
3
vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value.hcl
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
foo {
|
||||
bar =
|
||||
}
|
4
vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value2.hcl
generated
vendored
Normal file
|
@ -0,0 +1,4 @@
|
|||
foo {
|
||||
baz = 7
|
||||
bar =
|
||||
}
|
4
vendor/github.com/hashicorp/hcl/hcl/parser/test-fixtures/object_key_assign_without_value3.hcl
generated
vendored
Normal file
|
@ -0,0 +1,4 @@
|
|||
foo {
|
||||
bar =
|
||||
baz = 7
|
||||
}
|
261
vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
generated
vendored
|
@ -62,6 +62,14 @@ func (p *printer) collectComments(node ast.Node) {
|
|||
ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
|
||||
switch t := nn.(type) {
|
||||
case *ast.LiteralType:
|
||||
if t.LeadComment != nil {
|
||||
for _, comment := range t.LeadComment.List {
|
||||
if _, ok := standaloneComments[comment.Pos()]; ok {
|
||||
delete(standaloneComments, comment.Pos())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if t.LineComment != nil {
|
||||
for _, comment := range t.LineComment.List {
|
||||
if _, ok := standaloneComments[comment.Pos()]; ok {
|
||||
|
@ -95,7 +103,6 @@ func (p *printer) collectComments(node ast.Node) {
|
|||
}
|
||||
|
||||
sort.Sort(ByPosition(p.standaloneComments))
|
||||
|
||||
}
|
||||
|
||||
// output creates printable HCL output and returns it.
|
||||
|
@ -104,35 +111,60 @@ func (p *printer) output(n interface{}) []byte {
|
|||
|
||||
switch t := n.(type) {
|
||||
case *ast.File:
|
||||
// File doesn't trace so we add the tracing here
|
||||
defer un(trace(p, "File"))
|
||||
return p.output(t.Node)
|
||||
case *ast.ObjectList:
|
||||
var index int
|
||||
var nextItem token.Pos
|
||||
var commented bool
|
||||
for {
|
||||
// TODO(arslan): refactor below comment printing, we have the same in objectType
|
||||
for _, c := range p.standaloneComments {
|
||||
for _, comment := range c.List {
|
||||
if index != len(t.Items) {
|
||||
nextItem = t.Items[index].Pos()
|
||||
} else {
|
||||
nextItem = token.Pos{Offset: infinity, Line: infinity}
|
||||
}
|
||||
defer un(trace(p, "ObjectList"))
|
||||
|
||||
var index int
|
||||
for {
|
||||
// Determine the location of the next actual non-comment
|
||||
// item. If we're at the end, the next item is at "infinity"
|
||||
var nextItem token.Pos
|
||||
if index != len(t.Items) {
|
||||
nextItem = t.Items[index].Pos()
|
||||
} else {
|
||||
nextItem = token.Pos{Offset: infinity, Line: infinity}
|
||||
}
|
||||
|
||||
// Go through the standalone comments in the file and print out
|
||||
// the comments that we should print for this object item.
|
||||
for _, c := range p.standaloneComments {
|
||||
// Go through all the comments in the group. The group
|
||||
// should be printed together, not separated by double newlines.
|
||||
printed := false
|
||||
newlinePrinted := false
|
||||
for _, comment := range c.List {
|
||||
// We only care about comments after the previous item
|
||||
// we've printed so that comments are printed in the
|
||||
// correct locations (between two objects for example).
|
||||
// And before the next item.
|
||||
if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
|
||||
// if we hit the end add newlines so we can print the comment
|
||||
if index == len(t.Items) {
|
||||
// we don't do this if prev is invalid which means the
|
||||
// beginning of the file since the first comment should
|
||||
// be at the first line.
|
||||
if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) {
|
||||
buf.Write([]byte{newline, newline})
|
||||
newlinePrinted = true
|
||||
}
|
||||
|
||||
// Write the actual comment.
|
||||
buf.WriteString(comment.Text)
|
||||
|
||||
buf.WriteByte(newline)
|
||||
if index != len(t.Items) {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
|
||||
// Set printed to true to note that we printed something
|
||||
printed = true
|
||||
}
|
||||
}
|
||||
|
||||
// If we're not at the last item, write a new line so
|
||||
// that there is a newline separating this comment from
|
||||
// the next object.
|
||||
if printed && index != len(t.Items) {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
if index == len(t.Items) {
|
||||
|
@ -140,8 +172,29 @@ func (p *printer) output(n interface{}) []byte {
|
|||
}
|
||||
|
||||
buf.Write(p.output(t.Items[index]))
|
||||
if !commented && index != len(t.Items)-1 {
|
||||
buf.Write([]byte{newline, newline})
|
||||
if index != len(t.Items)-1 {
|
||||
// Always write a newline to separate us from the next item
|
||||
buf.WriteByte(newline)
|
||||
|
||||
// Need to determine if we're going to separate the next item
|
||||
// with a blank line. The logic here is simple, though there
|
||||
// are a few conditions:
|
||||
//
|
||||
// 1. The next object is more than one line away anyways,
|
||||
// so we need an empty line.
|
||||
//
|
||||
// 2. The next object is not a "single line" object, so
|
||||
// we need an empty line.
|
||||
//
|
||||
// 3. This current object is not a single line object,
|
||||
// so we need an empty line.
|
||||
current := t.Items[index]
|
||||
next := t.Items[index+1]
|
||||
if next.Pos().Line != t.Items[index].Pos().Line+1 ||
|
||||
!p.isSingleLineObject(next) ||
|
||||
!p.isSingleLineObject(current) {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
index++
|
||||
}
|
||||
|
@ -165,7 +218,8 @@ func (p *printer) output(n interface{}) []byte {
|
|||
|
||||
func (p *printer) literalType(lit *ast.LiteralType) []byte {
|
||||
result := []byte(lit.Token.Text)
|
||||
if lit.Token.Type == token.HEREDOC {
|
||||
switch lit.Token.Type {
|
||||
case token.HEREDOC:
|
||||
// Clear the trailing newline from heredocs
|
||||
if result[len(result)-1] == '\n' {
|
||||
result = result[:len(result)-1]
|
||||
|
@ -173,6 +227,12 @@ func (p *printer) literalType(lit *ast.LiteralType) []byte {
|
|||
|
||||
// Poison lines 2+ so that we don't indent them
|
||||
result = p.heredocIndent(result)
|
||||
case token.STRING:
|
||||
// If this is a multiline string, poison lines 2+ so we don't
|
||||
// indent them.
|
||||
if bytes.IndexRune(result, '\n') >= 0 {
|
||||
result = p.heredocIndent(result)
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
|
@ -226,17 +286,24 @@ func (p *printer) objectType(o *ast.ObjectType) []byte {
|
|||
var nextItem token.Pos
|
||||
var commented, newlinePrinted bool
|
||||
for {
|
||||
// Determine the location of the next actual non-comment
|
||||
// item. If we're at the end, the next item is the closing brace
|
||||
if index != len(o.List.Items) {
|
||||
nextItem = o.List.Items[index].Pos()
|
||||
} else {
|
||||
nextItem = o.Rbrace
|
||||
}
|
||||
|
||||
// Print stand alone comments
|
||||
// Go through the standalone comments in the file and print out
|
||||
// the comments that we should print for this object item.
|
||||
for _, c := range p.standaloneComments {
|
||||
printed := false
|
||||
var lastCommentPos token.Pos
|
||||
for _, comment := range c.List {
|
||||
// if we hit the end, last item should be the brace
|
||||
if index != len(o.List.Items) {
|
||||
nextItem = o.List.Items[index].Pos()
|
||||
} else {
|
||||
nextItem = o.Rbrace
|
||||
}
|
||||
|
||||
// We only care about comments after the previous item
|
||||
// we've printed so that comments are printed in the
|
||||
// correct locations (between two objects for example).
|
||||
// And before the next item.
|
||||
if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
|
||||
// If there are standalone comments and the initial newline has not
|
||||
// been printed yet, do it now.
|
||||
|
@ -251,11 +318,33 @@ func (p *printer) objectType(o *ast.ObjectType) []byte {
|
|||
buf.WriteByte(newline)
|
||||
}
|
||||
|
||||
buf.Write(p.indent([]byte(comment.Text)))
|
||||
// Store this position
|
||||
lastCommentPos = comment.Pos()
|
||||
|
||||
// output the comment itself
|
||||
buf.Write(p.indent(p.heredocIndent([]byte(comment.Text))))
|
||||
|
||||
// Set printed to true to note that we printed something
|
||||
printed = true
|
||||
|
||||
/*
|
||||
if index != len(o.List.Items) {
|
||||
buf.WriteByte(newline) // do not print on the end
|
||||
}
|
||||
*/
|
||||
}
|
||||
}
|
||||
|
||||
// Stuff to do if we had comments
|
||||
if printed {
|
||||
// Always write a newline
|
||||
buf.WriteByte(newline)
|
||||
|
||||
// If there is another item in the object and our comment
|
||||
// didn't hug it directly, then make sure there is a blank
|
||||
// line separating them.
|
||||
if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 {
|
||||
buf.WriteByte(newline)
|
||||
if index != len(o.List.Items) {
|
||||
buf.WriteByte(newline) // do not print on the end
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -435,16 +524,54 @@ func (p *printer) list(l *ast.ListType) []byte {
|
|||
}
|
||||
|
||||
insertSpaceBeforeItem := false
|
||||
lastHadLeadComment := false
|
||||
for i, item := range l.List {
|
||||
// Keep track of whether this item is a heredoc since that has
|
||||
// unique behavior.
|
||||
heredoc := false
|
||||
if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
|
||||
heredoc = true
|
||||
}
|
||||
|
||||
if item.Pos().Line != l.Lbrack.Line {
|
||||
// multiline list, add newline before we add each item
|
||||
buf.WriteByte(newline)
|
||||
insertSpaceBeforeItem = false
|
||||
|
||||
// If we have a lead comment, then we want to write that first
|
||||
leadComment := false
|
||||
if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil {
|
||||
leadComment = true
|
||||
|
||||
// If this isn't the first item and the previous element
|
||||
// didn't have a lead comment, then we need to add an extra
|
||||
// newline to properly space things out. If it did have a
|
||||
// lead comment previously then this would be done
|
||||
// automatically.
|
||||
if i > 0 && !lastHadLeadComment {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
|
||||
for _, comment := range lit.LeadComment.List {
|
||||
buf.Write(p.indent([]byte(comment.Text)))
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
}
|
||||
|
||||
// also indent each line
|
||||
val := p.output(item)
|
||||
curLen := len(val)
|
||||
buf.Write(p.indent(val))
|
||||
buf.WriteString(",")
|
||||
|
||||
// if this item is a heredoc, then we output the comma on
|
||||
// the next line. This is the only case this happens.
|
||||
comma := []byte{','}
|
||||
if heredoc {
|
||||
buf.WriteByte(newline)
|
||||
comma = p.indent(comma)
|
||||
}
|
||||
|
||||
buf.Write(comma)
|
||||
|
||||
if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
|
||||
// if the next item doesn't have any comments, do not align
|
||||
|
@ -458,19 +585,51 @@ func (p *printer) list(l *ast.ListType) []byte {
|
|||
}
|
||||
}
|
||||
|
||||
if i == len(l.List)-1 {
|
||||
lastItem := i == len(l.List)-1
|
||||
if lastItem {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
|
||||
if leadComment && !lastItem {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
|
||||
lastHadLeadComment = leadComment
|
||||
} else {
|
||||
if insertSpaceBeforeItem {
|
||||
buf.WriteByte(blank)
|
||||
insertSpaceBeforeItem = false
|
||||
}
|
||||
buf.Write(p.output(item))
|
||||
|
||||
// Output the item itself
|
||||
// also indent each line
|
||||
val := p.output(item)
|
||||
curLen := len(val)
|
||||
buf.Write(val)
|
||||
|
||||
// If this is a heredoc item we always have to output a newline
|
||||
// so that it parses properly.
|
||||
if heredoc {
|
||||
buf.WriteByte(newline)
|
||||
}
|
||||
|
||||
// If this isn't the last element, write a comma.
|
||||
if i != len(l.List)-1 {
|
||||
buf.WriteString(",")
|
||||
insertSpaceBeforeItem = true
|
||||
}
|
||||
|
||||
if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
|
||||
// if the next item doesn't have any comments, do not align
|
||||
buf.WriteByte(blank) // align one space
|
||||
for i := 0; i < longestLine-curLen; i++ {
|
||||
buf.WriteByte(blank)
|
||||
}
|
||||
|
||||
for _, comment := range lit.LineComment.List {
|
||||
buf.WriteString(comment.Text)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -547,6 +706,36 @@ func (p *printer) heredocIndent(buf []byte) []byte {
|
|||
return res
|
||||
}
|
||||
|
||||
// isSingleLineObject tells whether the given object item is a single
|
||||
// line object such as "obj {}".
|
||||
//
|
||||
// A single line object:
|
||||
//
|
||||
// * has no lead comments (hence multi-line)
|
||||
// * has no assignment
|
||||
// * has no values in the stanza (within {})
|
||||
//
|
||||
func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool {
|
||||
// If there is a lead comment, can't be one line
|
||||
if val.LeadComment != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// If there is assignment, we always break by line
|
||||
if val.Assign.IsValid() {
|
||||
return false
|
||||
}
|
||||
|
||||
// If it isn't an object type, then it's not a single line object
|
||||
ot, ok := val.Val.(*ast.ObjectType)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// If the object has no items, it is single line!
|
||||
return len(ot.List.Items) == 0
|
||||
}
|
||||
|
||||
func lines(txt string) int {
|
||||
endline := 1
|
||||
for i := 0; i < len(txt); i++ {
|
||||
|
|
1
vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
generated
vendored
|
@ -62,6 +62,5 @@ func Format(src []byte) ([]byte, error) {
|
|||
|
||||
// Add trailing newline to result
|
||||
buf.WriteString("\n")
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
|
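For reference, a minimal sketch of the formatting entry point that the fixture-driven tests below exercise, assuming the vendored github.com/hashicorp/hcl/hcl/printer API; the input literal is hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/printer"
)

func main() {
	// Hypothetical, untidy HCL input; Format normalizes spacing and
	// alignment the same way the *.input -> *.golden fixtures do.
	src := []byte("foo   =   \"bar\"\nlist = [1,   2, 3]\n")

	out, err := printer.Format(src)
	if err != nil {
		log.Fatalf("format: %s", err)
	}
	fmt.Print(string(out))
}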
27
vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go
generated
vendored
|
@ -1,8 +1,3 @@
|
|||
// +build !windows
|
||||
// TODO(jen20): These need fixing on Windows but printer is not used right now
|
||||
// and red CI is making it harder to process other bugs, so ignore until
|
||||
// we get around to fixing them.package printer
|
||||
|
||||
package printer
|
||||
|
||||
import (
|
||||
|
@ -31,18 +26,32 @@ type entry struct {
|
|||
var data = []entry{
|
||||
{"complexhcl.input", "complexhcl.golden"},
|
||||
{"list.input", "list.golden"},
|
||||
{"list_comment.input", "list_comment.golden"},
|
||||
{"comment.input", "comment.golden"},
|
||||
{"comment_crlf.input", "comment.golden"},
|
||||
{"comment_aligned.input", "comment_aligned.golden"},
|
||||
{"comment_array.input", "comment_array.golden"},
|
||||
{"comment_end_file.input", "comment_end_file.golden"},
|
||||
{"comment_multiline_indent.input", "comment_multiline_indent.golden"},
|
||||
{"comment_multiline_no_stanza.input", "comment_multiline_no_stanza.golden"},
|
||||
{"comment_multiline_stanza.input", "comment_multiline_stanza.golden"},
|
||||
{"comment_newline.input", "comment_newline.golden"},
|
||||
{"comment_object_multi.input", "comment_object_multi.golden"},
|
||||
{"comment_standalone.input", "comment_standalone.golden"},
|
||||
{"empty_block.input", "empty_block.golden"},
|
||||
{"list_of_objects.input", "list_of_objects.golden"},
|
||||
{"multiline_string.input", "multiline_string.golden"},
|
||||
{"object_singleline.input", "object_singleline.golden"},
|
||||
{"object_with_heredoc.input", "object_with_heredoc.golden"},
|
||||
}
|
||||
|
||||
func TestFiles(t *testing.T) {
|
||||
for _, e := range data {
|
||||
source := filepath.Join(dataDir, e.source)
|
||||
golden := filepath.Join(dataDir, e.golden)
|
||||
check(t, source, golden)
|
||||
t.Run(e.source, func(t *testing.T) {
|
||||
check(t, source, golden)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -96,8 +105,8 @@ func diff(aname, bname string, a, b []byte) error {
|
|||
for i := 0; i < len(a) && i < len(b); i++ {
|
||||
ch := a[i]
|
||||
if ch != b[i] {
|
||||
fmt.Fprintf(&buf, "\n%s:%d:%d: %s", aname, line, i-offs+1, lineAt(a, offs))
|
||||
fmt.Fprintf(&buf, "\n%s:%d:%d: %s", bname, line, i-offs+1, lineAt(b, offs))
|
||||
fmt.Fprintf(&buf, "\n%s:%d:%d: %q", aname, line, i-offs+1, lineAt(a, offs))
|
||||
fmt.Fprintf(&buf, "\n%s:%d:%d: %q", bname, line, i-offs+1, lineAt(b, offs))
|
||||
fmt.Fprintf(&buf, "\n\n")
|
||||
break
|
||||
}
|
||||
|
@ -124,7 +133,7 @@ func format(src []byte) ([]byte, error) {
|
|||
|
||||
// make sure formatted output is syntactically correct
|
||||
if _, err := parser.Parse(formatted); err != nil {
|
||||
return nil, fmt.Errorf("parse: %s\n%s", err, src)
|
||||
return nil, fmt.Errorf("parse: %s\n%s", err, formatted)
|
||||
}
|
||||
|
||||
return formatted, nil
|
||||
|
|
13
vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.golden
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
|||
banana = [
|
||||
# I really want to comment this item in the array.
|
||||
"a",
|
||||
|
||||
# This as well
|
||||
"b",
|
||||
|
||||
"c", # And C
|
||||
"d",
|
||||
|
||||
# And another
|
||||
"e",
|
||||
]
|
13
vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_array.input
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
|||
banana = [
|
||||
# I really want to comment this item in the array.
|
||||
"a",
|
||||
|
||||
# This as well
|
||||
"b",
|
||||
|
||||
"c", # And C
|
||||
"d",
|
||||
|
||||
# And another
|
||||
"e",
|
||||
]
|
37
vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_crlf.input
generated
vendored
Normal file
|
@ -0,0 +1,37 @@
|
|||
// A standalone comment is a comment which is not attached to any kind of node
|
||||
|
||||
// This comes from Terraform, as a test
|
||||
variable "foo" {
|
||||
# Standalone comment should be still here
|
||||
|
||||
default = "bar"
|
||||
description = "bar" # yooo
|
||||
}
|
||||
|
||||
/* This is a multi line standalone
|
||||
comment*/
|
||||
|
||||
|
||||
// fatih arslan
|
||||
/* This is a developer test
|
||||
account and a multine comment */
|
||||
developer = [ "fatih", "arslan"] // fatih arslan
|
||||
|
||||
# One line here
|
||||
numbers = [1,2] // another line here
|
||||
|
||||
# Another comment
|
||||
variable = {
|
||||
description = "bar" # another yooo
|
||||
foo {
|
||||
# Nested standalone
|
||||
|
||||
bar = "fatih"
|
||||
}
|
||||
}
|
||||
|
||||
// lead comment
|
||||
foo {
|
||||
bar = "fatih" // line comment 2
|
||||
} // line comment 3
|
||||
|
6
vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.golden
generated
vendored
Normal file
|
@ -0,0 +1,6 @@
|
|||
resource "blah" "blah" {}
|
||||
|
||||
//
|
||||
//
|
||||
//
|
||||
|
5
vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_end_file.input
generated
vendored
Normal file
|
@ -0,0 +1,5 @@
|
|||
resource "blah" "blah" {}
|
||||
|
||||
//
|
||||
//
|
||||
//
|
Some files were not shown because too many files have changed in this diff.