mirror of
https://github.com/Luzifer/badge-gen.git
synced 2024-12-29 21:01:15 +00:00
Switch to using dep
for vendoring
Signed-off-by: Knut Ahlers <knut@ahlers.me>
This commit is contained in:
parent
45baca1b4a
commit
820c81f62f
234 changed files with 20512 additions and 6636 deletions
142
Godeps/Godeps.json
generated
142
Godeps/Godeps.json
generated
|
@ -1,142 +0,0 @@
|
|||
{
|
||||
"ImportPath": "github.com/Luzifer/badge-gen",
|
||||
"GoVersion": "go1.6",
|
||||
"GodepVersion": "v74",
|
||||
"Deps": [
|
||||
{
|
||||
"ImportPath": "bitbucket.org/ww/goautoneg",
|
||||
"Comment": "null-5",
|
||||
"Rev": "'75cd24fc2f2c2a2088577d12123ddee5f54e0675'"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/Luzifer/go_helpers/accessLogger",
|
||||
"Comment": "v1.4.0",
|
||||
"Rev": "d76f718bb2d7d043fdf9dfdc01af03f20047432b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/Luzifer/rconfig",
|
||||
"Comment": "v1.1.0",
|
||||
"Rev": "c27bd3a64b5b19556914d9fec69922cf3852d585"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/beorn7/perks/quantile",
|
||||
"Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/freetype/raster",
|
||||
"Comment": "release-131-g38b4c39",
|
||||
"Rev": "38b4c392adc5eed94207994c4848fff99f4ac234"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/freetype/truetype",
|
||||
"Comment": "release-131-g38b4c39",
|
||||
"Rev": "38b4c392adc5eed94207994c4848fff99f4ac234"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/proto",
|
||||
"Rev": "8df8a93c670173cd1d8737a507a253b94f2d0b5a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/gorilla/context",
|
||||
"Rev": "1c83b3eabd45b6d76072b66b746c20815fb2872d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/gorilla/mux",
|
||||
"Rev": "49c024275504f0341e5a9971eb7ba7fa3dc7af40"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
|
||||
"Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/client_golang/prometheus",
|
||||
"Comment": "0.7.0-39-g3b78d7a",
|
||||
"Rev": "3b78d7a77f51ccbc364d4bc170920153022cfd08"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/client_model/go",
|
||||
"Comment": "model-0.0.2-12-gfa8ad6f",
|
||||
"Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/common/expfmt",
|
||||
"Rev": "ffd5d0f2976124c788687ec2ac194f5479e1f9c2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/common/model",
|
||||
"Rev": "ffd5d0f2976124c788687ec2ac194f5479e1f9c2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/procfs",
|
||||
"Rev": "3b231703e7dd0ca93302b5242ad6aeb4b5c603a2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/spf13/pflag",
|
||||
"Rev": "367864438f1b1a3c7db4da06a2f55b144e6784e0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/tdewolff/buffer",
|
||||
"Comment": "v1.0.0-6-g0edfcb7",
|
||||
"Rev": "0edfcb7b750146ff879e95831de2ef53605a5cb5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/tdewolff/minify",
|
||||
"Comment": "v2.0.0-85-g28aac1f",
|
||||
"Rev": "28aac1f92d928dfb63dd0258a3b2248a020e86da"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/tdewolff/minify/css",
|
||||
"Comment": "v2.0.0-85-g28aac1f",
|
||||
"Rev": "28aac1f92d928dfb63dd0258a3b2248a020e86da"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/tdewolff/minify/svg",
|
||||
"Comment": "v2.0.0-85-g28aac1f",
|
||||
"Rev": "28aac1f92d928dfb63dd0258a3b2248a020e86da"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/tdewolff/parse",
|
||||
"Comment": "v2.0.0-2-g34d5c11",
|
||||
"Rev": "34d5c1160d4503da4b456e5094609f2331d6dde3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/tdewolff/parse/css",
|
||||
"Comment": "v2.0.0-2-g34d5c11",
|
||||
"Rev": "34d5c1160d4503da4b456e5094609f2331d6dde3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/tdewolff/parse/svg",
|
||||
"Comment": "v2.0.0-2-g34d5c11",
|
||||
"Rev": "34d5c1160d4503da4b456e5094609f2331d6dde3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/tdewolff/parse/xml",
|
||||
"Comment": "v2.0.0-2-g34d5c11",
|
||||
"Rev": "34d5c1160d4503da4b456e5094609f2331d6dde3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/tdewolff/strconv",
|
||||
"Rev": "3e8091f4417ebaaa3910da63a45ea394ebbfb0e3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/image/font",
|
||||
"Rev": "97680175a5267bb8b31f1923e7a66df98013b11a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/image/math/fixed",
|
||||
"Rev": "97680175a5267bb8b31f1923e7a66df98013b11a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/context",
|
||||
"Rev": "ef00b378c73f107bf44d5c9b69875255ce89b79a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/context/ctxhttp",
|
||||
"Rev": "ef00b378c73f107bf44d5c9b69875255ce89b79a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/yaml.v2",
|
||||
"Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77"
|
||||
}
|
||||
]
|
||||
}
|
5
Godeps/Readme
generated
5
Godeps/Readme
generated
|
@ -1,5 +0,0 @@
|
|||
This directory tree is generated automatically by godep.
|
||||
|
||||
Please do not edit.
|
||||
|
||||
See https://github.com/tools/godep for more information.
|
152
Gopkg.lock
generated
Normal file
152
Gopkg.lock
generated
Normal file
|
@ -0,0 +1,152 @@
|
|||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Luzifer/go_helpers"
|
||||
packages = ["accessLogger"]
|
||||
revision = "94b91ff63a5db8e22c4d121e6c5c17b44135be4d"
|
||||
version = "v2.5.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Luzifer/rconfig"
|
||||
packages = ["."]
|
||||
revision = "7aef1d393c1e2d0758901853b59981c7adc67c7e"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/beorn7/perks"
|
||||
packages = ["quantile"]
|
||||
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/freetype"
|
||||
packages = [
|
||||
"raster",
|
||||
"truetype"
|
||||
]
|
||||
revision = "e2365dfdc4a05e4b8299a783240d4a7d5a65d4e4"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto"]
|
||||
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gorilla/context"
|
||||
packages = ["."]
|
||||
revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gorilla/mux"
|
||||
packages = ["."]
|
||||
revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf"
|
||||
version = "v1.6.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||
packages = ["pbutil"]
|
||||
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = ["prometheus"]
|
||||
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/client_model"
|
||||
packages = ["go"]
|
||||
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/common"
|
||||
packages = [
|
||||
"expfmt",
|
||||
"internal/bitbucket.org/ww/goautoneg",
|
||||
"model"
|
||||
]
|
||||
revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [
|
||||
".",
|
||||
"internal/util",
|
||||
"nfs",
|
||||
"xfs"
|
||||
]
|
||||
revision = "94663424ae5ae9856b40a9f170762b4197024661"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/tdewolff/minify"
|
||||
packages = [
|
||||
".",
|
||||
"css",
|
||||
"svg"
|
||||
]
|
||||
revision = "222672169d634c440a73abc47685074e1a9daa60"
|
||||
version = "v2.3.4"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/tdewolff/parse"
|
||||
packages = [
|
||||
".",
|
||||
"buffer",
|
||||
"css",
|
||||
"strconv",
|
||||
"svg",
|
||||
"xml"
|
||||
]
|
||||
revision = "639f6272aec6b52094db77b9ec488214b0b4b1a1"
|
||||
version = "v2.3.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/image"
|
||||
packages = [
|
||||
"font",
|
||||
"math/fixed"
|
||||
]
|
||||
revision = "af66defab954cb421ca110193eed9477c8541e2a"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"context",
|
||||
"context/ctxhttp"
|
||||
]
|
||||
revision = "1e491301e022f8f977054da4c2d852decd59571f"
|
||||
|
||||
[[projects]]
|
||||
branch = "v2"
|
||||
name = "gopkg.in/validator.v2"
|
||||
packages = ["."]
|
||||
revision = "135c24b11c19e52befcae2ec3fca5d9b78c4e98e"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
|
||||
version = "v2.2.1"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "dc2df5f4e08055a0bc57c20651189dd7bac1e27bfc06e2a9cf255ead61b6faa9"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
66
Gopkg.toml
Normal file
66
Gopkg.toml
Normal file
|
@ -0,0 +1,66 @@
|
|||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
#
|
||||
# [prune]
|
||||
# non-go = false
|
||||
# go-tests = true
|
||||
# unused-packages = true
|
||||
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/Luzifer/go_helpers"
|
||||
version = "2.5.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/Luzifer/rconfig"
|
||||
version = "1.2.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/freetype"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/gorilla/mux"
|
||||
version = "1.6.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
version = "0.8.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/tdewolff/minify"
|
||||
version = "2.3.4"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/image"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
|
||||
[[constraint]]
|
||||
name = "gopkg.in/yaml.v2"
|
||||
version = "2.2.1"
|
||||
|
||||
[prune]
|
||||
go-tests = true
|
||||
unused-packages = true
|
13
vendor/bitbucket.org/ww/goautoneg/Makefile
generated
vendored
13
vendor/bitbucket.org/ww/goautoneg/Makefile
generated
vendored
|
@ -1,13 +0,0 @@
|
|||
include $(GOROOT)/src/Make.inc
|
||||
|
||||
TARG=bitbucket.org/ww/goautoneg
|
||||
GOFILES=autoneg.go
|
||||
|
||||
include $(GOROOT)/src/Make.pkg
|
||||
|
||||
format:
|
||||
gofmt -w *.go
|
||||
|
||||
docs:
|
||||
gomake clean
|
||||
godoc ${TARG} > README.txt
|
2
vendor/github.com/Luzifer/go_helpers/accessLogger/accessLogger.go
generated
vendored
2
vendor/github.com/Luzifer/go_helpers/accessLogger/accessLogger.go
generated
vendored
|
@ -33,5 +33,5 @@ func (a *AccessLogResponseWriter) WriteHeader(code int) {
|
|||
}
|
||||
|
||||
func (a *AccessLogResponseWriter) HTTPResponseType() string {
|
||||
return fmt.Sprintf("%sxx", strconv.FormatInt(int64(a.StatusCode), 10)[0])
|
||||
return fmt.Sprintf("%cxx", strconv.FormatInt(int64(a.StatusCode), 10)[0])
|
||||
}
|
||||
|
|
4
vendor/github.com/Luzifer/rconfig/.travis.yml
generated
vendored
4
vendor/github.com/Luzifer/rconfig/.travis.yml
generated
vendored
|
@ -1,8 +1,8 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.4
|
||||
- 1.5
|
||||
- 1.6
|
||||
- 1.7
|
||||
- tip
|
||||
|
||||
script: go test -v -race -cover ./...
|
||||
|
|
4
vendor/github.com/Luzifer/rconfig/History.md
generated
vendored
4
vendor/github.com/Luzifer/rconfig/History.md
generated
vendored
|
@ -1,3 +1,7 @@
|
|||
# 1.2.0 / 2017-06-19
|
||||
|
||||
* Add ParseAndValidate method
|
||||
|
||||
# 1.1.0 / 2016-06-28
|
||||
|
||||
* Support time.Duration config parameters
|
||||
|
|
39
vendor/github.com/Luzifer/rconfig/README.md
generated
vendored
39
vendor/github.com/Luzifer/rconfig/README.md
generated
vendored
|
@ -29,31 +29,28 @@ go test -v -race -cover github.com/Luzifer/rconfig
|
|||
|
||||
## Usage
|
||||
|
||||
As a first step define a struct holding your configuration:
|
||||
A very simple usecase is to just configure a struct inside the vars section of your `main.go` and to parse the commandline flags from the `main()` function:
|
||||
|
||||
```go
|
||||
type config struct {
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/Luzifer/rconfig"
|
||||
)
|
||||
|
||||
var (
|
||||
cfg = struct {
|
||||
Username string `default:"unknown" flag:"user" description:"Your name"`
|
||||
Details struct {
|
||||
Age int `default:"25" flag:"age" env:"age" description:"Your age"`
|
||||
}
|
||||
}
|
||||
```
|
||||
}{}
|
||||
)
|
||||
|
||||
Next create an instance of that struct and let `rconfig` fill that config:
|
||||
|
||||
```go
|
||||
var cfg config
|
||||
func init() {
|
||||
cfg = config{}
|
||||
rconfig.Parse(&cfg)
|
||||
}
|
||||
```
|
||||
|
||||
You're ready to access your configuration:
|
||||
|
||||
```go
|
||||
func main() {
|
||||
rconfig.Parse(&cfg)
|
||||
|
||||
fmt.Printf("Hello %s, happy birthday for your %dth birthday.",
|
||||
cfg.Username,
|
||||
cfg.Details.Age)
|
||||
|
@ -72,18 +69,14 @@ The order of the directives (lower number = higher precedence):
|
|||
1. `default` tag in the struct
|
||||
|
||||
```go
|
||||
type config struct {
|
||||
var cfg = struct {
|
||||
Username string `vardefault:"username" flag:"username" description:"Your username"`
|
||||
}
|
||||
|
||||
var cfg = config{}
|
||||
|
||||
func init() {
|
||||
func main() {
|
||||
rconfig.SetVariableDefaults(rconfig.VarDefaultsFromYAMLFile("~/.myapp.yml"))
|
||||
rconfig.Parse(&cfg)
|
||||
}
|
||||
|
||||
func main() {
|
||||
fmt.Printf("Username = %s", cfg.Username)
|
||||
// Output: Username = luzifer
|
||||
}
|
||||
|
|
18
vendor/github.com/Luzifer/rconfig/config.go
generated
vendored
18
vendor/github.com/Luzifer/rconfig/config.go
generated
vendored
|
@ -13,6 +13,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
validator "gopkg.in/validator.v2"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -45,6 +46,15 @@ func Parse(config interface{}) error {
|
|||
return parse(config, nil)
|
||||
}
|
||||
|
||||
// ParseAndValidate works exactly like Parse but implements an additional run of
|
||||
// the go-validator package on the configuration struct. Therefore additonal struct
|
||||
// tags are supported like described in the readme file of the go-validator package:
|
||||
//
|
||||
// https://github.com/go-validator/validator/tree/v2#usage
|
||||
func ParseAndValidate(config interface{}) error {
|
||||
return parseAndValidate(config, nil)
|
||||
}
|
||||
|
||||
// Args returns the non-flag command-line arguments.
|
||||
func Args() []string {
|
||||
return fs.Args()
|
||||
|
@ -65,6 +75,14 @@ func SetVariableDefaults(defaults map[string]string) {
|
|||
variableDefaults = defaults
|
||||
}
|
||||
|
||||
func parseAndValidate(in interface{}, args []string) error {
|
||||
if err := parse(in, args); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return validator.Validate(in)
|
||||
}
|
||||
|
||||
func parse(in interface{}, args []string) error {
|
||||
if args == nil {
|
||||
args = os.Args
|
||||
|
|
20
vendor/github.com/beorn7/perks/LICENSE
generated
vendored
Normal file
20
vendor/github.com/beorn7/perks/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
Copyright (C) 2013 Blake Mizerany
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
36
vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
36
vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
|
@ -77,15 +77,20 @@ func NewHighBiased(epsilon float64) *Stream {
|
|||
// is guaranteed to be within (Quantile±Epsilon).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
|
||||
func NewTargeted(targets map[float64]float64) *Stream {
|
||||
func NewTargeted(targetMap map[float64]float64) *Stream {
|
||||
// Convert map to slice to avoid slow iterations on a map.
|
||||
// ƒ is called on the hot path, so converting the map to a slice
|
||||
// beforehand results in significant CPU savings.
|
||||
targets := targetMapToSlice(targetMap)
|
||||
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
var m = math.MaxFloat64
|
||||
var f float64
|
||||
for quantile, epsilon := range targets {
|
||||
if quantile*s.n <= r {
|
||||
f = (2 * epsilon * r) / quantile
|
||||
for _, t := range targets {
|
||||
if t.quantile*s.n <= r {
|
||||
f = (2 * t.epsilon * r) / t.quantile
|
||||
} else {
|
||||
f = (2 * epsilon * (s.n - r)) / (1 - quantile)
|
||||
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
|
||||
}
|
||||
if f < m {
|
||||
m = f
|
||||
|
@ -96,6 +101,25 @@ func NewTargeted(targets map[float64]float64) *Stream {
|
|||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
type target struct {
|
||||
quantile float64
|
||||
epsilon float64
|
||||
}
|
||||
|
||||
func targetMapToSlice(targetMap map[float64]float64) []target {
|
||||
targets := make([]target, 0, len(targetMap))
|
||||
|
||||
for quantile, epsilon := range targetMap {
|
||||
t := target{
|
||||
quantile: quantile,
|
||||
epsilon: epsilon,
|
||||
}
|
||||
targets = append(targets, t)
|
||||
}
|
||||
|
||||
return targets
|
||||
}
|
||||
|
||||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
|
||||
// design. Take care when using across multiple goroutines.
|
||||
type Stream struct {
|
||||
|
@ -133,7 +157,7 @@ func (s *Stream) Query(q float64) float64 {
|
|||
if l == 0 {
|
||||
return 0
|
||||
}
|
||||
i := int(float64(l) * q)
|
||||
i := int(math.Ceil(float64(l) * q))
|
||||
if i > 0 {
|
||||
i -= 1
|
||||
}
|
||||
|
|
20
vendor/github.com/golang/freetype/AUTHORS
generated
vendored
Normal file
20
vendor/github.com/golang/freetype/AUTHORS
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
# This is the official list of Freetype-Go authors for copyright purposes.
|
||||
# This file is distinct from the CONTRIBUTORS files.
|
||||
# See the latter for an explanation.
|
||||
#
|
||||
# Freetype-Go is derived from Freetype, which is written in C. The latter
|
||||
# is copyright 1996-2010 David Turner, Robert Wilhelm, and Werner Lemberg.
|
||||
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Google Inc.
|
||||
Jeff R. Allen <jra@nella.org>
|
||||
Maksim Kochkin <maxxarts@gmail.com>
|
||||
Michael Fogleman <fogleman@gmail.com>
|
||||
Rémy Oudompheng <oudomphe@phare.normalesup.org>
|
||||
Roger Peppe <rogpeppe@gmail.com>
|
||||
Steven Edwards <steven@stephenwithav.com>
|
38
vendor/github.com/golang/freetype/CONTRIBUTORS
generated
vendored
Normal file
38
vendor/github.com/golang/freetype/CONTRIBUTORS
generated
vendored
Normal file
|
@ -0,0 +1,38 @@
|
|||
# This is the official list of people who can contribute
|
||||
# (and typically have contributed) code to the Freetype-Go repository.
|
||||
# The AUTHORS file lists the copyright holders; this file
|
||||
# lists people. For example, Google employees are listed here
|
||||
# but not in AUTHORS, because Google holds the copyright.
|
||||
#
|
||||
# The submission process automatically checks to make sure
|
||||
# that people submitting code are listed in this file (by email address).
|
||||
#
|
||||
# Names should be added to this file only after verifying that
|
||||
# the individual or the individual's organization has agreed to
|
||||
# the appropriate Contributor License Agreement, found here:
|
||||
#
|
||||
# http://code.google.com/legal/individual-cla-v1.0.html
|
||||
# http://code.google.com/legal/corporate-cla-v1.0.html
|
||||
#
|
||||
# The agreement for individuals can be filled out on the web.
|
||||
#
|
||||
# When adding J Random Contributor's name to this file,
|
||||
# either J's name or J's organization's name should be
|
||||
# added to the AUTHORS file, depending on whether the
|
||||
# individual or corporate CLA was used.
|
||||
|
||||
# Names should be added to this file like so:
|
||||
# Name <email address>
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Andrew Gerrand <adg@golang.org>
|
||||
Jeff R. Allen <jra@nella.org> <jeff.allen@gmail.com>
|
||||
Maksim Kochkin <maxxarts@gmail.com>
|
||||
Michael Fogleman <fogleman@gmail.com>
|
||||
Nigel Tao <nigeltao@golang.org>
|
||||
Rémy Oudompheng <oudomphe@phare.normalesup.org> <remyoudompheng@gmail.com>
|
||||
Rob Pike <r@golang.org>
|
||||
Roger Peppe <rogpeppe@gmail.com>
|
||||
Russ Cox <rsc@golang.org>
|
||||
Steven Edwards <steven@stephenwithav.com>
|
2
vendor/github.com/golang/freetype/raster/raster.go
generated
vendored
2
vendor/github.com/golang/freetype/raster/raster.go
generated
vendored
|
@ -13,7 +13,7 @@
|
|||
// the Freetype "smooth" module, and the Anti-Grain Geometry library. A
|
||||
// description of the area/coverage algorithm is at
|
||||
// http://projects.tuxee.net/cl-vectors/section-the-cl-aa-algorithm
|
||||
package raster
|
||||
package raster // import "github.com/golang/freetype/raster"
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
|
42
vendor/github.com/golang/freetype/testdata/COPYING
generated
vendored
Normal file
42
vendor/github.com/golang/freetype/testdata/COPYING
generated
vendored
Normal file
|
@ -0,0 +1,42 @@
|
|||
Luxi fonts copyright (c) 2001 by Bigelow & Holmes Inc. Luxi font
|
||||
instruction code copyright (c) 2001 by URW++ GmbH. All Rights
|
||||
Reserved. Luxi is a registered trademark of Bigelow & Holmes Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of these Fonts and associated documentation files (the "Font
|
||||
Software"), to deal in the Font Software, including without
|
||||
limitation the rights to use, copy, merge, publish, distribute,
|
||||
sublicense, and/or sell copies of the Font Software, and to permit
|
||||
persons to whom the Font Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright and trademark notices and this permission notice
|
||||
shall be included in all copies of one or more of the Font Software.
|
||||
|
||||
The Font Software may not be modified, altered, or added to, and in
|
||||
particular the designs of glyphs or characters in the Fonts may not
|
||||
be modified nor may additional glyphs or characters be added to the
|
||||
Fonts. This License becomes null and void when the Fonts or Font
|
||||
Software have been modified.
|
||||
|
||||
THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
|
||||
OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL
|
||||
BIGELOW & HOLMES INC. OR URW++ GMBH. BE LIABLE FOR ANY CLAIM, DAMAGES
|
||||
OR OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT,
|
||||
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF
|
||||
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR
|
||||
INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT
|
||||
SOFTWARE.
|
||||
|
||||
Except as contained in this notice, the names of Bigelow & Holmes
|
||||
Inc. and URW++ GmbH. shall not be used in advertising or otherwise to
|
||||
promote the sale, use or other dealings in this Font Software without
|
||||
prior written authorization from Bigelow & Holmes Inc. and URW++ GmbH.
|
||||
|
||||
For further information, contact:
|
||||
|
||||
info@urwpp.de
|
||||
or
|
||||
design@bigelowandholmes.com
|
5
vendor/github.com/golang/freetype/truetype/glyph.go
generated
vendored
5
vendor/github.com/golang/freetype/truetype/glyph.go
generated
vendored
|
@ -209,6 +209,7 @@ func (g *GlyphBuf) load(recursion uint32, i Index, useMyMetrics bool) (err error
|
|||
g.addPhantomsAndScale(len(g.Points), len(g.Points), true, true)
|
||||
copy(g.phantomPoints[:], g.Points[len(g.Points)-4:])
|
||||
g.Points = g.Points[:len(g.Points)-4]
|
||||
// TODO: also trim g.InFontUnits and g.Unhinted?
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -282,6 +283,10 @@ func (g *GlyphBuf) loadSimple(glyf []byte, ne int) (program []byte) {
|
|||
program = glyf[offset : offset+instrLen]
|
||||
offset += instrLen
|
||||
|
||||
if ne == 0 {
|
||||
return program
|
||||
}
|
||||
|
||||
np0 := len(g.Points)
|
||||
np1 := np0 + int(g.Ends[len(g.Ends)-1])
|
||||
|
||||
|
|
30
vendor/github.com/golang/freetype/truetype/truetype.go
generated
vendored
30
vendor/github.com/golang/freetype/truetype/truetype.go
generated
vendored
|
@ -15,7 +15,7 @@
|
|||
//
|
||||
// To measure a TrueType font in ideal FUnit space, use scale equal to
|
||||
// font.FUnitsPerEm().
|
||||
package truetype
|
||||
package truetype // import "github.com/golang/freetype/truetype"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
@ -57,7 +57,8 @@ const (
|
|||
// A 32-bit encoding consists of a most-significant 16-bit Platform ID and a
|
||||
// least-significant 16-bit Platform Specific ID. The magic numbers are
|
||||
// specified at https://www.microsoft.com/typography/otspec/name.htm
|
||||
unicodeEncoding = 0x00000003 // PID = 0 (Unicode), PSID = 3 (Unicode 2.0)
|
||||
unicodeEncodingBMPOnly = 0x00000003 // PID = 0 (Unicode), PSID = 3 (Unicode 2.0 BMP Only)
|
||||
unicodeEncodingFull = 0x00000004 // PID = 0 (Unicode), PSID = 4 (Unicode 2.0 Full Repertoire)
|
||||
microsoftSymbolEncoding = 0x00030000 // PID = 3 (Microsoft), PSID = 0 (Symbol)
|
||||
microsoftUCS2Encoding = 0x00030001 // PID = 3 (Microsoft), PSID = 1 (UCS-2)
|
||||
microsoftUCS4Encoding = 0x0003000a // PID = 3 (Microsoft), PSID = 10 (UCS-4)
|
||||
|
@ -142,7 +143,7 @@ func parseSubtables(table []byte, name string, offset, size int, pred func([]byt
|
|||
pidPsid := u32(table, offset)
|
||||
// We prefer the Unicode cmap encoding. Failing to find that, we fall
|
||||
// back onto the Microsoft cmap encoding.
|
||||
if pidPsid == unicodeEncoding {
|
||||
if pidPsid == unicodeEncodingBMPOnly || pidPsid == unicodeEncodingFull {
|
||||
bestOffset, bestPID, ok = offset, pidPsid>>16, true
|
||||
break
|
||||
|
||||
|
@ -323,11 +324,20 @@ func (f *Font) parseKern() error {
|
|||
if version != 0 {
|
||||
return UnsupportedError(fmt.Sprintf("kern version: %d", version))
|
||||
}
|
||||
|
||||
n, offset := u16(f.kern, offset), offset+2
|
||||
if n != 1 {
|
||||
return UnsupportedError(fmt.Sprintf("kern nTables: %d", n))
|
||||
if n == 0 {
|
||||
return UnsupportedError("kern nTables: 0")
|
||||
}
|
||||
offset += 2
|
||||
// TODO: support multiple subtables. In practice, almost all .ttf files
|
||||
// have only one subtable, if they have a kern table at all. But it's not
|
||||
// impossible. Xolonium Regular (https://fontlibrary.org/en/font/xolonium)
|
||||
// has 3 subtables. Those subtables appear to be disjoint, rather than
|
||||
// being the same kerning pairs encoded in three different ways.
|
||||
//
|
||||
// For now, we'll use only the first subtable.
|
||||
|
||||
offset += 2 // Skip the version.
|
||||
length, offset := int(u16(f.kern, offset)), offset+2
|
||||
coverage, offset := u16(f.kern, offset), offset+2
|
||||
if coverage != 0x0001 {
|
||||
|
@ -550,8 +560,7 @@ func parse(ttf []byte, offset int) (font *Font, err error) {
|
|||
return
|
||||
}
|
||||
ttcVersion, offset := u32(ttf, offset), offset+4
|
||||
if ttcVersion != 0x00010000 {
|
||||
// TODO: support TTC version 2.0, once I have such a .ttc file to test with.
|
||||
if ttcVersion != 0x00010000 && ttcVersion != 0x00020000 {
|
||||
err = FormatError("bad TTC version")
|
||||
return
|
||||
}
|
||||
|
@ -578,14 +587,15 @@ func parse(ttf []byte, offset int) (font *Font, err error) {
|
|||
return
|
||||
}
|
||||
n, offset := int(u16(ttf, offset)), offset+2
|
||||
if len(ttf) < 16*n+12 {
|
||||
offset += 6 // Skip the searchRange, entrySelector and rangeShift.
|
||||
if len(ttf) < 16*n+offset {
|
||||
err = FormatError("TTF data is too short")
|
||||
return
|
||||
}
|
||||
f := new(Font)
|
||||
// Assign the table slices.
|
||||
for i := 0; i < n; i++ {
|
||||
x := 16*i + 12
|
||||
x := 16*i + offset
|
||||
switch string(ttf[x : x+4]) {
|
||||
case "cmap":
|
||||
f.cmap, err = readTable(ttf, ttf[x+8:x+16])
|
||||
|
|
3
vendor/github.com/golang/protobuf/AUTHORS
generated
vendored
Normal file
3
vendor/github.com/golang/protobuf/AUTHORS
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
# This source code refers to The Go Authors for copyright purposes.
|
||||
# The master list of authors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/AUTHORS.
|
3
vendor/github.com/golang/protobuf/CONTRIBUTORS
generated
vendored
Normal file
3
vendor/github.com/golang/protobuf/CONTRIBUTORS
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
# This source code was written by the Go contributors.
|
||||
# The master list of contributors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/CONTRIBUTORS.
|
43
vendor/github.com/golang/protobuf/proto/Makefile
generated
vendored
43
vendor/github.com/golang/protobuf/proto/Makefile
generated
vendored
|
@ -1,43 +0,0 @@
|
|||
# Go support for Protocol Buffers - Google's data interchange format
|
||||
#
|
||||
# Copyright 2010 The Go Authors. All rights reserved.
|
||||
# https://github.com/golang/protobuf
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
install:
|
||||
go install
|
||||
|
||||
test: install generate-test-pbs
|
||||
go test
|
||||
|
||||
|
||||
generate-test-pbs:
|
||||
make install
|
||||
make -C testdata
|
||||
protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
|
||||
make
|
58
vendor/github.com/golang/protobuf/proto/clone.go
generated
vendored
58
vendor/github.com/golang/protobuf/proto/clone.go
generated
vendored
|
@ -30,27 +30,44 @@
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer deep copy and merge.
|
||||
// TODO: MessageSet and RawMessage.
|
||||
// TODO: RawMessage.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Clone returns a deep copy of a protocol buffer.
|
||||
func Clone(pb Message) Message {
|
||||
in := reflect.ValueOf(pb)
|
||||
func Clone(src Message) Message {
|
||||
in := reflect.ValueOf(src)
|
||||
if in.IsNil() {
|
||||
return pb
|
||||
return src
|
||||
}
|
||||
|
||||
out := reflect.New(in.Type().Elem())
|
||||
// out is empty so a merge is a deep copy.
|
||||
mergeStruct(out.Elem(), in.Elem())
|
||||
return out.Interface().(Message)
|
||||
dst := out.Interface().(Message)
|
||||
Merge(dst, src)
|
||||
return dst
|
||||
}
|
||||
|
||||
// Merger is the interface representing objects that can merge messages of the same type.
|
||||
type Merger interface {
|
||||
// Merge merges src into this message.
|
||||
// Required and optional fields that are set in src will be set to that value in dst.
|
||||
// Elements of repeated fields will be appended.
|
||||
//
|
||||
// Merge may panic if called with a different argument type than the receiver.
|
||||
Merge(src Message)
|
||||
}
|
||||
|
||||
// generatedMerger is the custom merge method that generated protos will have.
|
||||
// We must add this method since a generate Merge method will conflict with
|
||||
// many existing protos that have a Merge data field already defined.
|
||||
type generatedMerger interface {
|
||||
XXX_Merge(src Message)
|
||||
}
|
||||
|
||||
// Merge merges src into dst.
|
||||
|
@ -58,17 +75,24 @@ func Clone(pb Message) Message {
|
|||
// Elements of repeated fields will be appended.
|
||||
// Merge panics if src and dst are not the same type, or if dst is nil.
|
||||
func Merge(dst, src Message) {
|
||||
if m, ok := dst.(Merger); ok {
|
||||
m.Merge(src)
|
||||
return
|
||||
}
|
||||
|
||||
in := reflect.ValueOf(src)
|
||||
out := reflect.ValueOf(dst)
|
||||
if out.IsNil() {
|
||||
panic("proto: nil destination")
|
||||
}
|
||||
if in.Type() != out.Type() {
|
||||
// Explicit test prior to mergeStruct so that mistyped nils will fail
|
||||
panic("proto: type mismatch")
|
||||
panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
|
||||
}
|
||||
if in.IsNil() {
|
||||
// Merging nil into non-nil is a quiet no-op
|
||||
return // Merge from nil src is a noop
|
||||
}
|
||||
if m, ok := dst.(generatedMerger); ok {
|
||||
m.XXX_Merge(src)
|
||||
return
|
||||
}
|
||||
mergeStruct(out.Elem(), in.Elem())
|
||||
|
@ -84,9 +108,15 @@ func mergeStruct(out, in reflect.Value) {
|
|||
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
|
||||
}
|
||||
|
||||
if emIn, ok := in.Addr().Interface().(extendableProto); ok {
|
||||
emOut := out.Addr().Interface().(extendableProto)
|
||||
mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
|
||||
if emIn, err := extendable(in.Addr().Interface()); err == nil {
|
||||
emOut, _ := extendable(out.Addr().Interface())
|
||||
mIn, muIn := emIn.extensionsRead()
|
||||
if mIn != nil {
|
||||
mOut := emOut.extensionsWrite()
|
||||
muIn.Lock()
|
||||
mergeExtension(mOut, mIn)
|
||||
muIn.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
uf := in.FieldByName("XXX_unrecognized")
|
||||
|
|
769
vendor/github.com/golang/protobuf/proto/decode.go
generated
vendored
769
vendor/github.com/golang/protobuf/proto/decode.go
generated
vendored
|
@ -39,8 +39,6 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// errOverflow is returned when an integer is too large to be represented.
|
||||
|
@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow")
|
|||
// wire type is encountered. It does not get returned to user code.
|
||||
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
||||
|
||||
// The fundamental decoders that interpret bytes on the wire.
|
||||
// Those that take integer types all return uint64 and are
|
||||
// therefore of type valueDecoder.
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the slice.
|
||||
// It returns the integer and the number of bytes consumed, or
|
||||
// zero if there is not enough.
|
||||
|
@ -61,7 +55,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for
|
|||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func DecodeVarint(buf []byte) (x uint64, n int) {
|
||||
// x, n already 0
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if n >= len(buf) {
|
||||
return 0, 0
|
||||
|
@ -78,13 +71,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) {
|
|||
return 0, 0
|
||||
}
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
|
||||
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
|
||||
i := p.index
|
||||
l := len(p.buf)
|
||||
|
||||
|
@ -107,6 +94,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||
i := p.index
|
||||
buf := p.buf
|
||||
|
||||
if i >= len(buf) {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
} else if buf[i] < 0x80 {
|
||||
p.index++
|
||||
return uint64(buf[i]), nil
|
||||
} else if len(buf)-i < 10 {
|
||||
return p.decodeVarintSlow()
|
||||
}
|
||||
|
||||
var b uint64
|
||||
// we already checked the first byte
|
||||
x = uint64(buf[i]) - 0x80
|
||||
i++
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 7
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 7
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 14
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 14
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 21
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 21
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 28
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 28
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 35
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 35
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 42
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 42
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 49
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 49
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 56
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 56
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 63
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
// x -= 0x80 << 63 // Always zero.
|
||||
|
||||
return 0, errOverflow
|
||||
|
||||
done:
|
||||
p.index = i
|
||||
return x, nil
|
||||
}
|
||||
|
||||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed64, sfixed64, and double protocol buffer types.
|
||||
|
@ -173,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
// These are not ValueDecoders: they produce an array of bytes or a string.
|
||||
// bytes, embedded messages
|
||||
|
||||
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
||||
// This is the format used for the bytes protocol buffer
|
||||
// type and for embedded messages.
|
||||
|
@ -217,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) {
|
|||
return string(buf), nil
|
||||
}
|
||||
|
||||
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
|
||||
// If the protocol buffer has extensions, and the field matches, add it as an extension.
|
||||
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
|
||||
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
|
||||
oi := o.index
|
||||
|
||||
err := o.skip(t, tag, wire)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !unrecField.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
ptr := structPointer_Bytes(base, unrecField)
|
||||
|
||||
// Add the skipped field to struct field
|
||||
obuf := o.buf
|
||||
|
||||
o.buf = *ptr
|
||||
o.EncodeVarint(uint64(tag<<3 | wire))
|
||||
*ptr = append(o.buf, obuf[oi:o.index]...)
|
||||
|
||||
o.buf = obuf
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
|
||||
func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
|
||||
|
||||
var u uint64
|
||||
var err error
|
||||
|
||||
switch wire {
|
||||
case WireVarint:
|
||||
_, err = o.DecodeVarint()
|
||||
case WireFixed64:
|
||||
_, err = o.DecodeFixed64()
|
||||
case WireBytes:
|
||||
_, err = o.DecodeRawBytes(false)
|
||||
case WireFixed32:
|
||||
_, err = o.DecodeFixed32()
|
||||
case WireStartGroup:
|
||||
for {
|
||||
u, err = o.DecodeVarint()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
fwire := int(u & 0x7)
|
||||
if fwire == WireEndGroup {
|
||||
break
|
||||
}
|
||||
ftag := int(u >> 3)
|
||||
err = o.skip(t, ftag, fwire)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The method should reset the receiver before
|
||||
// decoding starts. The argument points to data that may be
|
||||
// unmarshal themselves. The argument points to data that may be
|
||||
// overwritten, so implementations should not keep references to the
|
||||
// buffer.
|
||||
// Unmarshal implementations should not clear the receiver.
|
||||
// Any unmarshaled data should be merged into the receiver.
|
||||
// Callers of Unmarshal that do not want to retain existing data
|
||||
// should Reset the receiver before calling Unmarshal.
|
||||
type Unmarshaler interface {
|
||||
Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// newUnmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The semantics are identical to Unmarshaler.
|
||||
//
|
||||
// This exists to support protoc-gen-go generated messages.
|
||||
// The proto package will stop type-asserting to this interface in the future.
|
||||
//
|
||||
// DO NOT DEPEND ON THIS.
|
||||
type newUnmarshaler interface {
|
||||
XXX_Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in buf and places the
|
||||
// decoded result in pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
|
@ -301,7 +334,13 @@ type Unmarshaler interface {
|
|||
// to preserve and append to existing data.
|
||||
func Unmarshal(buf []byte, pb Message) error {
|
||||
pb.Reset()
|
||||
return UnmarshalMerge(buf, pb)
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
return u.XXX_Unmarshal(buf)
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// UnmarshalMerge parses the protocol buffer representation in buf and
|
||||
|
@ -311,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error {
|
|||
// UnmarshalMerge merges into existing data in pb.
|
||||
// Most code should use Unmarshal instead.
|
||||
func UnmarshalMerge(buf []byte, pb Message) error {
|
||||
// If the object can unmarshal itself, let it.
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
return u.XXX_Unmarshal(buf)
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
// NOTE: The history of proto have unfortunately been inconsistent
|
||||
// whether Unmarshaler should or should not implicitly clear itself.
|
||||
// Some implementations do, most do not.
|
||||
// Thus, calling this here may or may not do what people want.
|
||||
//
|
||||
// See https://github.com/golang/protobuf/issues/424
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
|
@ -328,536 +375,54 @@ func (p *Buffer) DecodeMessage(pb Message) error {
|
|||
}
|
||||
|
||||
// DecodeGroup reads a tag-delimited group from the Buffer.
|
||||
// StartGroup tag is already consumed. This function consumes
|
||||
// EndGroup tag.
|
||||
func (p *Buffer) DecodeGroup(pb Message) error {
|
||||
typ, base, err := getbase(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
b := p.buf[p.index:]
|
||||
x, y := findEndGroup(b)
|
||||
if x < 0 {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
|
||||
err := Unmarshal(b[:x], pb)
|
||||
p.index += y
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in the
|
||||
// Buffer and places the decoded result in pb. If the struct
|
||||
// underlying pb does not match the data in the buffer, the results can be
|
||||
// unpredictable.
|
||||
//
|
||||
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
|
||||
func (p *Buffer) Unmarshal(pb Message) error {
|
||||
// If the object can unmarshal itself, let it.
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
err := u.XXX_Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
// NOTE: The history of proto have unfortunately been inconsistent
|
||||
// whether Unmarshaler should or should not implicitly clear itself.
|
||||
// Some implementations do, most do not.
|
||||
// Thus, calling this here may or may not do what people want.
|
||||
//
|
||||
// See https://github.com/golang/protobuf/issues/424
|
||||
err := u.Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
|
||||
typ, base, err := getbase(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
|
||||
|
||||
if collectStats {
|
||||
stats.Decode++
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// unmarshalType does the work of unmarshaling a structure.
|
||||
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
|
||||
var state errorState
|
||||
required, reqFields := prop.reqCount, uint64(0)
|
||||
|
||||
var err error
|
||||
for err == nil && o.index < len(o.buf) {
|
||||
oi := o.index
|
||||
var u uint64
|
||||
u, err = o.DecodeVarint()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
wire := int(u & 0x7)
|
||||
if wire == WireEndGroup {
|
||||
if is_group {
|
||||
return nil // input is satisfied
|
||||
}
|
||||
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
|
||||
}
|
||||
tag := int(u >> 3)
|
||||
if tag <= 0 {
|
||||
return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
|
||||
}
|
||||
fieldnum, ok := prop.decoderTags.get(tag)
|
||||
if !ok {
|
||||
// Maybe it's an extension?
|
||||
if prop.extendable {
|
||||
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
|
||||
if err = o.skip(st, tag, wire); err == nil {
|
||||
ext := e.ExtensionMap()[int32(tag)] // may be missing
|
||||
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
|
||||
e.ExtensionMap()[int32(tag)] = ext
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Maybe it's a oneof?
|
||||
if prop.oneofUnmarshaler != nil {
|
||||
m := structPointer_Interface(base, st).(Message)
|
||||
// First return value indicates whether tag is a oneof field.
|
||||
ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
|
||||
if err == ErrInternalBadWireType {
|
||||
// Map the error to something more descriptive.
|
||||
// Do the formatting here to save generated code space.
|
||||
err = fmt.Errorf("bad wiretype for oneof field in %T", m)
|
||||
}
|
||||
if ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
|
||||
continue
|
||||
}
|
||||
p := prop.Prop[fieldnum]
|
||||
|
||||
if p.dec == nil {
|
||||
fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
|
||||
continue
|
||||
}
|
||||
dec := p.dec
|
||||
if wire != WireStartGroup && wire != p.WireType {
|
||||
if wire == WireBytes && p.packedDec != nil {
|
||||
// a packable field
|
||||
dec = p.packedDec
|
||||
} else {
|
||||
err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
|
||||
continue
|
||||
}
|
||||
}
|
||||
decErr := dec(o, p, base)
|
||||
if decErr != nil && !state.shouldContinue(decErr, p) {
|
||||
err = decErr
|
||||
}
|
||||
if err == nil && p.Required {
|
||||
// Successfully decoded a required field.
|
||||
if tag <= 64 {
|
||||
// use bitmap for fields 1-64 to catch field reuse.
|
||||
var mask uint64 = 1 << uint64(tag-1)
|
||||
if reqFields&mask == 0 {
|
||||
// new required field
|
||||
reqFields |= mask
|
||||
required--
|
||||
}
|
||||
} else {
|
||||
// This is imprecise. It can be fooled by a required field
|
||||
// with a tag > 64 that is encoded twice; that's very rare.
|
||||
// A fully correct implementation would require allocating
|
||||
// a data structure, which we would like to avoid.
|
||||
required--
|
||||
}
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
if is_group {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if state.err != nil {
|
||||
return state.err
|
||||
}
|
||||
if required > 0 {
|
||||
// Not enough information to determine the exact field. If we use extra
|
||||
// CPU, we could determine the field only if the missing required field
|
||||
// has a tag <= 64 and we check reqFields.
|
||||
return &RequiredNotSetError{"{Unknown}"}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Individual type decoders
|
||||
// For each,
|
||||
// u is the decoded value,
|
||||
// v is a pointer to the field (pointer) in the struct
|
||||
|
||||
// Sizes of the pools to allocate inside the Buffer.
|
||||
// The goal is modest amortization and allocation
|
||||
// on at least 16-byte boundaries.
|
||||
const (
|
||||
boolPoolSize = 16
|
||||
uint32PoolSize = 8
|
||||
uint64PoolSize = 4
|
||||
)
|
||||
|
||||
// Decode a bool.
|
||||
func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(o.bools) == 0 {
|
||||
o.bools = make([]bool, boolPoolSize)
|
||||
}
|
||||
o.bools[0] = u != 0
|
||||
*structPointer_Bool(base, p.field) = &o.bools[0]
|
||||
o.bools = o.bools[1:]
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*structPointer_BoolVal(base, p.field) = u != 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode an int32.
|
||||
func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode an int64.
|
||||
func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word64_Set(structPointer_Word64(base, p.field), o, u)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a string.
|
||||
func (o *Buffer) dec_string(p *Properties, base structPointer) error {
|
||||
s, err := o.DecodeStringBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*structPointer_String(base, p.field) = &s
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
|
||||
s, err := o.DecodeStringBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*structPointer_StringVal(base, p.field) = s
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of bytes ([]byte).
|
||||
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
|
||||
b, err := o.DecodeRawBytes(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*structPointer_Bytes(base, p.field) = b
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of bools ([]bool).
|
||||
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v := structPointer_BoolSlice(base, p.field)
|
||||
*v = append(*v, u != 0)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of bools ([]bool) in packed format.
|
||||
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
|
||||
v := structPointer_BoolSlice(base, p.field)
|
||||
|
||||
nn, err := o.DecodeVarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nb := int(nn) // number of bytes of encoded bools
|
||||
|
||||
y := *v
|
||||
for i := 0; i < nb; i++ {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
y = append(y, u != 0)
|
||||
}
|
||||
|
||||
*v = y
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int32s ([]int32).
|
||||
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
structPointer_Word32Slice(base, p.field).Append(uint32(u))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int32s ([]int32) in packed format.
|
||||
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
|
||||
v := structPointer_Word32Slice(base, p.field)
|
||||
|
||||
nn, err := o.DecodeVarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nb := int(nn) // number of bytes of encoded int32s
|
||||
|
||||
fin := o.index + nb
|
||||
if fin < o.index {
|
||||
return errOverflow
|
||||
}
|
||||
for o.index < fin {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Append(uint32(u))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int64s ([]int64).
|
||||
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
structPointer_Word64Slice(base, p.field).Append(u)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of int64s ([]int64) in packed format.
|
||||
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
|
||||
v := structPointer_Word64Slice(base, p.field)
|
||||
|
||||
nn, err := o.DecodeVarint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nb := int(nn) // number of bytes of encoded int64s
|
||||
|
||||
fin := o.index + nb
|
||||
if fin < o.index {
|
||||
return errOverflow
|
||||
}
|
||||
for o.index < fin {
|
||||
u, err := p.valDec(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Append(u)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of strings ([]string).
|
||||
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
|
||||
s, err := o.DecodeStringBytes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v := structPointer_StringSlice(base, p.field)
|
||||
*v = append(*v, s)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a slice of slice of bytes ([][]byte).
|
||||
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
|
||||
b, err := o.DecodeRawBytes(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v := structPointer_BytesSlice(base, p.field)
|
||||
*v = append(*v, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a map field.
|
||||
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
|
||||
raw, err := o.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
oi := o.index // index at the end of this map entry
|
||||
o.index -= len(raw) // move buffer back to start of map entry
|
||||
|
||||
mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
|
||||
if mptr.Elem().IsNil() {
|
||||
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
|
||||
}
|
||||
v := mptr.Elem() // map[K]V
|
||||
|
||||
// Prepare addressable doubly-indirect placeholders for the key and value types.
|
||||
// See enc_new_map for why.
|
||||
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
|
||||
keybase := toStructPointer(keyptr.Addr()) // **K
|
||||
|
||||
var valbase structPointer
|
||||
var valptr reflect.Value
|
||||
switch p.mtype.Elem().Kind() {
|
||||
case reflect.Slice:
|
||||
// []byte
|
||||
var dummy []byte
|
||||
valptr = reflect.ValueOf(&dummy) // *[]byte
|
||||
valbase = toStructPointer(valptr) // *[]byte
|
||||
case reflect.Ptr:
|
||||
// message; valptr is **Msg; need to allocate the intermediate pointer
|
||||
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
|
||||
valptr.Set(reflect.New(valptr.Type().Elem()))
|
||||
valbase = toStructPointer(valptr)
|
||||
default:
|
||||
// everything else
|
||||
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
|
||||
valbase = toStructPointer(valptr.Addr()) // **V
|
||||
}
|
||||
|
||||
// Decode.
|
||||
// This parses a restricted wire format, namely the encoding of a message
|
||||
// with two fields. See enc_new_map for the format.
|
||||
for o.index < oi {
|
||||
// tagcode for key and value properties are always a single byte
|
||||
// because they have tags 1 and 2.
|
||||
tagcode := o.buf[o.index]
|
||||
o.index++
|
||||
switch tagcode {
|
||||
case p.mkeyprop.tagcode[0]:
|
||||
if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
|
||||
return err
|
||||
}
|
||||
case p.mvalprop.tagcode[0]:
|
||||
if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
// TODO: Should we silently skip this instead?
|
||||
return fmt.Errorf("proto: bad map data tag %d", raw[0])
|
||||
}
|
||||
}
|
||||
keyelem, valelem := keyptr.Elem(), valptr.Elem()
|
||||
if !keyelem.IsValid() || !valelem.IsValid() {
|
||||
// We did not decode the key or the value in the map entry.
|
||||
// Either way, it's an invalid map entry.
|
||||
return fmt.Errorf("proto: bad map data: missing key/val")
|
||||
}
|
||||
|
||||
v.SetMapIndex(keyelem, valelem)
|
||||
return nil
|
||||
}
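// Editorial note (not in the upstream source): on the wire, a map field such
// as the hypothetical "map<string, int32> counts = 5;" is encoded as if it
// were a repeated message with exactly two fields,
//
//	message MapFieldEntry {
//		string key   = 1;
//		int32  value = 2;
//	}
//
// which is why the loop above only needs to dispatch on the single-byte tag
// codes of fields 1 and 2.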
|
||||
|
||||
// Decode a group.
|
||||
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
|
||||
bas := structPointer_GetStructPointer(base, p.field)
|
||||
if structPointer_IsNil(bas) {
|
||||
// allocate new nested message
|
||||
bas = toStructPointer(reflect.New(p.stype))
|
||||
structPointer_SetStructPointer(base, p.field, bas)
|
||||
}
|
||||
return o.unmarshalType(p.stype, p.sprop, true, bas)
|
||||
}
|
||||
|
||||
// Decode an embedded message.
|
||||
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
|
||||
raw, e := o.DecodeRawBytes(false)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
|
||||
bas := structPointer_GetStructPointer(base, p.field)
|
||||
if structPointer_IsNil(bas) {
|
||||
// allocate new nested message
|
||||
bas = toStructPointer(reflect.New(p.stype))
|
||||
structPointer_SetStructPointer(base, p.field, bas)
|
||||
}
|
||||
|
||||
// If the object can unmarshal itself, let it.
|
||||
if p.isUnmarshaler {
|
||||
iv := structPointer_Interface(bas, p.stype)
|
||||
return iv.(Unmarshaler).Unmarshal(raw)
|
||||
}
|
||||
|
||||
obuf := o.buf
|
||||
oi := o.index
|
||||
o.buf = raw
|
||||
o.index = 0
|
||||
|
||||
err = o.unmarshalType(p.stype, p.sprop, false, bas)
|
||||
o.buf = obuf
|
||||
o.index = oi
|
||||
|
||||
return err
|
||||
}
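// Editorial sketch (not in the upstream source): the "can unmarshal itself"
// fast path above applies to any type implementing proto.Unmarshaler. A
// minimal, hypothetical implementation might look like:
//
//	type RawCapture struct{ Data []byte }
//
//	func (m *RawCapture) Reset()         { *m = RawCapture{} }
//	func (m *RawCapture) String() string { return fmt.Sprintf("%x", m.Data) }
//	func (*RawCapture) ProtoMessage()    {}
//
//	// Unmarshal is handed the embedded message's raw bytes directly.
//	func (m *RawCapture) Unmarshal(b []byte) error {
//		m.Data = append(m.Data[:0], b...)
//		return nil
//	}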
|
||||
|
||||
// Decode a slice of embedded messages.
|
||||
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
|
||||
return o.dec_slice_struct(p, false, base)
|
||||
}
|
||||
|
||||
// Decode a slice of embedded groups.
|
||||
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
|
||||
return o.dec_slice_struct(p, true, base)
|
||||
}
|
||||
|
||||
// Decode a slice of structs ([]*struct).
|
||||
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
|
||||
v := reflect.New(p.stype)
|
||||
bas := toStructPointer(v)
|
||||
structPointer_StructPointerSlice(base, p.field).Append(bas)
|
||||
|
||||
if is_group {
|
||||
err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
|
||||
return err
|
||||
}
|
||||
|
||||
raw, err := o.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the object can unmarshal itself, let it.
|
||||
if p.isUnmarshaler {
|
||||
iv := v.Interface()
|
||||
return iv.(Unmarshaler).Unmarshal(raw)
|
||||
}
|
||||
|
||||
obuf := o.buf
|
||||
oi := o.index
|
||||
o.buf = raw
|
||||
o.index = 0
|
||||
|
||||
err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
|
||||
|
||||
o.buf = obuf
|
||||
o.index = oi
|
||||
|
||||
// Slow workaround for messages that aren't Unmarshalers.
|
||||
// This includes some hand-coded .pb.go files and
|
||||
// bootstrap protos.
|
||||
// TODO: fix all of those and then add Unmarshal to
|
||||
// the Message interface. Then:
|
||||
// The cast above and code below can be deleted.
|
||||
// The old unmarshaler can be deleted.
|
||||
// Clients can call Unmarshal directly (can already do that, actually).
|
||||
var info InternalMessageInfo
|
||||
err := info.Unmarshal(pb, p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
|
|
350
vendor/github.com/golang/protobuf/proto/discard.go
generated
vendored
Normal file
|
@ -0,0 +1,350 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type generatedDiscarder interface {
|
||||
XXX_DiscardUnknown()
|
||||
}
|
||||
|
||||
// DiscardUnknown recursively discards all unknown fields from this message
|
||||
// and all embedded messages.
|
||||
//
|
||||
// When unmarshaling a message with unrecognized fields, the tags and values
|
||||
// of such fields are preserved in the Message. This allows a later call to
|
||||
// marshal to be able to produce a message that continues to have those
|
||||
// unrecognized fields. To avoid this, DiscardUnknown is used to
|
||||
// explicitly clear the unknown fields after unmarshaling.
|
||||
//
|
||||
// For proto2 messages, the unknown fields of message extensions are only
|
||||
// discarded from messages that have been accessed via GetExtension.
|
||||
func DiscardUnknown(m Message) {
|
||||
if m, ok := m.(generatedDiscarder); ok {
|
||||
m.XXX_DiscardUnknown()
|
||||
return
|
||||
}
|
||||
// TODO: Dynamically populate a InternalMessageInfo for legacy messages,
|
||||
// but the master branch has no implementation for InternalMessageInfo,
|
||||
// so it would be more work to replicate that approach.
|
||||
discardLegacy(m)
|
||||
}
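// Editorial usage sketch (not in the upstream source; "pb.Test" stands for a
// hypothetical generated message type):
//
//	msg := &pb.Test{}
//	if err := proto.Unmarshal(data, msg); err != nil {
//		log.Fatal(err)
//	}
//	proto.DiscardUnknown(msg) // drop any retained unknown fields
//	out, _ := proto.Marshal(msg)
//	_ = out // the re-encoded bytes no longer carry the unknown fields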
|
||||
|
||||
// DiscardUnknown recursively discards all unknown fields.
|
||||
func (a *InternalMessageInfo) DiscardUnknown(m Message) {
|
||||
di := atomicLoadDiscardInfo(&a.discard)
|
||||
if di == nil {
|
||||
di = getDiscardInfo(reflect.TypeOf(m).Elem())
|
||||
atomicStoreDiscardInfo(&a.discard, di)
|
||||
}
|
||||
di.discard(toPointer(&m))
|
||||
}
|
||||
|
||||
type discardInfo struct {
|
||||
typ reflect.Type
|
||||
|
||||
initialized int32 // 0: only typ is valid, 1: everything is valid
|
||||
lock sync.Mutex
|
||||
|
||||
fields []discardFieldInfo
|
||||
unrecognized field
|
||||
}
|
||||
|
||||
type discardFieldInfo struct {
|
||||
field field // Offset of field, guaranteed to be valid
|
||||
discard func(src pointer)
|
||||
}
|
||||
|
||||
var (
|
||||
discardInfoMap = map[reflect.Type]*discardInfo{}
|
||||
discardInfoLock sync.Mutex
|
||||
)
|
||||
|
||||
func getDiscardInfo(t reflect.Type) *discardInfo {
|
||||
discardInfoLock.Lock()
|
||||
defer discardInfoLock.Unlock()
|
||||
di := discardInfoMap[t]
|
||||
if di == nil {
|
||||
di = &discardInfo{typ: t}
|
||||
discardInfoMap[t] = di
|
||||
}
|
||||
return di
|
||||
}
|
||||
|
||||
func (di *discardInfo) discard(src pointer) {
|
||||
if src.isNil() {
|
||||
return // Nothing to do.
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&di.initialized) == 0 {
|
||||
di.computeDiscardInfo()
|
||||
}
|
||||
|
||||
for _, fi := range di.fields {
|
||||
sfp := src.offset(fi.field)
|
||||
fi.discard(sfp)
|
||||
}
|
||||
|
||||
// For proto2 messages, only discard unknown fields in message extensions
|
||||
// that have been accessed via GetExtension.
|
||||
if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
|
||||
// Ignore lock since DiscardUnknown is not concurrency safe.
|
||||
emm, _ := em.extensionsRead()
|
||||
for _, mx := range emm {
|
||||
if m, ok := mx.value.(Message); ok {
|
||||
DiscardUnknown(m)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if di.unrecognized.IsValid() {
|
||||
*src.offset(di.unrecognized).toBytes() = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (di *discardInfo) computeDiscardInfo() {
|
||||
di.lock.Lock()
|
||||
defer di.lock.Unlock()
|
||||
if di.initialized != 0 {
|
||||
return
|
||||
}
|
||||
t := di.typ
|
||||
n := t.NumField()
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
|
||||
dfi := discardFieldInfo{field: toField(&f)}
|
||||
tf := f.Type
|
||||
|
||||
// Unwrap tf to get its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
di := getDiscardInfo(tf)
|
||||
dfi.discard = func(src pointer) {
|
||||
sps := src.getPointerSlice()
|
||||
for _, sp := range sps {
|
||||
if !sp.isNil() {
|
||||
di.discard(sp)
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
di := getDiscardInfo(tf)
|
||||
dfi.discard = func(src pointer) {
|
||||
sp := src.getPointer()
|
||||
if !sp.isNil() {
|
||||
di.discard(sp)
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
|
||||
default: // E.g., map[K]V
|
||||
if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
|
||||
dfi.discard = func(src pointer) {
|
||||
sm := src.asPointerTo(tf).Elem()
|
||||
if sm.Len() == 0 {
|
||||
return
|
||||
}
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
DiscardUnknown(val.Interface().(Message))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
dfi.discard = func(pointer) {} // Noop
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
|
||||
default: // E.g., interface{}
|
||||
// TODO: Make this faster?
|
||||
dfi.discard = func(src pointer) {
|
||||
su := src.asPointerTo(tf).Elem()
|
||||
if !su.IsNil() {
|
||||
sv := su.Elem().Elem().Field(0)
|
||||
if sv.Kind() == reflect.Ptr && sv.IsNil() {
|
||||
return
|
||||
}
|
||||
switch sv.Type().Kind() {
|
||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||
DiscardUnknown(sv.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
continue
|
||||
}
|
||||
di.fields = append(di.fields, dfi)
|
||||
}
|
||||
|
||||
di.unrecognized = invalidField
|
||||
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
|
||||
if f.Type != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
di.unrecognized = toField(&f)
|
||||
}
|
||||
|
||||
atomic.StoreInt32(&di.initialized, 1)
|
||||
}
|
||||
|
||||
func discardLegacy(m Message) {
|
||||
v := reflect.ValueOf(m)
|
||||
if v.Kind() != reflect.Ptr || v.IsNil() {
|
||||
return
|
||||
}
|
||||
v = v.Elem()
|
||||
if v.Kind() != reflect.Struct {
|
||||
return
|
||||
}
|
||||
t := v.Type()
|
||||
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
vf := v.Field(i)
|
||||
tf := f.Type
|
||||
|
||||
// Unwrap tf to get its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
for j := 0; j < vf.Len(); j++ {
|
||||
discardLegacy(vf.Index(j).Interface().(Message))
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
discardLegacy(vf.Interface().(Message))
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
|
||||
default: // E.g., map[K]V
|
||||
tv := vf.Type().Elem()
|
||||
if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
|
||||
for _, key := range vf.MapKeys() {
|
||||
val := vf.MapIndex(key)
|
||||
discardLegacy(val.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
|
||||
default: // E.g., test_proto.isCommunique_Union interface
|
||||
if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
|
||||
vf = vf.Elem() // E.g., *test_proto.Communique_Msg
|
||||
if !vf.IsNil() {
|
||||
vf = vf.Elem() // E.g., test_proto.Communique_Msg
|
||||
vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
|
||||
if vf.Kind() == reflect.Ptr {
|
||||
discardLegacy(vf.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
|
||||
if vf.Type() != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
vf.Set(reflect.ValueOf([]byte(nil)))
|
||||
}
|
||||
|
||||
// For proto2 messages, only discard unknown fields in message extensions
|
||||
// that have been accessed via GetExtension.
|
||||
if em, err := extendable(m); err == nil {
|
||||
// Ignore lock since discardLegacy is not concurrency safe.
|
||||
emm, _ := em.extensionsRead()
|
||||
for _, mx := range emm {
|
||||
if m, ok := mx.value.(Message); ok {
|
||||
discardLegacy(m)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
1181
vendor/github.com/golang/protobuf/proto/encode.go
generated
vendored
File diff suppressed because it is too large
93
vendor/github.com/golang/protobuf/proto/equal.go
generated
vendored
|
@ -30,7 +30,6 @@
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer comparison.
|
||||
// TODO: MessageSet.
|
||||
|
||||
package proto
|
||||
|
||||
|
@ -51,15 +50,21 @@ Equality is defined in this way:
|
|||
are equal, and extensions sets are equal.
|
||||
- Two set scalar fields are equal iff their values are equal.
|
||||
If the fields are of a floating-point type, remember that
|
||||
NaN != x for all x, including NaN.
|
||||
NaN != x for all x, including NaN. If the message is defined
|
||||
in a proto3 .proto file, fields are not "set"; specifically,
|
||||
zero length proto3 "bytes" fields are equal (nil == {}).
|
||||
- Two repeated fields are equal iff their lengths are the same,
|
||||
and their corresponding elements are equal (a "bytes" field,
|
||||
although represented by []byte, is not a repeated field)
|
||||
and their corresponding elements are equal. Note a "bytes" field,
|
||||
although represented by []byte, is not a repeated field and the
|
||||
rule for the scalar fields described above applies.
|
||||
- Two unset fields are equal.
|
||||
- Two unknown field sets are equal if their current
|
||||
encoded state is equal.
|
||||
- Two extension sets are equal iff they have corresponding
|
||||
elements that are pairwise equal.
|
||||
- Two map fields are equal iff their lengths are the same,
|
||||
and they contain the same set of elements. Zero-length map
|
||||
fields are equal.
|
||||
- Every other combination of things are not equal.
|
||||
|
||||
The return value is undefined if a and b are not protocol buffers.
|
||||
|
@ -89,6 +94,7 @@ func Equal(a, b Message) bool {
|
|||
|
||||
// v1 and v2 are known to have the same type.
|
||||
func equalStruct(v1, v2 reflect.Value) bool {
|
||||
sprop := GetProperties(v1.Type())
|
||||
for i := 0; i < v1.NumField(); i++ {
|
||||
f := v1.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
|
@ -103,25 +109,23 @@ func equalStruct(v1, v2 reflect.Value) bool {
|
|||
// set/unset mismatch
|
||||
return false
|
||||
}
|
||||
b1, ok := f1.Interface().(raw)
|
||||
if ok {
|
||||
b2 := f2.Interface().(raw)
|
||||
// RawMessage
|
||||
if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
|
||||
return false
|
||||
}
|
||||
continue
|
||||
}
|
||||
f1, f2 = f1.Elem(), f2.Elem()
|
||||
}
|
||||
if !equalAny(f1, f2) {
|
||||
if !equalAny(f1, f2, sprop.Prop[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_InternalExtensions")
|
||||
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_extensions")
|
||||
if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
||||
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
@ -133,15 +137,12 @@ func equalStruct(v1, v2 reflect.Value) bool {
|
|||
|
||||
u1 := uf.Bytes()
|
||||
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
|
||||
if !bytes.Equal(u1, u2) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
return bytes.Equal(u1, u2)
|
||||
}
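// Editorial usage sketch for the exported Equal entry point (not in the
// upstream source; "pb.Test" is a hypothetical generated type as in the
// package documentation, with Label *string and Reps []int64):
//
//	a := &pb.Test{Label: proto.String("hello")}
//	b := &pb.Test{Label: proto.String("hello")}
//	fmt.Println(proto.Equal(a, b)) // true: same fields set to equal values
//	b.Reps = []int64{1}
//	fmt.Println(proto.Equal(a, b)) // false: repeated field lengths differ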
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
func equalAny(v1, v2 reflect.Value) bool {
|
||||
// prop may be nil.
|
||||
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
|
||||
if v1.Type() == protoMessageType {
|
||||
m1, _ := v1.Interface().(Message)
|
||||
m2, _ := v2.Interface().(Message)
|
||||
|
@ -164,7 +165,7 @@ func equalAny(v1, v2 reflect.Value) bool {
|
|||
if e1.Type() != e2.Type() {
|
||||
return false
|
||||
}
|
||||
return equalAny(e1, e2)
|
||||
return equalAny(e1, e2, nil)
|
||||
case reflect.Map:
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
|
@ -175,16 +176,29 @@ func equalAny(v1, v2 reflect.Value) bool {
|
|||
// This key was not found in the second map.
|
||||
return false
|
||||
}
|
||||
if !equalAny(v1.MapIndex(key), val2) {
|
||||
if !equalAny(v1.MapIndex(key), val2, nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Ptr:
|
||||
return equalAny(v1.Elem(), v2.Elem())
|
||||
// Maps may have nil values in them, so check for nil.
|
||||
if v1.IsNil() && v2.IsNil() {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return equalAny(v1.Elem(), v2.Elem(), prop)
|
||||
case reflect.Slice:
|
||||
if v1.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// short circuit: []byte
|
||||
|
||||
// Edge case: if this is in a proto3 message, a zero length
|
||||
// bytes field is considered the zero value.
|
||||
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
|
@ -195,7 +209,7 @@ func equalAny(v1, v2 reflect.Value) bool {
|
|||
return false
|
||||
}
|
||||
for i := 0; i < v1.Len(); i++ {
|
||||
if !equalAny(v1.Index(i), v2.Index(i)) {
|
||||
if !equalAny(v1.Index(i), v2.Index(i), prop) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
@ -214,8 +228,14 @@ func equalAny(v1, v2 reflect.Value) bool {
|
|||
}
|
||||
|
||||
// base is the struct type that the extensions are based on.
|
||||
// em1 and em2 are extension maps.
|
||||
func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
||||
// x1 and x2 are InternalExtensions.
|
||||
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
|
||||
em1, _ := x1.extensionsRead()
|
||||
em2, _ := x2.extensionsRead()
|
||||
return equalExtMap(base, em1, em2)
|
||||
}
|
||||
|
||||
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
||||
if len(em1) != len(em2) {
|
||||
return false
|
||||
}
|
||||
|
@ -228,9 +248,18 @@ func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
|||
|
||||
m1, m2 := e1.value, e2.value
|
||||
|
||||
if m1 == nil && m2 == nil {
|
||||
// Both have only encoded form.
|
||||
if bytes.Equal(e1.enc, e2.enc) {
|
||||
continue
|
||||
}
|
||||
// The bytes are different, but the extensions might still be
|
||||
// equal. We need to decode them to compare.
|
||||
}
|
||||
|
||||
if m1 != nil && m2 != nil {
|
||||
// Both are unencoded.
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
continue
|
||||
|
@ -243,8 +272,12 @@ func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
|||
desc = m[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
// If both have only encoded form and the bytes are the same,
|
||||
// it is handled above. We get here when the bytes are different.
|
||||
// We don't know how to decode it, so just compare them as byte
|
||||
// slices.
|
||||
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
|
||||
continue
|
||||
return false
|
||||
}
|
||||
var err error
|
||||
if m1 == nil {
|
||||
|
@ -258,7 +291,7 @@ func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
|||
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
|
||||
return false
|
||||
}
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
|
327
vendor/github.com/golang/protobuf/proto/extensions.go
generated
vendored
|
@ -38,6 +38,7 @@ package proto
|
|||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
@ -52,14 +53,111 @@ type ExtensionRange struct {
|
|||
Start, End int32 // both inclusive
|
||||
}
|
||||
|
||||
// extendableProto is an interface implemented by any protocol buffer that may be extended.
|
||||
// extendableProto is an interface implemented by any protocol buffer generated by the current
|
||||
// proto compiler that may be extended.
|
||||
type extendableProto interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
extensionsWrite() map[int32]Extension
|
||||
extensionsRead() (map[int32]Extension, sync.Locker)
|
||||
}
|
||||
|
||||
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
|
||||
// version of the proto compiler that may be extended.
|
||||
type extendableProtoV1 interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
ExtensionMap() map[int32]Extension
|
||||
}
|
||||
|
||||
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
|
||||
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
|
||||
type extensionAdapter struct {
|
||||
extendableProtoV1
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
|
||||
return e.ExtensionMap()
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
return e.ExtensionMap(), notLocker{}
|
||||
}
|
||||
|
||||
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
|
||||
type notLocker struct{}
|
||||
|
||||
func (n notLocker) Lock() {}
|
||||
func (n notLocker) Unlock() {}
|
||||
|
||||
// extendable returns the extendableProto interface for the given generated proto message.
|
||||
// If the proto message has the old extension format, it returns a wrapper that implements
|
||||
// the extendableProto interface.
|
||||
func extendable(p interface{}) (extendableProto, error) {
|
||||
switch p := p.(type) {
|
||||
case extendableProto:
|
||||
if isNilPtr(p) {
|
||||
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
|
||||
}
|
||||
return p, nil
|
||||
case extendableProtoV1:
|
||||
if isNilPtr(p) {
|
||||
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
|
||||
}
|
||||
return extensionAdapter{p}, nil
|
||||
}
|
||||
// Don't allocate a specific error containing %T:
|
||||
// this is the hot path for Clone and MarshalText.
|
||||
return nil, errNotExtendable
|
||||
}
|
||||
|
||||
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
|
||||
|
||||
func isNilPtr(x interface{}) bool {
|
||||
v := reflect.ValueOf(x)
|
||||
return v.Kind() == reflect.Ptr && v.IsNil()
|
||||
}
|
||||
|
||||
// XXX_InternalExtensions is an internal representation of proto extensions.
|
||||
//
|
||||
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
|
||||
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
|
||||
//
|
||||
// The methods of XXX_InternalExtensions are not concurrency safe in general,
|
||||
// but calls to logically read-only methods such as has and get may be executed concurrently.
|
||||
type XXX_InternalExtensions struct {
|
||||
// The struct must be indirect so that if a user inadvertently copies a
|
||||
// generated message and its embedded XXX_InternalExtensions, they
|
||||
// avoid the mayhem of a copied mutex.
|
||||
//
|
||||
// The mutex serializes all logically read-only operations to p.extensionMap.
|
||||
// It is up to the client to ensure that write operations to p.extensionMap are
|
||||
// mutually exclusive with other accesses.
|
||||
p *struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
}
|
||||
}
|
||||
|
||||
// extensionsWrite returns the extension map, creating it on first use.
|
||||
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
|
||||
if e.p == nil {
|
||||
e.p = new(struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
})
|
||||
e.p.extensionMap = make(map[int32]Extension)
|
||||
}
|
||||
return e.p.extensionMap
|
||||
}
|
||||
|
||||
// extensionsRead returns the extensions map for read-only use. It may be nil.
|
||||
// The caller must hold the returned mutex's lock when accessing Elements within the map.
|
||||
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
if e.p == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return e.p.extensionMap, &e.p.mu
|
||||
}
|
||||
|
||||
// ExtensionDesc represents an extension specification.
|
||||
// Used in generated code from the protocol compiler.
|
||||
|
@ -69,6 +167,7 @@ type ExtensionDesc struct {
|
|||
Field int32 // field number
|
||||
Name string // fully-qualified name of extension, for text formatting
|
||||
Tag string // protobuf tag style
|
||||
Filename string // name of the file in which the extension is defined
|
||||
}
|
||||
|
||||
func (ed *ExtensionDesc) repeated() bool {
|
||||
|
@ -92,8 +191,13 @@ type Extension struct {
|
|||
}
|
||||
|
||||
// SetRawExtension is for testing only.
|
||||
func SetRawExtension(base extendableProto, id int32, b []byte) {
|
||||
base.ExtensionMap()[id] = Extension{enc: b}
|
||||
func SetRawExtension(base Message, id int32, b []byte) {
|
||||
epb, err := extendable(base)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[id] = Extension{enc: b}
|
||||
}
|
||||
|
||||
// isExtensionField returns true iff the given field number is in an extension range.
|
||||
|
@ -108,9 +212,13 @@ func isExtensionField(pb extendableProto, field int32) bool {
|
|||
|
||||
// checkExtensionTypes checks that the given extension is valid for pb.
|
||||
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
|
||||
var pbi interface{} = pb
|
||||
// Check the extended type.
|
||||
if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
|
||||
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
|
||||
if ea, ok := pbi.(extensionAdapter); ok {
|
||||
pbi = ea.extendableProtoV1
|
||||
}
|
||||
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
|
||||
return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
|
||||
}
|
||||
// Check the range.
|
||||
if !isExtensionField(pb, extension.Field) {
|
||||
|
@ -155,80 +263,62 @@ func extensionProperties(ed *ExtensionDesc) *Properties {
|
|||
return prop
|
||||
}
|
||||
|
||||
// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
|
||||
func encodeExtensionMap(m map[int32]Extension) error {
|
||||
for k, e := range m {
|
||||
if e.value == nil || e.desc == nil {
|
||||
// Extension is only in its encoded form.
|
||||
continue
|
||||
}
|
||||
|
||||
// We don't skip extensions that have an encoded form set,
|
||||
// because the extension value may have been mutated after
|
||||
// the last time this function was called.
|
||||
|
||||
et := reflect.TypeOf(e.desc.ExtensionType)
|
||||
props := extensionProperties(e.desc)
|
||||
|
||||
p := NewBuffer(nil)
|
||||
// If e.value has type T, the encoder expects a *struct{ X T }.
|
||||
// Pass a *T with a zero field and hope it all works out.
|
||||
x := reflect.New(et)
|
||||
x.Elem().Set(reflect.ValueOf(e.value))
|
||||
if err := props.enc(p, props, toStructPointer(x)); err != nil {
|
||||
return err
|
||||
}
|
||||
e.enc = p.buf
|
||||
m[k] = e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func sizeExtensionMap(m map[int32]Extension) (n int) {
|
||||
for _, e := range m {
|
||||
if e.value == nil || e.desc == nil {
|
||||
// Extension is only in its encoded form.
|
||||
n += len(e.enc)
|
||||
continue
|
||||
}
|
||||
|
||||
// We don't skip extensions that have an encoded form set,
|
||||
// because the extension value may have been mutated after
|
||||
// the last time this function was called.
|
||||
|
||||
et := reflect.TypeOf(e.desc.ExtensionType)
|
||||
props := extensionProperties(e.desc)
|
||||
|
||||
// If e.value has type T, the encoder expects a *struct{ X T }.
|
||||
// Pass a *T with a zero field and hope it all works out.
|
||||
x := reflect.New(et)
|
||||
x.Elem().Set(reflect.ValueOf(e.value))
|
||||
n += props.size(props, toStructPointer(x))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// HasExtension returns whether the given extension is present in pb.
|
||||
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
|
||||
func HasExtension(pb Message, extension *ExtensionDesc) bool {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
_, ok := pb.ExtensionMap()[extension.Field]
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
extmap, mu := epb.extensionsRead()
|
||||
if extmap == nil {
|
||||
return false
|
||||
}
|
||||
mu.Lock()
|
||||
_, ok := extmap[extension.Field]
|
||||
mu.Unlock()
|
||||
return ok
|
||||
}
|
||||
|
||||
// ClearExtension removes the given extension from pb.
|
||||
func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
|
||||
func ClearExtension(pb Message, extension *ExtensionDesc) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
delete(pb.ExtensionMap(), extension.Field)
|
||||
extmap := epb.extensionsWrite()
|
||||
delete(extmap, extension.Field)
|
||||
}
|
||||
|
||||
// GetExtension parses and returns the given extension of pb.
|
||||
// If the extension is not present and has no default value it returns ErrMissingExtension.
|
||||
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
|
||||
if err := checkExtensionTypes(pb, extension); err != nil {
|
||||
// GetExtension retrieves a proto2 extended field from pb.
|
||||
//
|
||||
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
|
||||
// then GetExtension parses the encoded field and returns a Go value of the specified type.
|
||||
// If the field is not present, then the default value is returned (if one is specified),
|
||||
// otherwise ErrMissingExtension is reported.
|
||||
//
|
||||
// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
|
||||
// then GetExtension returns the raw encoded bytes of the field extension.
|
||||
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
emap := pb.ExtensionMap()
|
||||
if extension.ExtendedType != nil {
|
||||
// can only check type if this is a complete descriptor
|
||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
emap, mu := epb.extensionsRead()
|
||||
if emap == nil {
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
e, ok := emap[extension.Field]
|
||||
if !ok {
|
||||
// defaultExtensionValue returns the default value or
|
||||
|
@ -247,6 +337,11 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er
|
|||
return e.value, nil
|
||||
}
|
||||
|
||||
if extension.ExtensionType == nil {
|
||||
// incomplete descriptor
|
||||
return e.enc, nil
|
||||
}
|
||||
|
||||
v, err := decodeExtension(e.enc, extension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -264,6 +359,11 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er
|
|||
// defaultExtensionValue returns the default value for extension.
|
||||
// If no default for an extension is defined ErrMissingExtension is returned.
|
||||
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
|
||||
if extension.ExtensionType == nil {
|
||||
// incomplete descriptor, so no default
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
props := extensionProperties(extension)
|
||||
|
||||
|
@ -298,32 +398,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
|
|||
|
||||
// decodeExtension decodes an extension encoded in b.
|
||||
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
|
||||
o := NewBuffer(b)
|
||||
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
rep := extension.repeated()
|
||||
|
||||
props := extensionProperties(extension)
|
||||
unmarshal := typeUnmarshaler(t, extension.Tag)
|
||||
|
||||
// t is a pointer to a struct, pointer to basic type or a slice.
|
||||
// Allocate a "field" to store the pointer/slice itself; the
|
||||
// pointer/slice will be stored here. We pass
|
||||
// the address of this field to props.dec.
|
||||
// This passes a zero field and a *t and lets props.dec
|
||||
// interpret it as a *struct{ x t }.
|
||||
// Allocate space to store the pointer/slice.
|
||||
value := reflect.New(t).Elem()
|
||||
|
||||
var err error
|
||||
for {
|
||||
// Discard wire type and field number varint. It isn't needed.
|
||||
if _, err := o.DecodeVarint(); err != nil {
|
||||
x, n := decodeVarint(b)
|
||||
if n == 0 {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
b = b[n:]
|
||||
wire := int(x) & 7
|
||||
|
||||
b, err = unmarshal(b, valToPointer(value.Addr()), wire)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !rep || o.index >= len(o.buf) {
|
||||
if len(b) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
@ -333,10 +429,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
|
|||
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
|
||||
// The returned slice has the same length as es; missing extensions will appear as nil elements.
|
||||
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
|
||||
epb, ok := pb.(extendableProto)
|
||||
if !ok {
|
||||
err = errors.New("proto: not an extendable proto")
|
||||
return
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
extensions = make([]interface{}, len(es))
|
||||
for i, e := range es {
|
||||
|
@ -351,9 +446,44 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e
|
|||
return
|
||||
}
|
||||
|
||||
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
|
||||
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
|
||||
// just the Field field, which defines the extension's field number.
|
||||
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
registeredExtensions := RegisteredExtensions(pb)
|
||||
|
||||
emap, mu := epb.extensionsRead()
|
||||
if emap == nil {
|
||||
return nil, nil
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
extensions := make([]*ExtensionDesc, 0, len(emap))
|
||||
for extid, e := range emap {
|
||||
desc := e.desc
|
||||
if desc == nil {
|
||||
desc = registeredExtensions[extid]
|
||||
if desc == nil {
|
||||
desc = &ExtensionDesc{Field: extid}
|
||||
}
|
||||
}
|
||||
|
||||
extensions = append(extensions, desc)
|
||||
}
|
||||
return extensions, nil
|
||||
}
|
||||
|
||||
// SetExtension sets the specified extension of pb to the specified value.
|
||||
func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
|
||||
if err := checkExtensionTypes(pb, extension); err != nil {
|
||||
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
||||
return err
|
||||
}
|
||||
typ := reflect.TypeOf(extension.ExtensionType)
|
||||
|
@ -369,10 +499,23 @@ func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{
|
|||
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
|
||||
}
|
||||
|
||||
pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[extension.Field] = Extension{desc: extension, value: value}
|
||||
return nil
|
||||
}
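// Editorial usage sketch (not in the upstream source; "pb.MyMessage" and
// "pb.E_Greeting" are hypothetical generated message and extension
// descriptors for a proto2 string extension):
//
//	m := &pb.MyMessage{}
//	if err := proto.SetExtension(m, pb.E_Greeting, proto.String("hi")); err != nil {
//		log.Fatal(err)
//	}
//	if proto.HasExtension(m, pb.E_Greeting) {
//		v, _ := proto.GetExtension(m, pb.E_Greeting) // interface{}, here *string
//		fmt.Println(*v.(*string))
//	}
//	proto.ClearExtension(m, pb.E_Greeting)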
|
||||
|
||||
// ClearAllExtensions clears all extensions from pb.
|
||||
func ClearAllExtensions(pb Message) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
m := epb.extensionsWrite()
|
||||
for k := range m {
|
||||
delete(m, k)
|
||||
}
|
||||
}
|
||||
|
||||
// A global registry of extensions.
|
||||
// The generated code will register the generated descriptors by calling RegisterExtension.
|
||||
|
||||
|
|
88
vendor/github.com/golang/protobuf/proto/lib.go
generated
vendored
|
@ -70,6 +70,11 @@ for a protocol buffer variable v:
|
|||
with distinguished wrapper types for each possible field value.
|
||||
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
||||
|
||||
When the .proto file specifies `syntax="proto3"`, there are some differences:
|
||||
|
||||
- Non-repeated fields of non-message type are values instead of pointers.
|
||||
- Enum types do not get an Enum method.
|
||||
|
||||
The simplest way to describe this is to see an example.
|
||||
Given file test.proto, containing
|
||||
|
||||
|
@ -216,7 +221,7 @@ The resulting file, test.pb.go, is:
|
|||
|
||||
To create and play with a Test object:
|
||||
|
||||
package main
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
@ -229,6 +234,7 @@ package main
|
|||
test := &pb.Test{
|
||||
Label: proto.String("hello"),
|
||||
Type: proto.Int32(17),
|
||||
Reps: []int64{1, 2, 3},
|
||||
Optionalgroup: &pb.Test_OptionalGroup{
|
||||
RequiredField: proto.String("good bye"),
|
||||
},
|
||||
|
@ -259,6 +265,7 @@ package proto
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
|
@ -267,6 +274,8 @@ import (
|
|||
"sync"
|
||||
)
|
||||
|
||||
var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string")
|
||||
|
||||
// Message is implemented by generated protocol buffer messages.
|
||||
type Message interface {
|
||||
Reset()
|
||||
|
@ -301,18 +310,9 @@ func GetStats() Stats { return stats }
|
|||
// temporary Buffer and are fine for most applications.
|
||||
type Buffer struct {
|
||||
buf []byte // encode/decode byte stream
|
||||
index int // write point
|
||||
index int // read point
|
||||
|
||||
// pools of basic types to amortize allocation.
|
||||
bools []bool
|
||||
uint32s []uint32
|
||||
uint64s []uint64
|
||||
|
||||
// extra pools, only used with pointer_reflect.go
|
||||
int32s []int32
|
||||
int64s []int64
|
||||
float32s []float32
|
||||
float64s []float64
|
||||
deterministic bool
|
||||
}
|
||||
|
||||
// NewBuffer allocates a new Buffer and initializes its internal data to
|
||||
|
@ -337,6 +337,30 @@ func (p *Buffer) SetBuf(s []byte) {
|
|||
// Bytes returns the contents of the Buffer.
|
||||
func (p *Buffer) Bytes() []byte { return p.buf }
|
||||
|
||||
// SetDeterministic sets whether to use deterministic serialization.
|
||||
//
|
||||
// Deterministic serialization guarantees that for a given binary, equal
|
||||
// messages will always be serialized to the same bytes. This implies:
|
||||
//
|
||||
// - Repeated serialization of a message will return the same bytes.
|
||||
// - Different processes of the same binary (which may be executing on
|
||||
// different machines) will serialize equal messages to the same bytes.
|
||||
//
|
||||
// Note that the deterministic serialization is NOT canonical across
|
||||
// languages. It is not guaranteed to remain stable over time. It is unstable
|
||||
// across different builds with schema changes due to unknown fields.
|
||||
// Users who need canonical serialization (e.g., persistent storage in a
|
||||
// canonical form, fingerprinting, etc.) should define their own
|
||||
// canonicalization specification and implement their own serializer rather
|
||||
// than relying on this API.
|
||||
//
|
||||
// If deterministic serialization is requested, map entries will be sorted
|
||||
// by keys in lexographical order. This is an implementation detail and
|
||||
// subject to change.
|
||||
func (p *Buffer) SetDeterministic(deterministic bool) {
|
||||
p.deterministic = deterministic
|
||||
}
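// Editorial usage sketch (not in the upstream source; "msg" stands for any
// generated proto.Message, typically one with map fields):
//
//	var buf proto.Buffer
//	buf.SetDeterministic(true)
//	if err := buf.Marshal(msg); err != nil {
//		log.Fatal(err)
//	}
//	wire := buf.Bytes() // map entries are serialized in sorted key order
//	_ = wire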
|
||||
|
||||
/*
|
||||
* Helper routines for simplifying the creation of optional fields of basic type.
|
||||
*/
|
||||
|
@ -825,22 +849,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes
|
|||
return sf, false, nil
|
||||
}
|
||||
|
||||
// mapKeys returns a sort.Interface to be used for sorting the map keys.
|
||||
// Map fields may have key types of non-float scalars, strings and enums.
|
||||
// The easiest way to sort them in some deterministic order is to use fmt.
|
||||
// If this turns out to be inefficient we can always consider other options,
|
||||
// such as doing a Schwartzian transform.
|
||||
|
||||
func mapKeys(vs []reflect.Value) sort.Interface {
|
||||
s := mapKeySorter{
|
||||
vs: vs,
|
||||
// default Less function: textual comparison
|
||||
less: func(a, b reflect.Value) bool {
|
||||
return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
|
||||
},
|
||||
}
|
||||
s := mapKeySorter{vs: vs}
|
||||
|
||||
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
|
||||
// numeric keys are sorted numerically.
|
||||
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
|
||||
if len(vs) == 0 {
|
||||
return s
|
||||
}
|
||||
|
@ -849,6 +863,12 @@ func mapKeys(vs []reflect.Value) sort.Interface {
|
|||
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
|
||||
case reflect.Bool:
|
||||
s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
|
||||
case reflect.String:
|
||||
s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
|
||||
}
|
||||
|
||||
return s
|
||||
|
@ -881,3 +901,21 @@ func isProto3Zero(v reflect.Value) bool {
|
|||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
const ProtoPackageIsVersion2 = true
|
||||
|
||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
const ProtoPackageIsVersion1 = true
|
||||
|
||||
// InternalMessageInfo is a type used internally by generated .pb.go files.
|
||||
// This type is not intended to be used by non-generated code.
|
||||
// This type is not subject to any compatibility guarantee.
|
||||
type InternalMessageInfo struct {
|
||||
marshal *marshalInfo
|
||||
unmarshal *unmarshalInfo
|
||||
merge *mergeInfo
|
||||
discard *discardInfo
|
||||
}
|
||||
|
|
139
vendor/github.com/golang/protobuf/proto/message_set.go
generated
vendored
|
@ -42,13 +42,14 @@ import (
|
|||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID.
|
||||
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
|
||||
// A message type ID is required for storing a protocol buffer in a message set.
|
||||
var ErrNoMessageTypeId = errors.New("proto does not have a message type ID")
|
||||
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
|
||||
|
||||
// The first two types (_MessageSet_Item and MessageSet)
|
||||
// The first two types (_MessageSet_Item and messageSet)
|
||||
// model what the protocol compiler produces for the following protocol message:
|
||||
// message MessageSet {
|
||||
// repeated group Item = 1 {
|
||||
|
@ -58,27 +59,20 @@ var ErrNoMessageTypeId = errors.New("proto does not have a message type ID")
|
|||
// }
|
||||
// That is the MessageSet wire format. We can't use a proto to generate these
|
||||
// because that would introduce a circular dependency between it and this package.
|
||||
//
|
||||
// When a proto1 proto has a field that looks like:
|
||||
// optional message<MessageSet> info = 3;
|
||||
// the protocol compiler produces a field in the generated struct that looks like:
|
||||
// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"`
|
||||
// The package is automatically inserted so there is no need for that proto file to
|
||||
// import this package.
|
||||
|
||||
type _MessageSet_Item struct {
|
||||
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
|
||||
Message []byte `protobuf:"bytes,3,req,name=message"`
|
||||
}
|
||||
|
||||
type MessageSet struct {
|
||||
type messageSet struct {
|
||||
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
|
||||
XXX_unrecognized []byte
|
||||
// TODO: caching?
|
||||
}
|
||||
|
||||
// Make sure MessageSet is a Message.
|
||||
var _ Message = (*MessageSet)(nil)
|
||||
// Make sure messageSet is a Message.
|
||||
var _ Message = (*messageSet)(nil)
|
||||
|
||||
// messageTypeIder is an interface satisfied by a protocol buffer type
|
||||
// that may be stored in a MessageSet.
|
||||
|
@ -86,7 +80,7 @@ type messageTypeIder interface {
|
|||
MessageTypeId() int32
|
||||
}
|
||||
|
||||
func (ms *MessageSet) find(pb Message) *_MessageSet_Item {
|
||||
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return nil
|
||||
|
@ -100,24 +94,21 @@ func (ms *MessageSet) find(pb Message) *_MessageSet_Item {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (ms *MessageSet) Has(pb Message) bool {
|
||||
if ms.find(pb) != nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
func (ms *messageSet) Has(pb Message) bool {
|
||||
return ms.find(pb) != nil
|
||||
}
|
||||
|
||||
func (ms *MessageSet) Unmarshal(pb Message) error {
|
||||
func (ms *messageSet) Unmarshal(pb Message) error {
|
||||
if item := ms.find(pb); item != nil {
|
||||
return Unmarshal(item.Message, pb)
|
||||
}
|
||||
if _, ok := pb.(messageTypeIder); !ok {
|
||||
return ErrNoMessageTypeId
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
return nil // TODO: return error instead?
|
||||
}
|
||||
|
||||
func (ms *MessageSet) Marshal(pb Message) error {
|
||||
func (ms *messageSet) Marshal(pb Message) error {
|
||||
msg, err := Marshal(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -130,7 +121,7 @@ func (ms *MessageSet) Marshal(pb Message) error {
|
|||
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return ErrNoMessageTypeId
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
|
||||
mtid := mti.MessageTypeId()
|
||||
|
@ -141,9 +132,9 @@ func (ms *MessageSet) Marshal(pb Message) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (ms *MessageSet) Reset() { *ms = MessageSet{} }
|
||||
func (ms *MessageSet) String() string { return CompactTextString(ms) }
|
||||
func (*MessageSet) ProtoMessage() {}
|
||||
func (ms *messageSet) Reset() { *ms = messageSet{} }
|
||||
func (ms *messageSet) String() string { return CompactTextString(ms) }
|
||||
func (*messageSet) ProtoMessage() {}
|
||||
|
||||
// Support for the message_set_wire_format message option.
|
||||
|
||||
|
@ -156,37 +147,55 @@ func skipVarint(buf []byte) []byte {
|
|||
|
||||
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
|
||||
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
|
||||
if err := encodeExtensionMap(m); err != nil {
|
||||
return nil, err
|
||||
func MarshalMessageSet(exts interface{}) ([]byte, error) {
|
||||
return marshalMessageSet(exts, false)
|
||||
}
|
||||
|
||||
// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal.
|
||||
func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
var u marshalInfo
|
||||
siz := u.sizeMessageSet(exts)
|
||||
b := make([]byte, 0, siz)
|
||||
return u.appendMessageSet(b, exts, deterministic)
|
||||
|
||||
case map[int32]Extension:
|
||||
// This is an old-style extension map.
|
||||
// Wrap it in a new-style XXX_InternalExtensions.
|
||||
ie := XXX_InternalExtensions{
|
||||
p: &struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
}{
|
||||
extensionMap: exts,
|
||||
},
|
||||
}
|
||||
|
||||
// Sort extension IDs to provide a deterministic encoding.
|
||||
// See also enc_map in encode.go.
|
||||
ids := make([]int, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, int(id))
|
||||
}
|
||||
sort.Ints(ids)
|
||||
var u marshalInfo
|
||||
siz := u.sizeMessageSet(&ie)
|
||||
b := make([]byte, 0, siz)
|
||||
return u.appendMessageSet(b, &ie, deterministic)
|
||||
|
||||
ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
|
||||
for _, id := range ids {
|
||||
e := m[int32(id)]
|
||||
// Remove the wire type and field number varint, as well as the length varint.
|
||||
msg := skipVarint(skipVarint(e.enc))
|
||||
|
||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
||||
TypeId: Int32(int32(id)),
|
||||
Message: msg,
|
||||
})
|
||||
default:
|
||||
return nil, errors.New("proto: not an extension map")
|
||||
}
|
||||
return Marshal(ms)
|
||||
}
|
||||
|
||||
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
||||
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
|
||||
ms := new(MessageSet)
|
||||
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
m = exts.extensionsWrite()
|
||||
case map[int32]Extension:
|
||||
m = exts
|
||||
default:
|
||||
return errors.New("proto: not an extension map")
|
||||
}
|
||||
|
||||
ms := new(messageSet)
|
||||
if err := Unmarshal(buf, ms); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -216,7 +225,24 @@ func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
|
|||
|
||||
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
|
||||
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
|
||||
func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
var mu sync.Locker
|
||||
m, mu = exts.extensionsRead()
|
||||
if m != nil {
|
||||
// Keep the extensions map locked until we're done marshaling to prevent
|
||||
// races between marshaling and unmarshaling the lazily-{en,de}coded
|
||||
// values.
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
}
|
||||
case map[int32]Extension:
|
||||
m = exts
|
||||
default:
|
||||
return nil, errors.New("proto: not an extension map")
|
||||
}
|
||||
var b bytes.Buffer
|
||||
b.WriteByte('{')
|
||||
|
||||
|
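The comment just above explains why the extensions map stays locked for the entire JSON marshal: lazily encoded and decoded values may be rewritten concurrently. A minimal stand-alone sketch of that hold-the-lock-while-reading pattern, using hypothetical names (store, snapshot) rather than the vendored package's internals:

package main

import (
	"fmt"
	"sync"
)

type store struct {
	mu sync.Mutex
	m  map[int32][]byte
}

// snapshot copies the map while holding the lock, so concurrent
// writers cannot mutate entries mid-serialization.
func (s *store) snapshot() map[int32][]byte {
	s.mu.Lock()
	defer s.mu.Unlock()
	out := make(map[int32][]byte, len(s.m))
	for k, v := range s.m {
		out[k] = append([]byte(nil), v...)
	}
	return out
}

func main() {
	s := &store{m: map[int32][]byte{1: {0x08, 0x01}}}
	fmt.Println(len(s.snapshot())) // 1
}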
@@ -229,15 +255,16 @@ func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
|
|||
|
||||
for i, id := range ids {
|
||||
ext := m[id]
|
||||
if i > 0 {
|
||||
b.WriteByte(',')
|
||||
}
|
||||
|
||||
msd, ok := messageSetMap[id]
|
||||
if !ok {
|
||||
// Unknown type; we can't render it, so skip it.
|
||||
continue
|
||||
}
|
||||
|
||||
if i > 0 && b.Len() > 1 {
|
||||
b.WriteByte(',')
|
||||
}
|
||||
|
||||
fmt.Fprintf(&b, `"[%s]":`, msd.name)
|
||||
|
||||
x := ext.value
|
||||
|
@@ -259,7 +286,7 @@ func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
|
|||
|
||||
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
|
||||
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
|
||||
func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
|
||||
// Common-case fast path.
|
||||
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
|
||||
return nil
|
||||
|
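As the hunks above show, MarshalMessageSet and UnmarshalMessageSet now take an interface{} and type-switch between the new XXX_InternalExtensions container and the legacy map[int32]Extension. A minimal stand-alone sketch of that accept-either-representation dispatch, with stand-in types (extension, newExts, countExts) that are not part of the vendored package:

package main

import (
	"errors"
	"fmt"
)

type extension struct{ enc []byte }

// newExts stands in for the library's internal extensions container.
type newExts struct{ m map[int32]extension }

// countExts accepts either the new container or the legacy map,
// mirroring the type switch introduced in the diff above.
func countExts(exts interface{}) (int, error) {
	switch e := exts.(type) {
	case *newExts:
		return len(e.m), nil
	case map[int32]extension:
		return len(e), nil
	default:
		return 0, errors.New("not an extension map")
	}
}

func main() {
	n, err := countExts(map[int32]extension{1: {enc: []byte{0x0a}}})
	fmt.Println(n, err) // 1 <nil>
}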
632 vendor/github.com/golang/protobuf/proto/pointer_reflect.go generated vendored
@@ -29,7 +29,7 @@
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build appengine
|
||||
// +build purego appengine js
|
||||
|
||||
// This file contains an implementation of proto field accesses using package reflect.
|
||||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
||||
|
@@ -38,32 +38,13 @@
|
|||
package proto
|
||||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A structPointer is a pointer to a struct.
|
||||
type structPointer struct {
|
||||
v reflect.Value
|
||||
}
|
||||
const unsafeAllowed = false
|
||||
|
||||
// toStructPointer returns a structPointer equivalent to the given reflect value.
|
||||
// The reflect value must itself be a pointer to a struct.
|
||||
func toStructPointer(v reflect.Value) structPointer {
|
||||
return structPointer{v}
|
||||
}
|
||||
|
||||
// IsNil reports whether p is nil.
|
||||
func structPointer_IsNil(p structPointer) bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
// Interface returns the struct pointer as an interface value.
|
||||
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
|
||||
return p.v.Interface()
|
||||
}
|
||||
|
||||
// A field identifies a field in a struct, accessible from a structPointer.
|
||||
// A field identifies a field in a struct, accessible from a pointer.
|
||||
// In this implementation, a field is identified by the sequence of field indices
|
||||
// passed to reflect's FieldByIndex.
|
||||
type field []int
|
||||
|
@@ -76,404 +57,301 @@ func toField(f *reflect.StructField) field {
|
|||
// invalidField is an invalid field identifier.
|
||||
var invalidField = field(nil)
|
||||
|
||||
// zeroField is a noop when calling pointer.offset.
|
||||
var zeroField = field([]int{})
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool { return f != nil }
|
||||
|
||||
// field returns the given field in the struct as a reflect value.
|
||||
func structPointer_field(p structPointer, f field) reflect.Value {
|
||||
// Special case: an extension map entry with a value of type T
|
||||
// passes a *T to the struct-handling code with a zero field,
|
||||
// expecting that it will be treated as equivalent to *struct{ X T },
|
||||
// which has the same memory layout. We have to handle that case
|
||||
// specially, because reflect will panic if we call FieldByIndex on a
|
||||
// non-struct.
|
||||
if f == nil {
|
||||
return p.v.Elem()
|
||||
}
|
||||
|
||||
return p.v.Elem().FieldByIndex(f)
|
||||
}
|
||||
|
||||
// ifield returns the given field in the struct as an interface value.
|
||||
func structPointer_ifield(p structPointer, f field) interface{} {
|
||||
return structPointer_field(p, f).Addr().Interface()
|
||||
}
|
||||
|
||||
// Bytes returns the address of a []byte field in the struct.
|
||||
func structPointer_Bytes(p structPointer, f field) *[]byte {
|
||||
return structPointer_ifield(p, f).(*[]byte)
|
||||
}
|
||||
|
||||
// BytesSlice returns the address of a [][]byte field in the struct.
|
||||
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
|
||||
return structPointer_ifield(p, f).(*[][]byte)
|
||||
}
|
||||
|
||||
// Bool returns the address of a *bool field in the struct.
|
||||
func structPointer_Bool(p structPointer, f field) **bool {
|
||||
return structPointer_ifield(p, f).(**bool)
|
||||
}
|
||||
|
||||
// BoolVal returns the address of a bool field in the struct.
|
||||
func structPointer_BoolVal(p structPointer, f field) *bool {
|
||||
return structPointer_ifield(p, f).(*bool)
|
||||
}
|
||||
|
||||
// BoolSlice returns the address of a []bool field in the struct.
|
||||
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
|
||||
return structPointer_ifield(p, f).(*[]bool)
|
||||
}
|
||||
|
||||
// String returns the address of a *string field in the struct.
|
||||
func structPointer_String(p structPointer, f field) **string {
|
||||
return structPointer_ifield(p, f).(**string)
|
||||
}
|
||||
|
||||
// StringVal returns the address of a string field in the struct.
|
||||
func structPointer_StringVal(p structPointer, f field) *string {
|
||||
return structPointer_ifield(p, f).(*string)
|
||||
}
|
||||
|
||||
// StringSlice returns the address of a []string field in the struct.
|
||||
func structPointer_StringSlice(p structPointer, f field) *[]string {
|
||||
return structPointer_ifield(p, f).(*[]string)
|
||||
}
|
||||
|
||||
// ExtMap returns the address of an extension map field in the struct.
|
||||
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
|
||||
return structPointer_ifield(p, f).(*map[int32]Extension)
|
||||
}
|
||||
|
||||
// NewAt returns the reflect.Value for a pointer to a field in the struct.
|
||||
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
|
||||
return structPointer_field(p, f).Addr()
|
||||
}
|
||||
|
||||
// SetStructPointer writes a *struct field in the struct.
|
||||
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
|
||||
structPointer_field(p, f).Set(q.v)
|
||||
}
|
||||
|
||||
// GetStructPointer reads a *struct field in the struct.
|
||||
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
|
||||
return structPointer{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// StructPointerSlice the address of a []*struct field in the struct.
|
||||
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
|
||||
return structPointerSlice{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// A structPointerSlice represents the address of a slice of pointers to structs
|
||||
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
|
||||
type structPointerSlice struct {
|
||||
// The pointer type is for the table-driven decoder.
|
||||
// The implementation here uses a reflect.Value of pointer type to
|
||||
// create a generic pointer. In pointer_unsafe.go we use unsafe
|
||||
// instead of reflect to implement the same (but faster) interface.
|
||||
type pointer struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func (p structPointerSlice) Len() int { return p.v.Len() }
|
||||
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
|
||||
func (p structPointerSlice) Append(q structPointer) {
|
||||
p.v.Set(reflect.Append(p.v, q.v))
|
||||
// toPointer converts an interface of pointer type to a pointer
|
||||
// that points to the same target.
|
||||
func toPointer(i *Message) pointer {
|
||||
return pointer{v: reflect.ValueOf(*i)}
|
||||
}
|
||||
|
||||
var (
|
||||
int32Type = reflect.TypeOf(int32(0))
|
||||
uint32Type = reflect.TypeOf(uint32(0))
|
||||
float32Type = reflect.TypeOf(float32(0))
|
||||
int64Type = reflect.TypeOf(int64(0))
|
||||
uint64Type = reflect.TypeOf(uint64(0))
|
||||
float64Type = reflect.TypeOf(float64(0))
|
||||
)
|
||||
|
||||
// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
|
||||
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
|
||||
type word32 struct {
|
||||
v reflect.Value
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr bool) pointer {
|
||||
v := reflect.ValueOf(*i)
|
||||
u := reflect.New(v.Type())
|
||||
u.Elem().Set(v)
|
||||
return pointer{v: u}
|
||||
}
|
||||
|
||||
// IsNil reports whether p is nil.
|
||||
func word32_IsNil(p word32) bool {
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
func valToPointer(v reflect.Value) pointer {
|
||||
return pointer{v: v}
|
||||
}
|
||||
|
||||
// offset converts from a pointer to a structure to a pointer to
|
||||
// one of its fields.
|
||||
func (p pointer) offset(f field) pointer {
|
||||
return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
|
||||
}
|
||||
|
||||
func (p pointer) isNil() bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
// Set sets p to point at a newly allocated word with bits set to x.
|
||||
func word32_Set(p word32, o *Buffer, x uint32) {
|
||||
t := p.v.Type().Elem()
|
||||
switch t {
|
||||
case int32Type:
|
||||
if len(o.int32s) == 0 {
|
||||
o.int32s = make([]int32, uint32PoolSize)
|
||||
}
|
||||
o.int32s[0] = int32(x)
|
||||
p.v.Set(reflect.ValueOf(&o.int32s[0]))
|
||||
o.int32s = o.int32s[1:]
|
||||
return
|
||||
case uint32Type:
|
||||
if len(o.uint32s) == 0 {
|
||||
o.uint32s = make([]uint32, uint32PoolSize)
|
||||
}
|
||||
o.uint32s[0] = x
|
||||
p.v.Set(reflect.ValueOf(&o.uint32s[0]))
|
||||
o.uint32s = o.uint32s[1:]
|
||||
return
|
||||
case float32Type:
|
||||
if len(o.float32s) == 0 {
|
||||
o.float32s = make([]float32, uint32PoolSize)
|
||||
}
|
||||
o.float32s[0] = math.Float32frombits(x)
|
||||
p.v.Set(reflect.ValueOf(&o.float32s[0]))
|
||||
o.float32s = o.float32s[1:]
|
||||
return
|
||||
}
|
||||
|
||||
// must be enum
|
||||
p.v.Set(reflect.New(t))
|
||||
p.v.Elem().SetInt(int64(int32(x)))
|
||||
}
|
||||
|
||||
// Get gets the bits pointed at by p, as a uint32.
|
||||
func word32_Get(p word32) uint32 {
|
||||
elem := p.v.Elem()
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
return uint32(elem.Int())
|
||||
case reflect.Uint32:
|
||||
return uint32(elem.Uint())
|
||||
case reflect.Float32:
|
||||
return math.Float32bits(float32(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
|
||||
func structPointer_Word32(p structPointer, f field) word32 {
|
||||
return word32{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// A word32Val represents a field of type int32, uint32, float32, or enum.
|
||||
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
|
||||
type word32Val struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// Set sets *p to x.
|
||||
func word32Val_Set(p word32Val, x uint32) {
|
||||
switch p.v.Type() {
|
||||
case int32Type:
|
||||
p.v.SetInt(int64(x))
|
||||
return
|
||||
case uint32Type:
|
||||
p.v.SetUint(uint64(x))
|
||||
return
|
||||
case float32Type:
|
||||
p.v.SetFloat(float64(math.Float32frombits(x)))
|
||||
return
|
||||
}
|
||||
|
||||
// must be enum
|
||||
p.v.SetInt(int64(int32(x)))
|
||||
}
|
||||
|
||||
// Get gets the bits pointed at by p, as a uint32.
|
||||
func word32Val_Get(p word32Val) uint32 {
|
||||
elem := p.v
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
return uint32(elem.Int())
|
||||
case reflect.Uint32:
|
||||
return uint32(elem.Uint())
|
||||
case reflect.Float32:
|
||||
return math.Float32bits(float32(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
|
||||
func structPointer_Word32Val(p structPointer, f field) word32Val {
|
||||
return word32Val{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
// A word32Slice is a slice of 32-bit values.
|
||||
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
|
||||
type word32Slice struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func (p word32Slice) Append(x uint32) {
|
||||
n, m := p.v.Len(), p.v.Cap()
|
||||
// grow updates the slice s in place to make it one element longer.
|
||||
// s must be addressable.
|
||||
// Returns the (addressable) new element.
|
||||
func grow(s reflect.Value) reflect.Value {
|
||||
n, m := s.Len(), s.Cap()
|
||||
if n < m {
|
||||
p.v.SetLen(n + 1)
|
||||
s.SetLen(n + 1)
|
||||
} else {
|
||||
t := p.v.Type().Elem()
|
||||
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
|
||||
}
|
||||
elem := p.v.Index(n)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
elem.SetInt(int64(int32(x)))
|
||||
case reflect.Uint32:
|
||||
elem.SetUint(uint64(x))
|
||||
case reflect.Float32:
|
||||
elem.SetFloat(float64(math.Float32frombits(x)))
|
||||
s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
|
||||
}
|
||||
return s.Index(n)
|
||||
}
|
||||
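The grow helper above replaces the per-type Append methods: it lengthens an addressable slice by one element via reflect and returns the new, settable element. A minimal sketch of the same idea outside the package (growOnce is a stand-in name):

package main

import (
	"fmt"
	"reflect"
)

// growOnce extends an addressable slice value by one element and
// returns that element, reusing capacity when possible.
func growOnce(s reflect.Value) reflect.Value {
	n, m := s.Len(), s.Cap()
	if n < m {
		s.SetLen(n + 1)
	} else {
		s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
	}
	return s.Index(n)
}

func main() {
	xs := []int32{1, 2}
	v := reflect.ValueOf(&xs).Elem() // addressable []int32
	growOnce(v).SetInt(3)
	fmt.Println(xs) // [1 2 3]
}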
|
||||
func (p word32Slice) Len() int {
|
||||
return p.v.Len()
|
||||
func (p pointer) toInt64() *int64 {
|
||||
return p.v.Interface().(*int64)
|
||||
}
|
||||
func (p pointer) toInt64Ptr() **int64 {
|
||||
return p.v.Interface().(**int64)
|
||||
}
|
||||
func (p pointer) toInt64Slice() *[]int64 {
|
||||
return p.v.Interface().(*[]int64)
|
||||
}
|
||||
|
||||
func (p word32Slice) Index(i int) uint32 {
|
||||
elem := p.v.Index(i)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int32:
|
||||
return uint32(elem.Int())
|
||||
case reflect.Uint32:
|
||||
return uint32(elem.Uint())
|
||||
case reflect.Float32:
|
||||
return math.Float32bits(float32(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
var int32ptr = reflect.TypeOf((*int32)(nil))
|
||||
|
||||
func (p pointer) toInt32() *int32 {
|
||||
return p.v.Convert(int32ptr).Interface().(*int32)
|
||||
}
|
||||
|
||||
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
|
||||
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
|
||||
return word32Slice{structPointer_field(p, f)}
|
||||
// The toInt32Ptr/Slice methods don't work because of enums.
|
||||
// Instead, we must use set/get methods for the int32ptr/slice case.
|
||||
/*
|
||||
func (p pointer) toInt32Ptr() **int32 {
|
||||
return p.v.Interface().(**int32)
|
||||
}
|
||||
func (p pointer) toInt32Slice() *[]int32 {
|
||||
return p.v.Interface().(*[]int32)
|
||||
}
|
||||
*/
|
||||
func (p pointer) getInt32Ptr() *int32 {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
return p.v.Elem().Interface().(*int32)
|
||||
}
|
||||
// an enum
|
||||
return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
|
||||
}
|
||||
func (p pointer) setInt32Ptr(v int32) {
|
||||
// Allocate value in a *int32. Possibly convert that to a *enum.
|
||||
// Then assign it to a **int32 or **enum.
|
||||
// Note: we can convert *int32 to *enum, but we can't convert
|
||||
// **int32 to **enum!
|
||||
p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
|
||||
}
|
||||
|
||||
// word64 is like word32 but for 64-bit values.
|
||||
type word64 struct {
|
||||
v reflect.Value
|
||||
// getInt32Slice copies []int32 from p as a new slice.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) getInt32Slice() []int32 {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
return p.v.Elem().Interface().([]int32)
|
||||
}
|
||||
// an enum
|
||||
// Allocate a []int32, then assign []enum's values into it.
|
||||
// Note: we can't convert []enum to []int32.
|
||||
slice := p.v.Elem()
|
||||
s := make([]int32, slice.Len())
|
||||
for i := 0; i < slice.Len(); i++ {
|
||||
s[i] = int32(slice.Index(i).Int())
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func word64_Set(p word64, o *Buffer, x uint64) {
|
||||
t := p.v.Type().Elem()
|
||||
switch t {
|
||||
case int64Type:
|
||||
if len(o.int64s) == 0 {
|
||||
o.int64s = make([]int64, uint64PoolSize)
|
||||
}
|
||||
o.int64s[0] = int64(x)
|
||||
p.v.Set(reflect.ValueOf(&o.int64s[0]))
|
||||
o.int64s = o.int64s[1:]
|
||||
return
|
||||
case uint64Type:
|
||||
if len(o.uint64s) == 0 {
|
||||
o.uint64s = make([]uint64, uint64PoolSize)
|
||||
}
|
||||
o.uint64s[0] = x
|
||||
p.v.Set(reflect.ValueOf(&o.uint64s[0]))
|
||||
o.uint64s = o.uint64s[1:]
|
||||
return
|
||||
case float64Type:
|
||||
if len(o.float64s) == 0 {
|
||||
o.float64s = make([]float64, uint64PoolSize)
|
||||
}
|
||||
o.float64s[0] = math.Float64frombits(x)
|
||||
p.v.Set(reflect.ValueOf(&o.float64s[0]))
|
||||
o.float64s = o.float64s[1:]
|
||||
// setInt32Slice copies []int32 into p as a new slice.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) setInt32Slice(v []int32) {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
p.v.Elem().Set(reflect.ValueOf(v))
|
||||
return
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func word64_IsNil(p word64) bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
func word64_Get(p word64) uint64 {
|
||||
elem := p.v.Elem()
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
return uint64(elem.Int())
|
||||
case reflect.Uint64:
|
||||
return elem.Uint()
|
||||
case reflect.Float64:
|
||||
return math.Float64bits(elem.Float())
|
||||
// an enum
|
||||
// Allocate a []enum, then assign []int32's values into it.
|
||||
// Note: we can't convert []enum to []int32.
|
||||
slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
|
||||
for i, x := range v {
|
||||
slice.Index(i).SetInt(int64(x))
|
||||
}
|
||||
panic("unreachable")
|
||||
p.v.Elem().Set(slice)
|
||||
}
|
||||
func (p pointer) appendInt32Slice(v int32) {
|
||||
grow(p.v.Elem()).SetInt(int64(v))
|
||||
}
|
||||
|
||||
func structPointer_Word64(p structPointer, f field) word64 {
|
||||
return word64{structPointer_field(p, f)}
|
||||
func (p pointer) toUint64() *uint64 {
|
||||
return p.v.Interface().(*uint64)
|
||||
}
|
||||
func (p pointer) toUint64Ptr() **uint64 {
|
||||
return p.v.Interface().(**uint64)
|
||||
}
|
||||
func (p pointer) toUint64Slice() *[]uint64 {
|
||||
return p.v.Interface().(*[]uint64)
|
||||
}
|
||||
func (p pointer) toUint32() *uint32 {
|
||||
return p.v.Interface().(*uint32)
|
||||
}
|
||||
func (p pointer) toUint32Ptr() **uint32 {
|
||||
return p.v.Interface().(**uint32)
|
||||
}
|
||||
func (p pointer) toUint32Slice() *[]uint32 {
|
||||
return p.v.Interface().(*[]uint32)
|
||||
}
|
||||
func (p pointer) toBool() *bool {
|
||||
return p.v.Interface().(*bool)
|
||||
}
|
||||
func (p pointer) toBoolPtr() **bool {
|
||||
return p.v.Interface().(**bool)
|
||||
}
|
||||
func (p pointer) toBoolSlice() *[]bool {
|
||||
return p.v.Interface().(*[]bool)
|
||||
}
|
||||
func (p pointer) toFloat64() *float64 {
|
||||
return p.v.Interface().(*float64)
|
||||
}
|
||||
func (p pointer) toFloat64Ptr() **float64 {
|
||||
return p.v.Interface().(**float64)
|
||||
}
|
||||
func (p pointer) toFloat64Slice() *[]float64 {
|
||||
return p.v.Interface().(*[]float64)
|
||||
}
|
||||
func (p pointer) toFloat32() *float32 {
|
||||
return p.v.Interface().(*float32)
|
||||
}
|
||||
func (p pointer) toFloat32Ptr() **float32 {
|
||||
return p.v.Interface().(**float32)
|
||||
}
|
||||
func (p pointer) toFloat32Slice() *[]float32 {
|
||||
return p.v.Interface().(*[]float32)
|
||||
}
|
||||
func (p pointer) toString() *string {
|
||||
return p.v.Interface().(*string)
|
||||
}
|
||||
func (p pointer) toStringPtr() **string {
|
||||
return p.v.Interface().(**string)
|
||||
}
|
||||
func (p pointer) toStringSlice() *[]string {
|
||||
return p.v.Interface().(*[]string)
|
||||
}
|
||||
func (p pointer) toBytes() *[]byte {
|
||||
return p.v.Interface().(*[]byte)
|
||||
}
|
||||
func (p pointer) toBytesSlice() *[][]byte {
|
||||
return p.v.Interface().(*[][]byte)
|
||||
}
|
||||
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
||||
return p.v.Interface().(*XXX_InternalExtensions)
|
||||
}
|
||||
func (p pointer) toOldExtensions() *map[int32]Extension {
|
||||
return p.v.Interface().(*map[int32]Extension)
|
||||
}
|
||||
func (p pointer) getPointer() pointer {
|
||||
return pointer{v: p.v.Elem()}
|
||||
}
|
||||
func (p pointer) setPointer(q pointer) {
|
||||
p.v.Elem().Set(q.v)
|
||||
}
|
||||
func (p pointer) appendPointer(q pointer) {
|
||||
grow(p.v.Elem()).Set(q.v)
|
||||
}
|
||||
|
||||
// word64Val is like word32Val but for 64-bit values.
|
||||
type word64Val struct {
|
||||
v reflect.Value
|
||||
// getPointerSlice copies []*T from p as a new []pointer.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) getPointerSlice() []pointer {
|
||||
if p.v.IsNil() {
|
||||
return nil
|
||||
}
|
||||
n := p.v.Elem().Len()
|
||||
s := make([]pointer, n)
|
||||
for i := 0; i < n; i++ {
|
||||
s[i] = pointer{v: p.v.Elem().Index(i)}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
|
||||
switch p.v.Type() {
|
||||
case int64Type:
|
||||
p.v.SetInt(int64(x))
|
||||
return
|
||||
case uint64Type:
|
||||
p.v.SetUint(x)
|
||||
return
|
||||
case float64Type:
|
||||
p.v.SetFloat(math.Float64frombits(x))
|
||||
// setPointerSlice copies []pointer into p as a new []*T.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) setPointerSlice(v []pointer) {
|
||||
if v == nil {
|
||||
p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
|
||||
return
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func word64Val_Get(p word64Val) uint64 {
|
||||
elem := p.v
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
return uint64(elem.Int())
|
||||
case reflect.Uint64:
|
||||
return elem.Uint()
|
||||
case reflect.Float64:
|
||||
return math.Float64bits(elem.Float())
|
||||
s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
|
||||
for _, p := range v {
|
||||
s = reflect.Append(s, p.v)
|
||||
}
|
||||
panic("unreachable")
|
||||
p.v.Elem().Set(s)
|
||||
}
|
||||
|
||||
func structPointer_Word64Val(p structPointer, f field) word64Val {
|
||||
return word64Val{structPointer_field(p, f)}
|
||||
}
|
||||
|
||||
type word64Slice struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
func (p word64Slice) Append(x uint64) {
|
||||
n, m := p.v.Len(), p.v.Cap()
|
||||
if n < m {
|
||||
p.v.SetLen(n + 1)
|
||||
} else {
|
||||
t := p.v.Type().Elem()
|
||||
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
|
||||
}
|
||||
elem := p.v.Index(n)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
elem.SetInt(int64(int64(x)))
|
||||
case reflect.Uint64:
|
||||
elem.SetUint(uint64(x))
|
||||
case reflect.Float64:
|
||||
elem.SetFloat(float64(math.Float64frombits(x)))
|
||||
// getInterfacePointer returns a pointer that points to the
|
||||
// interface data of the interface pointed by p.
|
||||
func (p pointer) getInterfacePointer() pointer {
|
||||
if p.v.Elem().IsNil() {
|
||||
return pointer{v: p.v.Elem()}
|
||||
}
|
||||
return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
|
||||
}
|
||||
|
||||
func (p word64Slice) Len() int {
|
||||
return p.v.Len()
|
||||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||
// TODO: check that p.v.Type().Elem() == t?
|
||||
return p.v
|
||||
}
|
||||
|
||||
func (p word64Slice) Index(i int) uint64 {
|
||||
elem := p.v.Index(i)
|
||||
switch elem.Kind() {
|
||||
case reflect.Int64:
|
||||
return uint64(elem.Int())
|
||||
case reflect.Uint64:
|
||||
return uint64(elem.Uint())
|
||||
case reflect.Float64:
|
||||
return math.Float64bits(float64(elem.Float()))
|
||||
}
|
||||
panic("unreachable")
|
||||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
|
||||
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
|
||||
return word64Slice{structPointer_field(p, f)}
|
||||
}
|
||||
var atomicLock sync.Mutex
|
||||
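The atomicLoad*/atomicStore* helpers above close out the reflect-based file: without unsafe.Pointer the package cannot call atomic.LoadPointer/StorePointer, so a single sync.Mutex guards every cached-info pointer instead. A minimal sketch of that substitution with stand-in names (info, loadInfo, storeInfo):

package main

import (
	"fmt"
	"sync"
)

type info struct{ name string }

var mu sync.Mutex

// loadInfo and storeInfo emulate atomic pointer load/store with a
// mutex, as the purego build above does.
func loadInfo(p **info) *info {
	mu.Lock()
	defer mu.Unlock()
	return *p
}

func storeInfo(p **info, v *info) {
	mu.Lock()
	defer mu.Unlock()
	*p = v
}

func main() {
	var cached *info
	storeInfo(&cached, &info{name: "example"})
	fmt.Println(loadInfo(&cached).name) // example
}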
410 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go generated vendored
@@ -29,7 +29,7 @@
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build !appengine
|
||||
// +build !purego,!appengine,!js
|
||||
|
||||
// This file contains the implementation of the proto field accesses using package unsafe.
|
||||
|
||||
|
@@ -37,38 +37,13 @@ package proto
|
|||
|
||||
import (
|
||||
"reflect"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// NOTE: These type_Foo functions would more idiomatically be methods,
|
||||
// but Go does not allow methods on pointer types, and we must preserve
|
||||
// some pointer type for the garbage collector. We use these
|
||||
// funcs with clunky names as our poor approximation to methods.
|
||||
//
|
||||
// An alternative would be
|
||||
// type structPointer struct { p unsafe.Pointer }
|
||||
// but that does not registerize as well.
|
||||
const unsafeAllowed = true
|
||||
|
||||
// A structPointer is a pointer to a struct.
|
||||
type structPointer unsafe.Pointer
|
||||
|
||||
// toStructPointer returns a structPointer equivalent to the given reflect value.
|
||||
func toStructPointer(v reflect.Value) structPointer {
|
||||
return structPointer(unsafe.Pointer(v.Pointer()))
|
||||
}
|
||||
|
||||
// IsNil reports whether p is nil.
|
||||
func structPointer_IsNil(p structPointer) bool {
|
||||
return p == nil
|
||||
}
|
||||
|
||||
// Interface returns the struct pointer, assumed to have element type t,
|
||||
// as an interface value.
|
||||
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
|
||||
return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
|
||||
}
|
||||
|
||||
// A field identifies a field in a struct, accessible from a structPointer.
|
||||
// A field identifies a field in a struct, accessible from a pointer.
|
||||
// In this implementation, a field is identified by its byte offset from the start of the struct.
|
||||
type field uintptr
|
||||
|
||||
|
@@ -80,187 +55,254 @@ func toField(f *reflect.StructField) field {
|
|||
// invalidField is an invalid field identifier.
|
||||
const invalidField = ^field(0)
|
||||
|
||||
// zeroField is a noop when calling pointer.offset.
|
||||
const zeroField = field(0)
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool {
|
||||
return f != ^field(0)
|
||||
return f != invalidField
|
||||
}
|
||||
|
||||
// Bytes returns the address of a []byte field in the struct.
|
||||
func structPointer_Bytes(p structPointer, f field) *[]byte {
|
||||
return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
// The pointer type below is for the new table-driven encoder/decoder.
|
||||
// The implementation here uses unsafe.Pointer to create a generic pointer.
|
||||
// In pointer_reflect.go we use reflect instead of unsafe to implement
|
||||
// the same (but slower) interface.
|
||||
type pointer struct {
|
||||
p unsafe.Pointer
|
||||
}
|
||||
|
||||
// BytesSlice returns the address of a [][]byte field in the struct.
|
||||
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
|
||||
return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
// size of pointer
|
||||
var ptrSize = unsafe.Sizeof(uintptr(0))
|
||||
|
||||
// toPointer converts an interface of pointer type to a pointer
|
||||
// that points to the same target.
|
||||
func toPointer(i *Message) pointer {
|
||||
// Super-tricky - read pointer out of data word of interface value.
|
||||
// Saves ~25ns over the equivalent:
|
||||
// return valToPointer(reflect.ValueOf(*i))
|
||||
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
}
|
||||
|
||||
// Bool returns the address of a *bool field in the struct.
|
||||
func structPointer_Bool(p structPointer, f field) **bool {
|
||||
return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// BoolVal returns the address of a bool field in the struct.
|
||||
func structPointer_BoolVal(p structPointer, f field) *bool {
|
||||
return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// BoolSlice returns the address of a []bool field in the struct.
|
||||
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
|
||||
return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// String returns the address of a *string field in the struct.
|
||||
func structPointer_String(p structPointer, f field) **string {
|
||||
return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// StringVal returns the address of a string field in the struct.
|
||||
func structPointer_StringVal(p structPointer, f field) *string {
|
||||
return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// StringSlice returns the address of a []string field in the struct.
|
||||
func structPointer_StringSlice(p structPointer, f field) *[]string {
|
||||
return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// ExtMap returns the address of an extension map field in the struct.
|
||||
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
|
||||
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// NewAt returns the reflect.Value for a pointer to a field in the struct.
|
||||
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
|
||||
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
|
||||
}
|
||||
|
||||
// SetStructPointer writes a *struct field in the struct.
|
||||
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
|
||||
*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
|
||||
}
|
||||
|
||||
// GetStructPointer reads a *struct field in the struct.
|
||||
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
|
||||
return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// StructPointerSlice the address of a []*struct field in the struct.
|
||||
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
|
||||
return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
|
||||
type structPointerSlice []structPointer
|
||||
|
||||
func (v *structPointerSlice) Len() int { return len(*v) }
|
||||
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
|
||||
func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
|
||||
|
||||
// A word32 is the address of a "pointer to 32-bit value" field.
|
||||
type word32 **uint32
|
||||
|
||||
// IsNil reports whether *v is nil.
|
||||
func word32_IsNil(p word32) bool {
|
||||
return *p == nil
|
||||
}
|
||||
|
||||
// Set sets *v to point at a newly allocated word set to x.
|
||||
func word32_Set(p word32, o *Buffer, x uint32) {
|
||||
if len(o.uint32s) == 0 {
|
||||
o.uint32s = make([]uint32, uint32PoolSize)
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr bool) pointer {
|
||||
// Super-tricky - read or get the address of data word of interface value.
|
||||
if isptr {
|
||||
// The interface is of pointer type, thus it is a direct interface.
|
||||
// The data word is the pointer data itself. We take its address.
|
||||
return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
|
||||
}
|
||||
o.uint32s[0] = x
|
||||
*p = &o.uint32s[0]
|
||||
o.uint32s = o.uint32s[1:]
|
||||
// The interface is not of pointer type. The data word is the pointer
|
||||
// to the data.
|
||||
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
}
|
||||
|
||||
// Get gets the value pointed at by *v.
|
||||
func word32_Get(p word32) uint32 {
|
||||
return **p
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
func valToPointer(v reflect.Value) pointer {
|
||||
return pointer{p: unsafe.Pointer(v.Pointer())}
|
||||
}
|
||||
|
||||
// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
|
||||
func structPointer_Word32(p structPointer, f field) word32 {
|
||||
return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
|
||||
}
|
||||
|
||||
// A word32Val is the address of a 32-bit value field.
|
||||
type word32Val *uint32
|
||||
|
||||
// Set sets *p to x.
|
||||
func word32Val_Set(p word32Val, x uint32) {
|
||||
*p = x
|
||||
}
|
||||
|
||||
// Get gets the value pointed at by p.
|
||||
func word32Val_Get(p word32Val) uint32 {
|
||||
return *p
|
||||
}
|
||||
|
||||
// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
|
||||
func structPointer_Word32Val(p structPointer, f field) word32Val {
|
||||
return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
|
||||
}
|
||||
|
||||
// A word32Slice is a slice of 32-bit values.
|
||||
type word32Slice []uint32
|
||||
|
||||
func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
|
||||
func (v *word32Slice) Len() int { return len(*v) }
|
||||
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
|
||||
|
||||
// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
|
||||
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
|
||||
return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
}
|
||||
|
||||
// word64 is like word32 but for 64-bit values.
|
||||
type word64 **uint64
|
||||
|
||||
func word64_Set(p word64, o *Buffer, x uint64) {
|
||||
if len(o.uint64s) == 0 {
|
||||
o.uint64s = make([]uint64, uint64PoolSize)
|
||||
// offset converts from a pointer to a structure to a pointer to
|
||||
// one of its fields.
|
||||
func (p pointer) offset(f field) pointer {
|
||||
// For safety, we should panic if !f.IsValid, however calling panic causes
|
||||
// this to no longer be inlineable, which is a serious performance cost.
|
||||
/*
|
||||
if !f.IsValid() {
|
||||
panic("invalid field")
|
||||
}
|
||||
o.uint64s[0] = x
|
||||
*p = &o.uint64s[0]
|
||||
o.uint64s = o.uint64s[1:]
|
||||
*/
|
||||
return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
|
||||
}
|
||||
|
||||
func word64_IsNil(p word64) bool {
|
||||
return *p == nil
|
||||
func (p pointer) isNil() bool {
|
||||
return p.p == nil
|
||||
}
|
||||
|
||||
func word64_Get(p word64) uint64 {
|
||||
return **p
|
||||
func (p pointer) toInt64() *int64 {
|
||||
return (*int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt64Ptr() **int64 {
|
||||
return (**int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt64Slice() *[]int64 {
|
||||
return (*[]int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt32() *int32 {
|
||||
return (*int32)(p.p)
|
||||
}
|
||||
|
||||
func structPointer_Word64(p structPointer, f field) word64 {
|
||||
return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
|
||||
// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
|
||||
/*
|
||||
func (p pointer) toInt32Ptr() **int32 {
|
||||
return (**int32)(p.p)
|
||||
}
|
||||
func (p pointer) toInt32Slice() *[]int32 {
|
||||
return (*[]int32)(p.p)
|
||||
}
|
||||
*/
|
||||
func (p pointer) getInt32Ptr() *int32 {
|
||||
return *(**int32)(p.p)
|
||||
}
|
||||
func (p pointer) setInt32Ptr(v int32) {
|
||||
*(**int32)(p.p) = &v
|
||||
}
|
||||
|
||||
// word64Val is like word32Val but for 64-bit values.
|
||||
type word64Val *uint64
|
||||
|
||||
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
|
||||
*p = x
|
||||
// getInt32Slice loads a []int32 from p.
|
||||
// The value returned is aliased with the original slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) getInt32Slice() []int32 {
|
||||
return *(*[]int32)(p.p)
|
||||
}
|
||||
|
||||
func word64Val_Get(p word64Val) uint64 {
|
||||
return *p
|
||||
// setInt32Slice stores a []int32 to p.
|
||||
// The value set is aliased with the input slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) setInt32Slice(v []int32) {
|
||||
*(*[]int32)(p.p) = v
|
||||
}
|
||||
|
||||
func structPointer_Word64Val(p structPointer, f field) word64Val {
|
||||
return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
|
||||
// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
|
||||
func (p pointer) appendInt32Slice(v int32) {
|
||||
s := (*[]int32)(p.p)
|
||||
*s = append(*s, v)
|
||||
}
|
||||
|
||||
// word64Slice is like word32Slice but for 64-bit values.
|
||||
type word64Slice []uint64
|
||||
|
||||
func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
|
||||
func (v *word64Slice) Len() int { return len(*v) }
|
||||
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
|
||||
|
||||
func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
|
||||
return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
|
||||
func (p pointer) toUint64() *uint64 {
|
||||
return (*uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint64Ptr() **uint64 {
|
||||
return (**uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint64Slice() *[]uint64 {
|
||||
return (*[]uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32() *uint32 {
|
||||
return (*uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32Ptr() **uint32 {
|
||||
return (**uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32Slice() *[]uint32 {
|
||||
return (*[]uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toBool() *bool {
|
||||
return (*bool)(p.p)
|
||||
}
|
||||
func (p pointer) toBoolPtr() **bool {
|
||||
return (**bool)(p.p)
|
||||
}
|
||||
func (p pointer) toBoolSlice() *[]bool {
|
||||
return (*[]bool)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64() *float64 {
|
||||
return (*float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64Ptr() **float64 {
|
||||
return (**float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64Slice() *[]float64 {
|
||||
return (*[]float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32() *float32 {
|
||||
return (*float32)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32Ptr() **float32 {
|
||||
return (**float32)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32Slice() *[]float32 {
|
||||
return (*[]float32)(p.p)
|
||||
}
|
||||
func (p pointer) toString() *string {
|
||||
return (*string)(p.p)
|
||||
}
|
||||
func (p pointer) toStringPtr() **string {
|
||||
return (**string)(p.p)
|
||||
}
|
||||
func (p pointer) toStringSlice() *[]string {
|
||||
return (*[]string)(p.p)
|
||||
}
|
||||
func (p pointer) toBytes() *[]byte {
|
||||
return (*[]byte)(p.p)
|
||||
}
|
||||
func (p pointer) toBytesSlice() *[][]byte {
|
||||
return (*[][]byte)(p.p)
|
||||
}
|
||||
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
||||
return (*XXX_InternalExtensions)(p.p)
|
||||
}
|
||||
func (p pointer) toOldExtensions() *map[int32]Extension {
|
||||
return (*map[int32]Extension)(p.p)
|
||||
}
|
||||
|
||||
// getPointerSlice loads []*T from p as a []pointer.
|
||||
// The value returned is aliased with the original slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) getPointerSlice() []pointer {
|
||||
// Super-tricky - p should point to a []*T where T is a
|
||||
// message type. We load it as []pointer.
|
||||
return *(*[]pointer)(p.p)
|
||||
}
|
||||
|
||||
// setPointerSlice stores []pointer into p as a []*T.
|
||||
// The value set is aliased with the input slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) setPointerSlice(v []pointer) {
|
||||
// Super-tricky - p should point to a []*T where T is a
|
||||
// message type. We store it as []pointer.
|
||||
*(*[]pointer)(p.p) = v
|
||||
}
|
||||
|
||||
// getPointer loads the pointer at p and returns it.
|
||||
func (p pointer) getPointer() pointer {
|
||||
return pointer{p: *(*unsafe.Pointer)(p.p)}
|
||||
}
|
||||
|
||||
// setPointer stores the pointer q at p.
|
||||
func (p pointer) setPointer(q pointer) {
|
||||
*(*unsafe.Pointer)(p.p) = q.p
|
||||
}
|
||||
|
||||
// append q to the slice pointed to by p.
|
||||
func (p pointer) appendPointer(q pointer) {
|
||||
s := (*[]unsafe.Pointer)(p.p)
|
||||
*s = append(*s, q.p)
|
||||
}
|
||||
|
||||
// getInterfacePointer returns a pointer that points to the
|
||||
// interface data of the interface pointed by p.
|
||||
func (p pointer) getInterfacePointer() pointer {
|
||||
// Super-tricky - read pointer out of data word of interface value.
|
||||
return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
|
||||
}
|
||||
|
||||
// asPointerTo returns a reflect.Value that is a pointer to an
|
||||
// object of type t stored at p.
|
||||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||
return reflect.NewAt(t, p.p)
|
||||
}
|
||||
|
||||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
||||
return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
||||
return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
||||
return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
||||
return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
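The two pointer files are selected by the build constraints shown in these hunks: pointer_unsafe.go builds only when none of purego, appengine, or js are set, and pointer_reflect.go covers the rest. A sketch of the same split under hypothetical file and package names (old-style // +build lines need a blank line before the package clause); either side can be forced with, for example, go build -tags purego:

// file fast_unsafe.go (hypothetical)
// +build !purego,!appengine,!js

package access

const unsafeAllowed = true

// file slow_reflect.go (hypothetical)
// +build purego appengine js

package access

const unsafeAllowed = false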
467 vendor/github.com/golang/protobuf/proto/properties.go generated vendored
@@ -37,6 +37,7 @@ package proto
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
@@ -57,39 +58,6 @@ const (
|
|||
WireFixed32 = 5
|
||||
)
|
||||
|
||||
const startSize = 10 // initial slice/string sizes
|
||||
|
||||
// Encoders are defined in encode.go
|
||||
// An encoder outputs the full representation of a field, including its
|
||||
// tag and encoder type.
|
||||
type encoder func(p *Buffer, prop *Properties, base structPointer) error
|
||||
|
||||
// A valueEncoder encodes a single integer in a particular encoding.
|
||||
type valueEncoder func(o *Buffer, x uint64) error
|
||||
|
||||
// Sizers are defined in encode.go
|
||||
// A sizer returns the encoded size of a field, including its tag and encoder
|
||||
// type.
|
||||
type sizer func(prop *Properties, base structPointer) int
|
||||
|
||||
// A valueSizer returns the encoded size of a single integer in a particular
|
||||
// encoding.
|
||||
type valueSizer func(x uint64) int
|
||||
|
||||
// Decoders are defined in decode.go
|
||||
// A decoder creates a value from its wire representation.
|
||||
// Unrecognized subelements are saved in unrec.
|
||||
type decoder func(p *Buffer, prop *Properties, base structPointer) error
|
||||
|
||||
// A valueDecoder decodes a single integer in a particular encoding.
|
||||
type valueDecoder func(o *Buffer) (x uint64, err error)
|
||||
|
||||
// A oneofMarshaler does the marshaling for all oneof fields in a message.
|
||||
type oneofMarshaler func(Message, *Buffer) error
|
||||
|
||||
// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
|
||||
type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
|
||||
|
||||
// tagMap is an optimization over map[int]int for typical protocol buffer
|
||||
// use-cases. Encoded protocol buffers are often in tag order with small tag
|
||||
// numbers.
|
||||
|
@@ -136,12 +104,6 @@ type StructProperties struct {
|
|||
decoderTags tagMap // map from proto tag to struct field number
|
||||
decoderOrigNames map[string]int // map from original name to struct field number
|
||||
order []int // list of struct field numbers in tag order
|
||||
unrecField field // field id of the XXX_unrecognized []byte field
|
||||
extendable bool // is this an extendable proto
|
||||
|
||||
oneofMarshaler oneofMarshaler
|
||||
oneofUnmarshaler oneofUnmarshaler
|
||||
stype reflect.Type
|
||||
|
||||
// OneofTypes contains information about the oneof fields in this message.
|
||||
// It is keyed by the original name of a field.
|
||||
|
@@ -168,6 +130,7 @@ func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order
|
|||
type Properties struct {
|
||||
Name string // name of the field, for error messages
|
||||
OrigName string // original name before protocol compiler (always set)
|
||||
JSONName string // name to use for JSON; determined by protoc
|
||||
Wire string
|
||||
WireType int
|
||||
Tag int
|
||||
|
@@ -181,36 +144,19 @@ type Properties struct {
|
|||
|
||||
Default string // default value
|
||||
HasDefault bool // whether an explicit default was provided
|
||||
def_uint64 uint64
|
||||
|
||||
enc encoder
|
||||
valEnc valueEncoder // set for bool and numeric types only
|
||||
field field
|
||||
tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
|
||||
tagbuf [8]byte
|
||||
stype reflect.Type // set for struct types only
|
||||
sprop *StructProperties // set for struct types only
|
||||
isMarshaler bool
|
||||
isUnmarshaler bool
|
||||
|
||||
mtype reflect.Type // set for map types only
|
||||
mkeyprop *Properties // set for map types only
|
||||
mvalprop *Properties // set for map types only
|
||||
|
||||
size sizer
|
||||
valSize valueSizer // set for bool and numeric types only
|
||||
|
||||
dec decoder
|
||||
valDec valueDecoder // set for bool and numeric types only
|
||||
|
||||
// If this is a packable field, this will be the decoder for the packed version of the field.
|
||||
packedDec decoder
|
||||
}
|
||||
|
||||
// String formats the properties in the protobuf struct field tag style.
|
||||
func (p *Properties) String() string {
|
||||
s := p.Wire
|
||||
s = ","
|
||||
s += ","
|
||||
s += strconv.Itoa(p.Tag)
|
||||
if p.Required {
|
||||
s += ",req"
|
||||
|
@@ -224,8 +170,9 @@ func (p *Properties) String() string {
|
|||
if p.Packed {
|
||||
s += ",packed"
|
||||
}
|
||||
if p.OrigName != p.Name {
|
||||
s += ",name=" + p.OrigName
|
||||
if p.JSONName != p.OrigName {
|
||||
s += ",json=" + p.JSONName
|
||||
}
|
||||
if p.proto3 {
|
||||
s += ",proto3"
|
||||
|
@@ -255,29 +202,14 @@ func (p *Properties) Parse(s string) {
|
|||
switch p.Wire {
|
||||
case "varint":
|
||||
p.WireType = WireVarint
|
||||
p.valEnc = (*Buffer).EncodeVarint
|
||||
p.valDec = (*Buffer).DecodeVarint
|
||||
p.valSize = sizeVarint
|
||||
case "fixed32":
|
||||
p.WireType = WireFixed32
|
||||
p.valEnc = (*Buffer).EncodeFixed32
|
||||
p.valDec = (*Buffer).DecodeFixed32
|
||||
p.valSize = sizeFixed32
|
||||
case "fixed64":
|
||||
p.WireType = WireFixed64
|
||||
p.valEnc = (*Buffer).EncodeFixed64
|
||||
p.valDec = (*Buffer).DecodeFixed64
|
||||
p.valSize = sizeFixed64
|
||||
case "zigzag32":
|
||||
p.WireType = WireVarint
|
||||
p.valEnc = (*Buffer).EncodeZigzag32
|
||||
p.valDec = (*Buffer).DecodeZigzag32
|
||||
p.valSize = sizeZigzag32
|
||||
case "zigzag64":
|
||||
p.WireType = WireVarint
|
||||
p.valEnc = (*Buffer).EncodeZigzag64
|
||||
p.valDec = (*Buffer).DecodeZigzag64
|
||||
p.valSize = sizeZigzag64
|
||||
case "bytes", "group":
|
||||
p.WireType = WireBytes
|
||||
// no numeric converter for non-numeric types
|
||||
|
@@ -292,6 +224,7 @@ func (p *Properties) Parse(s string) {
|
|||
return
|
||||
}
|
||||
|
||||
outer:
|
||||
for i := 2; i < len(fields); i++ {
|
||||
f := fields[i]
|
||||
switch {
|
||||
|
@@ -305,6 +238,8 @@ func (p *Properties) Parse(s string) {
|
|||
p.Packed = true
|
||||
case strings.HasPrefix(f, "name="):
|
||||
p.OrigName = f[5:]
|
||||
case strings.HasPrefix(f, "json="):
|
||||
p.JSONName = f[5:]
|
||||
case strings.HasPrefix(f, "enum="):
|
||||
p.Enum = f[5:]
|
||||
case f == "proto3":
|
||||
|
@@ -317,233 +252,28 @@ func (p *Properties) Parse(s string) {
|
|||
if i+1 < len(fields) {
|
||||
// Commas aren't escaped, and def is always last.
|
||||
p.Default += "," + strings.Join(fields[i+1:], ",")
|
||||
break
|
||||
break outer
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
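Parse, shown above, splits a protobuf struct tag such as "bytes,3,req,name=message" on commas: the first field is the wire encoding, the second the tag number, and the rest are options like req, opt, packed, name= and json=. A minimal stand-alone sketch of that parsing (props and parseTag are stand-in names and cover only a few of the options):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

type props struct {
	Wire     string
	Tag      int
	Required bool
	OrigName string
}

// parseTag splits a protobuf-style struct tag into wire encoding,
// tag number, and a subset of the trailing options.
func parseTag(s string) (props, error) {
	fields := strings.Split(s, ",")
	if len(fields) < 2 {
		return props{}, fmt.Errorf("tag too short: %q", s)
	}
	tag, err := strconv.Atoi(fields[1])
	if err != nil {
		return props{}, err
	}
	p := props{Wire: fields[0], Tag: tag}
	for _, f := range fields[2:] {
		switch {
		case f == "req":
			p.Required = true
		case strings.HasPrefix(f, "name="):
			p.OrigName = f[len("name="):]
		}
	}
	return p, nil
}

func main() {
	p, _ := parseTag("bytes,3,req,name=message")
	fmt.Printf("%+v\n", p) // {Wire:bytes Tag:3 Required:true OrigName:message}
}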
func logNoSliceEnc(t1, t2 reflect.Type) {
|
||||
fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
|
||||
}
|
||||
|
||||
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
|
||||
|
||||
// Initialize the fields for encoding and decoding.
|
||||
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
|
||||
p.enc = nil
|
||||
p.dec = nil
|
||||
p.size = nil
|
||||
|
||||
// setFieldProps initializes the field properties for submessages and maps.
|
||||
func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
|
||||
switch t1 := typ; t1.Kind() {
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
|
||||
|
||||
// proto3 scalar types
|
||||
|
||||
case reflect.Bool:
|
||||
p.enc = (*Buffer).enc_proto3_bool
|
||||
p.dec = (*Buffer).dec_proto3_bool
|
||||
p.size = size_proto3_bool
|
||||
case reflect.Int32:
|
||||
p.enc = (*Buffer).enc_proto3_int32
|
||||
p.dec = (*Buffer).dec_proto3_int32
|
||||
p.size = size_proto3_int32
|
||||
case reflect.Uint32:
|
||||
p.enc = (*Buffer).enc_proto3_uint32
|
||||
p.dec = (*Buffer).dec_proto3_int32 // can reuse
|
||||
p.size = size_proto3_uint32
|
||||
case reflect.Int64, reflect.Uint64:
|
||||
p.enc = (*Buffer).enc_proto3_int64
|
||||
p.dec = (*Buffer).dec_proto3_int64
|
||||
p.size = size_proto3_int64
|
||||
case reflect.Float32:
|
||||
p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
|
||||
p.dec = (*Buffer).dec_proto3_int32
|
||||
p.size = size_proto3_uint32
|
||||
case reflect.Float64:
|
||||
p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
|
||||
p.dec = (*Buffer).dec_proto3_int64
|
||||
p.size = size_proto3_int64
|
||||
case reflect.String:
|
||||
p.enc = (*Buffer).enc_proto3_string
|
||||
p.dec = (*Buffer).dec_proto3_string
|
||||
p.size = size_proto3_string
|
||||
|
||||
case reflect.Ptr:
|
||||
switch t2 := t1.Elem(); t2.Kind() {
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
|
||||
break
|
||||
case reflect.Bool:
|
||||
p.enc = (*Buffer).enc_bool
|
||||
p.dec = (*Buffer).dec_bool
|
||||
p.size = size_bool
|
||||
case reflect.Int32:
|
||||
p.enc = (*Buffer).enc_int32
|
||||
p.dec = (*Buffer).dec_int32
|
||||
p.size = size_int32
|
||||
case reflect.Uint32:
|
||||
p.enc = (*Buffer).enc_uint32
|
||||
p.dec = (*Buffer).dec_int32 // can reuse
|
||||
p.size = size_uint32
|
||||
case reflect.Int64, reflect.Uint64:
|
||||
p.enc = (*Buffer).enc_int64
|
||||
p.dec = (*Buffer).dec_int64
|
||||
p.size = size_int64
|
||||
case reflect.Float32:
|
||||
p.enc = (*Buffer).enc_uint32 // can just treat them as bits
|
||||
p.dec = (*Buffer).dec_int32
|
||||
p.size = size_uint32
|
||||
case reflect.Float64:
|
||||
p.enc = (*Buffer).enc_int64 // can just treat them as bits
|
||||
p.dec = (*Buffer).dec_int64
|
||||
p.size = size_int64
|
||||
case reflect.String:
|
||||
p.enc = (*Buffer).enc_string
|
||||
p.dec = (*Buffer).dec_string
|
||||
p.size = size_string
|
||||
case reflect.Struct:
|
||||
if t1.Elem().Kind() == reflect.Struct {
|
||||
p.stype = t1.Elem()
|
||||
p.isMarshaler = isMarshaler(t1)
|
||||
p.isUnmarshaler = isUnmarshaler(t1)
|
||||
if p.Wire == "bytes" {
|
||||
p.enc = (*Buffer).enc_struct_message
|
||||
p.dec = (*Buffer).dec_struct_message
|
||||
p.size = size_struct_message
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_struct_group
|
||||
p.dec = (*Buffer).dec_struct_group
|
||||
p.size = size_struct_group
|
||||
}
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
switch t2 := t1.Elem(); t2.Kind() {
|
||||
default:
|
||||
logNoSliceEnc(t1, t2)
|
||||
break
|
||||
case reflect.Bool:
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_bool
|
||||
p.size = size_slice_packed_bool
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_bool
|
||||
p.size = size_slice_bool
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_bool
|
||||
p.packedDec = (*Buffer).dec_slice_packed_bool
|
||||
case reflect.Int32:
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_int32
|
||||
p.size = size_slice_packed_int32
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_int32
|
||||
p.size = size_slice_int32
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int32
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int32
|
||||
case reflect.Uint32:
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_uint32
|
||||
p.size = size_slice_packed_uint32
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_uint32
|
||||
p.size = size_slice_uint32
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int32
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int32
|
||||
case reflect.Int64, reflect.Uint64:
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_int64
|
||||
p.size = size_slice_packed_int64
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_int64
|
||||
p.size = size_slice_int64
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int64
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int64
|
||||
case reflect.Uint8:
|
||||
p.enc = (*Buffer).enc_slice_byte
|
||||
p.dec = (*Buffer).dec_slice_byte
|
||||
p.size = size_slice_byte
|
||||
// This is a []byte, which is either a bytes field,
|
||||
// or the value of a map field. In the latter case,
|
||||
// we always encode an empty []byte, so we should not
|
||||
// use the proto3 enc/size funcs.
|
||||
// f == nil iff this is the key/value of a map field.
|
||||
if p.proto3 && f != nil {
|
||||
p.enc = (*Buffer).enc_proto3_slice_byte
|
||||
p.size = size_proto3_slice_byte
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
switch t2.Bits() {
|
||||
case 32:
|
||||
// can just treat them as bits
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_uint32
|
||||
p.size = size_slice_packed_uint32
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_uint32
|
||||
p.size = size_slice_uint32
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int32
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int32
|
||||
case 64:
|
||||
// can just treat them as bits
|
||||
if p.Packed {
|
||||
p.enc = (*Buffer).enc_slice_packed_int64
|
||||
p.size = size_slice_packed_int64
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_int64
|
||||
p.size = size_slice_int64
|
||||
}
|
||||
p.dec = (*Buffer).dec_slice_int64
|
||||
p.packedDec = (*Buffer).dec_slice_packed_int64
|
||||
default:
|
||||
logNoSliceEnc(t1, t2)
|
||||
break
|
||||
}
|
||||
case reflect.String:
|
||||
p.enc = (*Buffer).enc_slice_string
|
||||
p.dec = (*Buffer).dec_slice_string
|
||||
p.size = size_slice_string
|
||||
case reflect.Ptr:
|
||||
switch t3 := t2.Elem(); t3.Kind() {
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
|
||||
break
|
||||
case reflect.Struct:
|
||||
if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
|
||||
p.stype = t2.Elem()
|
||||
p.isMarshaler = isMarshaler(t2)
|
||||
p.isUnmarshaler = isUnmarshaler(t2)
|
||||
if p.Wire == "bytes" {
|
||||
p.enc = (*Buffer).enc_slice_struct_message
|
||||
p.dec = (*Buffer).dec_slice_struct_message
|
||||
p.size = size_slice_struct_message
|
||||
} else {
|
||||
p.enc = (*Buffer).enc_slice_struct_group
|
||||
p.dec = (*Buffer).dec_slice_struct_group
|
||||
p.size = size_slice_struct_group
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
switch t2.Elem().Kind() {
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
|
||||
break
|
||||
case reflect.Uint8:
|
||||
p.enc = (*Buffer).enc_slice_slice_byte
|
||||
p.dec = (*Buffer).dec_slice_slice_byte
|
||||
p.size = size_slice_slice_byte
|
||||
}
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
p.enc = (*Buffer).enc_new_map
|
||||
p.dec = (*Buffer).dec_new_map
|
||||
p.size = size_new_map
|
||||
|
||||
p.mtype = t1
|
||||
p.mkeyprop = &Properties{}
|
||||
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
|
||||
|
@ -557,20 +287,6 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
|
|||
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
||||
}
|
||||
|
||||
// precalculate tag code
|
||||
wire := p.WireType
|
||||
if p.Packed {
|
||||
wire = WireBytes
|
||||
}
|
||||
x := uint32(p.Tag)<<3 | uint32(wire)
|
||||
i := 0
|
||||
for i = 0; x > 127; i++ {
|
||||
p.tagbuf[i] = 0x80 | uint8(x&0x7F)
|
||||
x >>= 7
|
||||
}
|
||||
p.tagbuf[i] = uint8(x)
|
||||
p.tagcode = p.tagbuf[0 : i+1]
|
||||
|
||||
if p.stype != nil {
|
||||
if lockGetProp {
|
||||
p.sprop = GetProperties(p.stype)
|
||||
|
@ -582,31 +298,8 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
|
|||
|
||||
var (
|
||||
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
|
||||
)
|
||||
|
||||
// isMarshaler reports whether type t implements Marshaler.
|
||||
func isMarshaler(t reflect.Type) bool {
|
||||
// We're checking for (likely) pointer-receiver methods
|
||||
// so if t is not a pointer, something is very wrong.
|
||||
// The calls above only invoke isMarshaler on pointer types.
|
||||
if t.Kind() != reflect.Ptr {
|
||||
panic("proto: misuse of isMarshaler")
|
||||
}
|
||||
return t.Implements(marshalerType)
|
||||
}
|
||||
|
||||
// isUnmarshaler reports whether type t implements Unmarshaler.
|
||||
func isUnmarshaler(t reflect.Type) bool {
|
||||
// We're checking for (likely) pointer-receiver methods
|
||||
// so if t is not a pointer, something is very wrong.
|
||||
// The calls above only invoke isUnmarshaler on pointer types.
|
||||
if t.Kind() != reflect.Ptr {
|
||||
panic("proto: misuse of isUnmarshaler")
|
||||
}
|
||||
return t.Implements(unmarshalerType)
|
||||
}
|
||||
|
||||
// Init populates the properties from a protocol buffer struct tag.
|
||||
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
||||
p.init(typ, name, tag, f, true)
|
||||
|
@ -616,14 +309,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF
|
|||
// "bytes,49,opt,def=hello!"
|
||||
p.Name = name
|
||||
p.OrigName = name
|
||||
if f != nil {
|
||||
p.field = toField(f)
|
||||
}
|
||||
if tag == "" {
|
||||
return
|
||||
}
|
||||
p.Parse(tag)
|
||||
p.setEncAndDec(typ, f, lockGetProp)
|
||||
p.setFieldProps(typ, f, lockGetProp)
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -673,8 +363,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||
propertiesMap[t] = prop
|
||||
|
||||
// build properties
|
||||
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
|
||||
prop.unrecField = invalidField
|
||||
prop.Prop = make([]*Properties, t.NumField())
|
||||
prop.order = make([]int, t.NumField())
|
||||
|
||||
|
@ -684,15 +372,11 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||
name := f.Name
|
||||
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
||||
|
||||
if f.Name == "XXX_extensions" { // special case
|
||||
p.enc = (*Buffer).enc_map
|
||||
p.dec = nil // not needed
|
||||
p.size = size_map
|
||||
oneof := f.Tag.Get("protobuf_oneof") // special case
|
||||
if oneof != "" {
|
||||
// Oneof fields don't use the traditional protobuf tag.
|
||||
p.OrigName = oneof
|
||||
}
|
||||
if f.Name == "XXX_unrecognized" { // special case
|
||||
prop.unrecField = toField(&f)
|
||||
}
|
||||
oneof := f.Tag.Get("protobuf_oneof") != "" // special case
|
||||
prop.Prop[i] = p
|
||||
prop.order[i] = i
|
||||
if debug {
|
||||
|
@ -702,21 +386,17 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||
}
|
||||
print("\n")
|
||||
}
|
||||
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
|
||||
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
|
||||
}
|
||||
}
|
||||
|
||||
// Re-order prop.order.
|
||||
sort.Sort(prop)
|
||||
|
||||
type oneofMessage interface {
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), []interface{})
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||
}
|
||||
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
|
||||
var oots []interface{}
|
||||
prop.oneofMarshaler, prop.oneofUnmarshaler, oots = om.XXX_OneofFuncs()
|
||||
prop.stype = t
|
||||
_, _, _, oots = om.XXX_OneofFuncs()
|
||||
|
||||
// Interpret oneof metadata.
|
||||
prop.OneofTypes = make(map[string]*OneofProperties)
|
||||
|
@ -766,30 +446,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
|||
return prop
|
||||
}
|
||||
|
||||
// Return the Properties object for the x[0]'th field of the structure.
|
||||
func propByIndex(t reflect.Type, x []int) *Properties {
|
||||
if len(x) != 1 {
|
||||
fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
|
||||
return nil
|
||||
}
|
||||
prop := GetProperties(t)
|
||||
return prop.Prop[x[0]]
|
||||
}
|
||||
|
||||
// Get the address and type of a pointer to a struct from an interface.
|
||||
func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
|
||||
if pb == nil {
|
||||
err = ErrNil
|
||||
return
|
||||
}
|
||||
// get the reflect type of the pointer to the struct.
|
||||
t = reflect.TypeOf(pb)
|
||||
// get the address of the struct.
|
||||
value := reflect.ValueOf(pb)
|
||||
b = toStructPointer(value)
|
||||
return
|
||||
}
|
||||
|
||||
// A global registry of enum types.
|
||||
// The generated code will register the generated maps by calling RegisterEnum.
|
||||
|
||||
|
@ -803,3 +459,86 @@ func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[
|
|||
}
|
||||
enumValueMaps[typeName] = valueMap
|
||||
}
|
||||
|
||||
// EnumValueMap returns the mapping from names to integers of the
|
||||
// enum type enumType, or a nil if not found.
|
||||
func EnumValueMap(enumType string) map[string]int32 {
|
||||
return enumValueMaps[enumType]
|
||||
}
|
||||
|
||||
// A registry of all linked message types.
|
||||
// The string is a fully-qualified proto name ("pkg.Message").
|
||||
var (
|
||||
protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
|
||||
protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
|
||||
revProtoTypes = make(map[reflect.Type]string)
|
||||
)
|
||||
|
||||
// RegisterType is called from generated code and maps from the fully qualified
|
||||
// proto name to the type (pointer to struct) of the protocol buffer.
|
||||
func RegisterType(x Message, name string) {
|
||||
if _, ok := protoTypedNils[name]; ok {
|
||||
// TODO: Some day, make this a panic.
|
||||
log.Printf("proto: duplicate proto type registered: %s", name)
|
||||
return
|
||||
}
|
||||
t := reflect.TypeOf(x)
|
||||
if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
|
||||
// Generated code always calls RegisterType with nil x.
|
||||
// This check is just for extra safety.
|
||||
protoTypedNils[name] = x
|
||||
} else {
|
||||
protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
|
||||
}
|
||||
revProtoTypes[t] = name
|
||||
}
|
||||
|
||||
// RegisterMapType is called from generated code and maps from the fully qualified
|
||||
// proto name to the native map type of the proto map definition.
|
||||
func RegisterMapType(x interface{}, name string) {
|
||||
if reflect.TypeOf(x).Kind() != reflect.Map {
|
||||
panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
|
||||
}
|
||||
if _, ok := protoMapTypes[name]; ok {
|
||||
log.Printf("proto: duplicate proto type registered: %s", name)
|
||||
return
|
||||
}
|
||||
t := reflect.TypeOf(x)
|
||||
protoMapTypes[name] = t
|
||||
revProtoTypes[t] = name
|
||||
}
|
||||
|
||||
// MessageName returns the fully-qualified proto name for the given message type.
|
||||
func MessageName(x Message) string {
|
||||
type xname interface {
|
||||
XXX_MessageName() string
|
||||
}
|
||||
if m, ok := x.(xname); ok {
|
||||
return m.XXX_MessageName()
|
||||
}
|
||||
return revProtoTypes[reflect.TypeOf(x)]
|
||||
}
|
||||
|
||||
// MessageType returns the message type (pointer to struct) for a named message.
|
||||
// The type is not guaranteed to implement proto.Message if the name refers to a
|
||||
// map entry.
|
||||
func MessageType(name string) reflect.Type {
|
||||
if t, ok := protoTypedNils[name]; ok {
|
||||
return reflect.TypeOf(t)
|
||||
}
|
||||
return protoMapTypes[name]
|
||||
}
|
||||
|
||||
// A registry of all linked proto files.
|
||||
var (
|
||||
protoFiles = make(map[string][]byte) // file name => fileDescriptor
|
||||
)
|
||||
|
||||
// RegisterFile is called from generated code and maps from the
|
||||
// full file name of a .proto file to its compressed FileDescriptorProto.
|
||||
func RegisterFile(filename string, fileDescriptor []byte) {
|
||||
protoFiles[filename] = fileDescriptor
|
||||
}
|
||||
|
||||
// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
|
||||
func FileDescriptor(filename string) []byte { return protoFiles[filename] }
|
||||
|
|
2681 vendor/github.com/golang/protobuf/proto/table_marshal.go (generated, vendored, new file; diff suppressed because it is too large)
654 vendor/github.com/golang/protobuf/proto/table_merge.go (generated, vendored, new file)
|
@ -0,0 +1,654 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Merge merges the src message into dst.
|
||||
// This assumes that dst and src are of the same type and are non-nil.
|
||||
func (a *InternalMessageInfo) Merge(dst, src Message) {
|
||||
mi := atomicLoadMergeInfo(&a.merge)
|
||||
if mi == nil {
|
||||
mi = getMergeInfo(reflect.TypeOf(dst).Elem())
|
||||
atomicStoreMergeInfo(&a.merge, mi)
|
||||
}
|
||||
mi.merge(toPointer(&dst), toPointer(&src))
|
||||
}
|
||||
|
||||
type mergeInfo struct {
|
||||
typ reflect.Type
|
||||
|
||||
initialized int32 // 0: only typ is valid, 1: everything is valid
|
||||
lock sync.Mutex
|
||||
|
||||
fields []mergeFieldInfo
|
||||
unrecognized field // Offset of XXX_unrecognized
|
||||
}
|
||||
|
||||
type mergeFieldInfo struct {
|
||||
field field // Offset of field, guaranteed to be valid
|
||||
|
||||
// isPointer reports whether the value in the field is a pointer.
|
||||
// This is true for the following situations:
|
||||
// * Pointer to struct
|
||||
// * Pointer to basic type (proto2 only)
|
||||
// * Slice (first value in slice header is a pointer)
|
||||
// * String (first value in string header is a pointer)
|
||||
isPointer bool
|
||||
|
||||
// basicWidth reports the width of the field assuming that it is directly
|
||||
// embedded in the struct (as is the case for basic types in proto3).
|
||||
// The possible values are:
|
||||
// 0: invalid
|
||||
// 1: bool
|
||||
// 4: int32, uint32, float32
|
||||
// 8: int64, uint64, float64
|
||||
basicWidth int
|
||||
|
||||
// Where dst and src are pointers to the types being merged.
|
||||
merge func(dst, src pointer)
|
||||
}
|
||||
|
||||
var (
|
||||
mergeInfoMap = map[reflect.Type]*mergeInfo{}
|
||||
mergeInfoLock sync.Mutex
|
||||
)
|
||||
|
||||
func getMergeInfo(t reflect.Type) *mergeInfo {
|
||||
mergeInfoLock.Lock()
|
||||
defer mergeInfoLock.Unlock()
|
||||
mi := mergeInfoMap[t]
|
||||
if mi == nil {
|
||||
mi = &mergeInfo{typ: t}
|
||||
mergeInfoMap[t] = mi
|
||||
}
|
||||
return mi
|
||||
}
|
||||
|
||||
// merge merges src into dst assuming they are both of type *mi.typ.
|
||||
func (mi *mergeInfo) merge(dst, src pointer) {
|
||||
if dst.isNil() {
|
||||
panic("proto: nil destination")
|
||||
}
|
||||
if src.isNil() {
|
||||
return // Nothing to do.
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&mi.initialized) == 0 {
|
||||
mi.computeMergeInfo()
|
||||
}
|
||||
|
||||
for _, fi := range mi.fields {
|
||||
sfp := src.offset(fi.field)
|
||||
|
||||
// As an optimization, we can avoid the merge function call cost
|
||||
// if we know for sure that the source will have no effect
|
||||
// by checking if it is the zero value.
|
||||
if unsafeAllowed {
|
||||
if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
|
||||
continue
|
||||
}
|
||||
if fi.basicWidth > 0 {
|
||||
switch {
|
||||
case fi.basicWidth == 1 && !*sfp.toBool():
|
||||
continue
|
||||
case fi.basicWidth == 4 && *sfp.toUint32() == 0:
|
||||
continue
|
||||
case fi.basicWidth == 8 && *sfp.toUint64() == 0:
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dfp := dst.offset(fi.field)
|
||||
fi.merge(dfp, sfp)
|
||||
}
|
||||
|
||||
// TODO: Make this faster?
|
||||
out := dst.asPointerTo(mi.typ).Elem()
|
||||
in := src.asPointerTo(mi.typ).Elem()
|
||||
if emIn, err := extendable(in.Addr().Interface()); err == nil {
|
||||
emOut, _ := extendable(out.Addr().Interface())
|
||||
mIn, muIn := emIn.extensionsRead()
|
||||
if mIn != nil {
|
||||
mOut := emOut.extensionsWrite()
|
||||
muIn.Lock()
|
||||
mergeExtension(mOut, mIn)
|
||||
muIn.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
if mi.unrecognized.IsValid() {
|
||||
if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
|
||||
*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (mi *mergeInfo) computeMergeInfo() {
|
||||
mi.lock.Lock()
|
||||
defer mi.lock.Unlock()
|
||||
if mi.initialized != 0 {
|
||||
return
|
||||
}
|
||||
t := mi.typ
|
||||
n := t.NumField()
|
||||
|
||||
props := GetProperties(t)
|
||||
for i := 0; i < n; i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
|
||||
mfi := mergeFieldInfo{field: toField(&f)}
|
||||
tf := f.Type
|
||||
|
||||
// As an optimization, we can avoid the merge function call cost
|
||||
// if we know for sure that the source will have no effect
|
||||
// by checking if it is the zero value.
|
||||
if unsafeAllowed {
|
||||
switch tf.Kind() {
|
||||
case reflect.Ptr, reflect.Slice, reflect.String:
|
||||
// As a special case, we assume slices and strings are pointers
|
||||
// since we know that the first field in the SliceHeader or
|
||||
// StringHeader is a data pointer.
|
||||
mfi.isPointer = true
|
||||
case reflect.Bool:
|
||||
mfi.basicWidth = 1
|
||||
case reflect.Int32, reflect.Uint32, reflect.Float32:
|
||||
mfi.basicWidth = 4
|
||||
case reflect.Int64, reflect.Uint64, reflect.Float64:
|
||||
mfi.basicWidth = 8
|
||||
}
|
||||
}
|
||||
|
||||
// Unwrap tf to get at its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic("both pointer and slice for basic type in " + tf.Name())
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Int32:
|
||||
switch {
|
||||
case isSlice: // E.g., []int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
|
||||
/*
|
||||
sfsp := src.toInt32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toInt32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []int64{}
|
||||
}
|
||||
}
|
||||
*/
|
||||
sfs := src.getInt32Slice()
|
||||
if sfs != nil {
|
||||
dfs := dst.getInt32Slice()
|
||||
dfs = append(dfs, sfs...)
|
||||
if dfs == nil {
|
||||
dfs = []int32{}
|
||||
}
|
||||
dst.setInt32Slice(dfs)
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
|
||||
/*
|
||||
sfpp := src.toInt32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toInt32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Int32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
*/
|
||||
sfp := src.getInt32Ptr()
|
||||
if sfp != nil {
|
||||
dfp := dst.getInt32Ptr()
|
||||
if dfp == nil {
|
||||
dst.setInt32Ptr(*sfp)
|
||||
} else {
|
||||
*dfp = *sfp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toInt32(); v != 0 {
|
||||
*dst.toInt32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Int64:
|
||||
switch {
|
||||
case isSlice: // E.g., []int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toInt64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toInt64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []int64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toInt64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toInt64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Int64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toInt64(); v != 0 {
|
||||
*dst.toInt64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Uint32:
|
||||
switch {
|
||||
case isSlice: // E.g., []uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toUint32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toUint32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []uint32{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toUint32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toUint32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Uint32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toUint32(); v != 0 {
|
||||
*dst.toUint32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Uint64:
|
||||
switch {
|
||||
case isSlice: // E.g., []uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toUint64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toUint64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []uint64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toUint64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toUint64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Uint64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toUint64(); v != 0 {
|
||||
*dst.toUint64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Float32:
|
||||
switch {
|
||||
case isSlice: // E.g., []float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toFloat32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toFloat32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []float32{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toFloat32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toFloat32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Float32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toFloat32(); v != 0 {
|
||||
*dst.toFloat32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Float64:
|
||||
switch {
|
||||
case isSlice: // E.g., []float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toFloat64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toFloat64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []float64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toFloat64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toFloat64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Float64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toFloat64(); v != 0 {
|
||||
*dst.toFloat64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Bool:
|
||||
switch {
|
||||
case isSlice: // E.g., []bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toBoolSlice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toBoolSlice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []bool{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toBoolPtr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toBoolPtr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Bool(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toBool(); v {
|
||||
*dst.toBool() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.String:
|
||||
switch {
|
||||
case isSlice: // E.g., []string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toStringSlice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toStringSlice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []string{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toStringPtr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toStringPtr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = String(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toString(); v != "" {
|
||||
*dst.toString() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
isProto3 := props.Prop[i].proto3
|
||||
switch {
|
||||
case isPointer:
|
||||
panic("bad pointer in byte slice case in " + tf.Name())
|
||||
case tf.Elem().Kind() != reflect.Uint8:
|
||||
panic("bad element kind in byte slice case in " + tf.Name())
|
||||
case isSlice: // E.g., [][]byte
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sbsp := src.toBytesSlice()
|
||||
if *sbsp != nil {
|
||||
dbsp := dst.toBytesSlice()
|
||||
for _, sb := range *sbsp {
|
||||
if sb == nil {
|
||||
*dbsp = append(*dbsp, nil)
|
||||
} else {
|
||||
*dbsp = append(*dbsp, append([]byte{}, sb...))
|
||||
}
|
||||
}
|
||||
if *dbsp == nil {
|
||||
*dbsp = [][]byte{}
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., []byte
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sbp := src.toBytes()
|
||||
if *sbp != nil {
|
||||
dbp := dst.toBytes()
|
||||
if !isProto3 || len(*sbp) > 0 {
|
||||
*dbp = append([]byte{}, *sbp...)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("message field %s without pointer", tf))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
mi := getMergeInfo(tf)
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sps := src.getPointerSlice()
|
||||
if sps != nil {
|
||||
dps := dst.getPointerSlice()
|
||||
for _, sp := range sps {
|
||||
var dp pointer
|
||||
if !sp.isNil() {
|
||||
dp = valToPointer(reflect.New(tf))
|
||||
mi.merge(dp, sp)
|
||||
}
|
||||
dps = append(dps, dp)
|
||||
}
|
||||
if dps == nil {
|
||||
dps = []pointer{}
|
||||
}
|
||||
dst.setPointerSlice(dps)
|
||||
}
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
mi := getMergeInfo(tf)
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sp := src.getPointer()
|
||||
if !sp.isNil() {
|
||||
dp := dst.getPointer()
|
||||
if dp.isNil() {
|
||||
dp = valToPointer(reflect.New(tf))
|
||||
dst.setPointer(dp)
|
||||
}
|
||||
mi.merge(dp, sp)
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic("bad pointer or slice in map case in " + tf.Name())
|
||||
default: // E.g., map[K]V
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sm := src.asPointerTo(tf).Elem()
|
||||
if sm.Len() == 0 {
|
||||
return
|
||||
}
|
||||
dm := dst.asPointerTo(tf).Elem()
|
||||
if dm.IsNil() {
|
||||
dm.Set(reflect.MakeMap(tf))
|
||||
}
|
||||
|
||||
switch tf.Elem().Kind() {
|
||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
val = reflect.ValueOf(Clone(val.Interface().(Message)))
|
||||
dm.SetMapIndex(key, val)
|
||||
}
|
||||
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
||||
dm.SetMapIndex(key, val)
|
||||
}
|
||||
default: // Basic type (e.g., string)
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
dm.SetMapIndex(key, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic("bad pointer or slice in interface case in " + tf.Name())
|
||||
default: // E.g., interface{}
|
||||
// TODO: Make this faster?
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
su := src.asPointerTo(tf).Elem()
|
||||
if !su.IsNil() {
|
||||
du := dst.asPointerTo(tf).Elem()
|
||||
typ := su.Elem().Type()
|
||||
if du.IsNil() || du.Elem().Type() != typ {
|
||||
du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
|
||||
}
|
||||
sv := su.Elem().Elem().Field(0)
|
||||
if sv.Kind() == reflect.Ptr && sv.IsNil() {
|
||||
return
|
||||
}
|
||||
dv := du.Elem().Elem().Field(0)
|
||||
if dv.Kind() == reflect.Ptr && dv.IsNil() {
|
||||
dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
|
||||
}
|
||||
switch sv.Type().Kind() {
|
||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||
Merge(dv.Interface().(Message), sv.Interface().(Message))
|
||||
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
|
||||
dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
|
||||
default: // Basic type (e.g., string)
|
||||
dv.Set(sv)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("merger not found for type:%s", tf))
|
||||
}
|
||||
mi.fields = append(mi.fields, mfi)
|
||||
}
|
||||
|
||||
mi.unrecognized = invalidField
|
||||
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
|
||||
if f.Type != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
mi.unrecognized = toField(&f)
|
||||
}
|
||||
|
||||
atomic.StoreInt32(&mi.initialized, 1)
|
||||
}
|
1967 vendor/github.com/golang/protobuf/proto/table_unmarshal.go (generated, vendored, new file; diff suppressed because it is too large)
274 vendor/github.com/golang/protobuf/proto/text.go (generated, vendored)
|
@ -50,7 +50,6 @@ import (
|
|||
var (
|
||||
newline = []byte("\n")
|
||||
spaces = []byte(" ")
|
||||
gtNewline = []byte(">\n")
|
||||
endBraceNewline = []byte("}\n")
|
||||
backslashN = []byte{'\\', 'n'}
|
||||
backslashR = []byte{'\\', 'r'}
|
||||
|
@ -154,7 +153,7 @@ func (w *textWriter) indent() { w.ind++ }
|
|||
|
||||
func (w *textWriter) unindent() {
|
||||
if w.ind == 0 {
|
||||
log.Printf("proto: textWriter unindented too far")
|
||||
log.Print("proto: textWriter unindented too far")
|
||||
return
|
||||
}
|
||||
w.ind--
|
||||
|
@ -170,20 +169,93 @@ func writeName(w *textWriter, props *Properties) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem()
|
||||
)
|
||||
|
||||
// raw is the interface satisfied by RawMessage.
|
||||
type raw interface {
|
||||
Bytes() []byte
|
||||
func requiresQuotes(u string) bool {
|
||||
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
||||
for _, ch := range u {
|
||||
switch {
|
||||
case ch == '.' || ch == '/' || ch == '_':
|
||||
continue
|
||||
case '0' <= ch && ch <= '9':
|
||||
continue
|
||||
case 'A' <= ch && ch <= 'Z':
|
||||
continue
|
||||
case 'a' <= ch && ch <= 'z':
|
||||
continue
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func writeStruct(w *textWriter, sv reflect.Value) error {
|
||||
if sv.Type() == messageSetType {
|
||||
return writeMessageSet(w, sv.Addr().Interface().(*MessageSet))
|
||||
// isAny reports whether sv is a google.protobuf.Any message
|
||||
func isAny(sv reflect.Value) bool {
|
||||
type wkt interface {
|
||||
XXX_WellKnownType() string
|
||||
}
|
||||
t, ok := sv.Addr().Interface().(wkt)
|
||||
return ok && t.XXX_WellKnownType() == "Any"
|
||||
}
|
||||
|
||||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
||||
//
|
||||
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
|
||||
// required messages are not linked in).
|
||||
//
|
||||
// It returns (true, error) when sv was written in expanded format or an error
|
||||
// was encountered.
|
||||
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
|
||||
turl := sv.FieldByName("TypeUrl")
|
||||
val := sv.FieldByName("Value")
|
||||
if !turl.IsValid() || !val.IsValid() {
|
||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||
}
|
||||
|
||||
b, ok := val.Interface().([]byte)
|
||||
if !ok {
|
||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||
}
|
||||
|
||||
parts := strings.Split(turl.String(), "/")
|
||||
mt := MessageType(parts[len(parts)-1])
|
||||
if mt == nil {
|
||||
return false, nil
|
||||
}
|
||||
m := reflect.New(mt.Elem())
|
||||
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
w.Write([]byte("["))
|
||||
u := turl.String()
|
||||
if requiresQuotes(u) {
|
||||
writeString(w, u)
|
||||
} else {
|
||||
w.Write([]byte(u))
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("]:<"))
|
||||
} else {
|
||||
w.Write([]byte("]: <\n"))
|
||||
w.ind++
|
||||
}
|
||||
if err := tm.writeStruct(w, m.Elem()); err != nil {
|
||||
return true, err
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("> "))
|
||||
} else {
|
||||
w.ind--
|
||||
w.Write([]byte(">\n"))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
||||
if tm.ExpandAny && isAny(sv) {
|
||||
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
|
||||
return err
|
||||
}
|
||||
}
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < sv.NumField(); i++ {
|
||||
|
@ -191,6 +263,10 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
props := sprops.Prop[i]
|
||||
name := st.Field(i).Name
|
||||
|
||||
if name == "XXX_NoUnkeyedLiteral" {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(name, "XXX_") {
|
||||
// There are two XXX_ fields:
|
||||
// XXX_unrecognized []byte
|
||||
|
@ -235,7 +311,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
}
|
||||
continue
|
||||
}
|
||||
if err := writeAny(w, v, props); err != nil {
|
||||
if err := tm.writeAny(w, v, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
|
@ -277,7 +353,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
if err := writeAny(w, key, props.mkeyprop); err != nil {
|
||||
if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
|
@ -294,7 +370,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
if err := writeAny(w, val, props.mvalprop); err != nil {
|
||||
if err := tm.writeAny(w, val, props.mvalprop); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
|
@ -358,15 +434,9 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
if b, ok := fv.Interface().(raw); ok {
|
||||
if err := writeRaw(w, b.Bytes()); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Enums have a String method, so writeAny will work fine.
|
||||
if err := writeAny(w, fv, props); err != nil {
|
||||
if err := tm.writeAny(w, fv, props); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -377,8 +447,8 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
|
||||
// Extensions (the XXX_extensions field).
|
||||
pv := sv.Addr()
|
||||
if pv.Type().Implements(extendableProtoType) {
|
||||
if err := writeExtensions(w, pv); err != nil {
|
||||
if _, err := extendable(pv.Interface()); err == nil {
|
||||
if err := tm.writeExtensions(w, pv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -386,29 +456,8 @@ func writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// writeRaw writes an uninterpreted raw message.
|
||||
func writeRaw(w *textWriter, b []byte) error {
|
||||
if err := w.WriteByte('<'); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
if err := writeUnknownStruct(w, b); err != nil {
|
||||
return err
|
||||
}
|
||||
w.unindent()
|
||||
if err := w.WriteByte('>'); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeAny writes an arbitrary field.
|
||||
func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
v = reflect.Indirect(v)
|
||||
|
||||
// Floats have special cases.
|
||||
|
@ -435,7 +484,7 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
|||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
// Should only be a []byte; repeated fields are handled in writeStruct.
|
||||
if err := writeString(w, string(v.Interface().([]byte))); err != nil {
|
||||
if err := writeString(w, string(v.Bytes())); err != nil {
|
||||
return err
|
||||
}
|
||||
case reflect.String:
|
||||
|
@ -457,17 +506,35 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
|||
}
|
||||
}
|
||||
w.indent()
|
||||
if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
||||
text, err := tm.MarshalText()
|
||||
if v.CanAddr() {
|
||||
// Calling v.Interface on a struct causes the reflect package to
|
||||
// copy the entire struct. This is racy with the new Marshaler
|
||||
// since we atomically update the XXX_sizecache.
|
||||
//
|
||||
// Thus, we retrieve a pointer to the struct if possible to avoid
|
||||
// a race since v.Interface on the pointer doesn't copy the struct.
|
||||
//
|
||||
// If v is not addressable, then we are not worried about a race
|
||||
// since it implies that the binary Marshaler cannot possibly be
|
||||
// mutating this value.
|
||||
v = v.Addr()
|
||||
}
|
||||
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
||||
text, err := etm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = w.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if err := writeStruct(w, v); err != nil {
|
||||
} else {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
if err := tm.writeStruct(w, v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.unindent()
|
||||
if err := w.WriteByte(ket); err != nil {
|
||||
return err
|
||||
|
@ -525,44 +592,6 @@ func writeString(w *textWriter, s string) error {
|
|||
return w.WriteByte('"')
|
||||
}
|
||||
|
||||
func writeMessageSet(w *textWriter, ms *MessageSet) error {
|
||||
for _, item := range ms.Item {
|
||||
id := *item.TypeId
|
||||
if msd, ok := messageSetMap[id]; ok {
|
||||
// Known message set type.
|
||||
if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil {
|
||||
return err
|
||||
}
|
||||
w.indent()
|
||||
|
||||
pb := reflect.New(msd.t.Elem())
|
||||
if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil {
|
||||
if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := writeStruct(w, pb.Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Unknown type.
|
||||
if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil {
|
||||
return err
|
||||
}
|
||||
w.indent()
|
||||
if err := writeUnknownStruct(w, item.Message); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.unindent()
|
||||
if _, err := w.Write(gtNewline); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
|
||||
if !w.compact {
|
||||
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
|
||||
|
@ -647,19 +676,24 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
|||
|
||||
// writeExtensions writes all the extensions in pv.
|
||||
// pv is assumed to be a pointer to a protocol message struct that is extendable.
|
||||
func writeExtensions(w *textWriter, pv reflect.Value) error {
|
||||
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
|
||||
emap := extensionMaps[pv.Type().Elem()]
|
||||
ep := pv.Interface().(extendableProto)
|
||||
ep, _ := extendable(pv.Interface())
|
||||
|
||||
// Order the extensions by ID.
|
||||
// This isn't strictly necessary, but it will give us
|
||||
// canonical output, which will also make testing easier.
|
||||
m := ep.ExtensionMap()
|
||||
m, mu := ep.extensionsRead()
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
mu.Lock()
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Sort(int32Slice(ids))
|
||||
mu.Unlock()
|
||||
|
||||
for _, extNum := range ids {
|
||||
ext := m[extNum]
|
||||
|
@ -682,13 +716,13 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
|
|||
|
||||
// Repeated extensions will appear as a slice.
|
||||
if !desc.repeated() {
|
||||
if err := writeExtension(w, desc.Name, pb); err != nil {
|
||||
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
v := reflect.ValueOf(pb)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
||||
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -697,7 +731,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func writeExtension(w *textWriter, name string, pb interface{}) error {
|
||||
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
|
||||
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -706,7 +740,7 @@ func writeExtension(w *textWriter, name string, pb interface{}) error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
||||
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
|
@ -731,7 +765,15 @@ func (w *textWriter) writeIndent() {
|
|||
w.complete = false
|
||||
}
|
||||
|
||||
func marshalText(w io.Writer, pb Message, compact bool) error {
|
||||
// TextMarshaler is a configurable text format marshaler.
|
||||
type TextMarshaler struct {
|
||||
Compact bool // use compact text format (one line).
|
||||
ExpandAny bool // expand google.protobuf.Any messages of known types
|
||||
}
|
||||
|
||||
// Marshal writes a given protocol buffer in text format.
|
||||
// The only errors returned are from w.
|
||||
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
||||
val := reflect.ValueOf(pb)
|
||||
if pb == nil || val.IsNil() {
|
||||
w.Write([]byte("<nil>"))
|
||||
|
@ -746,11 +788,11 @@ func marshalText(w io.Writer, pb Message, compact bool) error {
|
|||
aw := &textWriter{
|
||||
w: ww,
|
||||
complete: true,
|
||||
compact: compact,
|
||||
compact: tm.Compact,
|
||||
}
|
||||
|
||||
if tm, ok := pb.(encoding.TextMarshaler); ok {
|
||||
text, err := tm.MarshalText()
|
||||
if etm, ok := pb.(encoding.TextMarshaler); ok {
|
||||
text, err := etm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -764,7 +806,7 @@ func marshalText(w io.Writer, pb Message, compact bool) error {
|
|||
}
|
||||
// Dereference the received pointer so we don't have outer < and >.
|
||||
v := reflect.Indirect(val)
|
||||
if err := writeStruct(aw, v); err != nil {
|
||||
if err := tm.writeStruct(aw, v); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
|
||||
|
@ -773,25 +815,29 @@ func marshalText(w io.Writer, pb Message, compact bool) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// Text is the same as Marshal, but returns the string directly.
|
||||
func (tm *TextMarshaler) Text(pb Message) string {
|
||||
var buf bytes.Buffer
|
||||
tm.Marshal(&buf, pb)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
var (
|
||||
defaultTextMarshaler = TextMarshaler{}
|
||||
compactTextMarshaler = TextMarshaler{Compact: true}
|
||||
)
|
||||
|
||||
// TODO: consider removing some of the Marshal functions below.
|
||||
|
||||
// MarshalText writes a given protocol buffer in text format.
|
||||
// The only errors returned are from w.
|
||||
func MarshalText(w io.Writer, pb Message) error {
|
||||
return marshalText(w, pb, false)
|
||||
}
|
||||
func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
|
||||
|
||||
// MarshalTextString is the same as MarshalText, but returns the string directly.
|
||||
func MarshalTextString(pb Message) string {
|
||||
var buf bytes.Buffer
|
||||
marshalText(&buf, pb, false)
|
||||
return buf.String()
|
||||
}
|
||||
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
|
||||
|
||||
// CompactText writes a given protocol buffer in compact text format (one line).
|
||||
func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }
|
||||
func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
|
||||
|
||||
// CompactTextString is the same as CompactText, but returns the string directly.
|
||||
func CompactTextString(pb Message) string {
|
||||
var buf bytes.Buffer
|
||||
marshalText(&buf, pb, true)
|
||||
return buf.String()
|
||||
}
|
||||
func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
|
||||
|
|
266 vendor/github.com/golang/protobuf/proto/text_parser.go (generated, vendored)
|
@ -44,6 +44,9 @@ import (
|
|||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Error string emitted when deserializing Any and fields are already set
|
||||
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
|
||||
|
||||
type ParseError struct {
|
||||
Message string
|
||||
Line int // 1-based line number
|
||||
|
@ -119,6 +122,14 @@ func isWhitespace(c byte) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
func isQuote(c byte) bool {
|
||||
switch c {
|
||||
case '"', '\'':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *textParser) skipWhitespace() {
|
||||
i := 0
|
||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||
|
@ -155,7 +166,7 @@ func (p *textParser) advance() {
|
|||
p.cur.offset, p.cur.line = p.offset, p.line
|
||||
p.cur.unquoted = ""
|
||||
switch p.s[0] {
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',':
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
||||
// Single symbol
|
||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||
case '"', '\'':
|
||||
|
@ -195,7 +206,6 @@ func (p *textParser) advance() {
|
|||
|
||||
var (
|
||||
errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||
errBadHex = errors.New("proto: bad hexadecimal")
|
||||
)
|
||||
|
||||
func unquoteC(s string, quote rune) (string, error) {
|
||||
|
@ -266,60 +276,47 @@ func unescape(s string) (ch string, tail string, err error) {
|
|||
return "?", s, nil // trigraph workaround
|
||||
case '\'', '"', '\\':
|
||||
return string(r), s, nil
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
if len(s) < 2 {
|
||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||
}
|
||||
base := 8
|
||||
ss := s[:2]
|
||||
ss := string(r) + s[:2]
|
||||
s = s[2:]
|
||||
if r == 'x' || r == 'X' {
|
||||
base = 16
|
||||
} else {
|
||||
ss = string(r) + ss
|
||||
}
|
||||
i, err := strconv.ParseUint(ss, base, 8)
|
||||
i, err := strconv.ParseUint(ss, 8, 8)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
||||
}
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
case 'u', 'U':
|
||||
n := 4
|
||||
if r == 'U' {
|
||||
case 'x', 'X', 'u', 'U':
|
||||
var n int
|
||||
switch r {
|
||||
case 'x', 'X':
|
||||
n = 2
|
||||
case 'u':
|
||||
n = 4
|
||||
case 'U':
|
||||
n = 8
|
||||
}
|
||||
if len(s) < n {
|
||||
return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
|
||||
}
|
||||
|
||||
bs := make([]byte, n/2)
|
||||
for i := 0; i < n; i += 2 {
|
||||
a, ok1 := unhex(s[i])
|
||||
b, ok2 := unhex(s[i+1])
|
||||
if !ok1 || !ok2 {
|
||||
return "", "", errBadHex
|
||||
}
|
||||
bs[i/2] = a<<4 | b
|
||||
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
||||
}
|
||||
ss := s[:n]
|
||||
s = s[n:]
|
||||
return string(bs), s, nil
|
||||
i, err := strconv.ParseUint(ss, 16, 64)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
||||
}
|
||||
if r == 'x' || r == 'X' {
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
}
|
||||
if i > utf8.MaxRune {
|
||||
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
||||
}
|
||||
return string(i), s, nil
|
||||
}
|
||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||
}
|
||||
|
||||
// Adapted from src/pkg/strconv/quote.go.
|
||||
func unhex(b byte) (v byte, ok bool) {
|
||||
switch {
|
||||
case '0' <= b && b <= '9':
|
||||
return b - '0', true
|
||||
case 'a' <= b && b <= 'f':
|
||||
return b - 'a' + 10, true
|
||||
case 'A' <= b && b <= 'F':
|
||||
return b - 'A' + 10, true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// Back off the parser by one token. Can only be done between calls to next().
|
||||
// It makes the next advance() a no-op.
|
||||
func (p *textParser) back() { p.backed = true }
|
||||
|
@ -333,13 +330,13 @@ func (p *textParser) next() *token {
|
|||
p.advance()
|
||||
if p.done {
|
||||
p.cur.value = ""
|
||||
} else if len(p.cur.value) > 0 && p.cur.value[0] == '"' {
|
||||
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
||||
// Look for multiple quoted strings separated by whitespace,
|
||||
// and concatenate them.
|
||||
cat := p.cur
|
||||
for {
|
||||
p.skipWhitespace()
|
||||
if p.done || p.s[0] != '"' {
|
||||
if p.done || !isQuote(p.s[0]) {
|
||||
break
|
||||
}
|
||||
p.advance()
|
||||
|
@ -443,7 +440,10 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
fieldSet := make(map[string]bool)
|
||||
// A struct is a sequence of "name: value", terminated by one of
|
||||
// '>' or '}', or the end of the input. A name may also be
|
||||
// "[extension]".
|
||||
// "[extension]" or "[type/url]".
|
||||
//
|
||||
// The whole struct can also be an expanded Any message, like:
|
||||
// [type/url] < ... struct contents ... >
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
|
@ -453,33 +453,74 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
break
|
||||
}
|
||||
if tok.value == "[" {
|
||||
// Looks like an extension.
|
||||
// Looks like an extension or an Any.
|
||||
//
|
||||
// TODO: Check whether we need to handle
|
||||
// namespace rooted names (e.g. ".something.Foo").
|
||||
extName, err := p.consumeExtName()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s := strings.LastIndex(extName, "/"); s >= 0 {
|
||||
// If it contains a slash, it's an Any type URL.
|
||||
messageName := extName[s+1:]
|
||||
mt := MessageType(messageName)
|
||||
if mt == nil {
|
||||
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
|
||||
}
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
// consume an optional colon
|
||||
if tok.value == ":" {
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
}
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
v := reflect.New(mt.Elem())
|
||||
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
|
||||
return pe
|
||||
}
|
||||
b, err := Marshal(v.Interface().(Message))
|
||||
if err != nil {
|
||||
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
|
||||
}
|
||||
if fieldSet["type_url"] {
|
||||
return p.errorf(anyRepeatedlyUnpacked, "type_url")
|
||||
}
|
||||
if fieldSet["value"] {
|
||||
return p.errorf(anyRepeatedlyUnpacked, "value")
|
||||
}
|
||||
sv.FieldByName("TypeUrl").SetString(extName)
|
||||
sv.FieldByName("Value").SetBytes(b)
|
||||
fieldSet["type_url"] = true
|
||||
fieldSet["value"] = true
|
||||
continue
|
||||
}
|
||||
|
||||
var desc *ExtensionDesc
|
||||
// This could be faster, but it's functional.
|
||||
// TODO: Do something smarter than a linear scan.
|
||||
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
|
||||
if d.Name == tok.value {
|
||||
if d.Name == extName {
|
||||
desc = d
|
||||
break
|
||||
}
|
||||
}
|
||||
if desc == nil {
|
||||
return p.errorf("unrecognized extension %q", tok.value)
|
||||
}
|
||||
// Check the extension terminator.
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != "]" {
|
||||
return p.errorf("unrecognized extension terminator %q", tok.value)
|
||||
return p.errorf("unrecognized extension %q", extName)
|
||||
}
|
||||
|
||||
props := &Properties{}
|
||||
|
@ -506,7 +547,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
}
|
||||
reqFieldErr = err
|
||||
}
|
||||
ep := sv.Addr().Interface().(extendableProto)
|
||||
ep := sv.Addr().Interface().(Message)
|
||||
if !rep {
|
||||
SetExtension(ep, desc, ext.Interface())
|
||||
} else {
|
||||
|
@ -537,7 +578,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
props = oop.Prop
|
||||
nv := reflect.New(oop.Type.Elem())
|
||||
dst = nv.Elem().Field(0)
|
||||
sv.Field(oop.Field).Set(nv)
|
||||
field := sv.Field(oop.Field)
|
||||
if !field.IsNil() {
|
||||
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
|
||||
}
|
||||
field.Set(nv)
|
||||
}
|
||||
if !dst.IsValid() {
|
||||
return p.errorf("unknown field name %q in %v", name, st)
|
||||
|
@ -558,8 +603,9 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
|
||||
// The map entry should be this sequence of tokens:
|
||||
// < key : KEY value : VALUE >
|
||||
// Technically the "key" and "value" could come in any order,
|
||||
// but in practice they won't.
|
||||
// However, implementations may omit key or value, and technically
|
||||
// we should support them in any order. See b/28924776 for a time
|
||||
// this went wrong.
|
||||
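// Illustrative note (added, not from the upstream source): the loop below
// accepts map entries with "key" and "value" in either order and tolerates
// an omitted key or value. For a hypothetical map field "counts":
//   counts: <key: "a" value: 1>
//   counts: <value: 2 key: "b">
//   counts: <key: "c">   // omitted value falls back to the zero value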
|
||||
tok := p.next()
|
||||
var terminator string
|
||||
|
@ -571,9 +617,16 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
if err := p.consumeToken("key"); err != nil {
|
||||
return err
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
switch tok.value {
|
||||
case "key":
|
||||
if err := p.consumeToken(":"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -583,9 +636,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeToken("value"); err != nil {
|
||||
return err
|
||||
}
|
||||
case "value":
|
||||
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -595,8 +646,10 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeToken(terminator); err != nil {
|
||||
return err
|
||||
default:
|
||||
p.back()
|
||||
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
||||
}
|
||||
}
|
||||
|
||||
dst.SetMapIndex(key, val)
|
||||
|
@ -619,7 +672,8 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
} else if props.Required {
|
||||
}
|
||||
if props.Required {
|
||||
reqCount--
|
||||
}
|
||||
|
||||
|
@ -635,6 +689,38 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
|||
return reqFieldErr
|
||||
}
|
||||
|
||||
// consumeExtName consumes extension name or expanded Any type URL and the
|
||||
// following ']'. It returns the name or URL consumed.
|
||||
func (p *textParser) consumeExtName() (string, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return "", tok.err
|
||||
}
|
||||
|
||||
// If extension name or type url is quoted, it's a single token.
|
||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return name, p.consumeToken("]")
|
||||
}
|
||||
|
||||
// Consume everything up to "]"
|
||||
var parts []string
|
||||
for tok.value != "]" {
|
||||
parts = append(parts, tok.value)
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||
}
|
||||
if p.done && tok.value != "]" {
|
||||
return "", p.errorf("unclosed type_url or extension name")
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, ""), nil
|
||||
}
|
||||
|
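// Illustrative sketch (added, not part of the vendored diff): the expanded
// Any form handled by readStruct above uses a bracketed type URL whose inner
// struct contents are marshaled into the Any's value bytes.
// "com.example.Inner" is a hypothetical message name:
//
//   details: <
//     [type.googleapis.com/com.example.Inner] <
//       name: "x"
//     >
//   >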
||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||
// It is used in readStruct to provide backward compatibility.
|
||||
func (p *textParser) consumeOptionalSeparator() error {
|
||||
|
@ -672,25 +758,39 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
|||
fv.Set(reflect.ValueOf(bytes))
|
||||
return nil
|
||||
}
|
||||
// Repeated field. May already exist.
|
||||
flen := fv.Len()
|
||||
if flen == fv.Cap() {
|
||||
nav := reflect.MakeSlice(at, flen, 2*flen+1)
|
||||
reflect.Copy(nav, fv)
|
||||
fv.Set(nav)
|
||||
// Repeated field.
|
||||
if tok.value == "[" {
|
||||
// Repeated field with list notation, like [1,2,3].
|
||||
for {
|
||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||
err := p.readAny(fv.Index(fv.Len()-1), props)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fv.SetLen(flen + 1)
|
||||
|
||||
// Read one.
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == "]" {
|
||||
break
|
||||
}
|
||||
if tok.value != "," {
|
||||
return p.errorf("Expected ']' or ',' found %q", tok.value)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// One value of the repeated field.
|
||||
p.back()
|
||||
return p.readAny(fv.Index(flen), props)
|
||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||
return p.readAny(fv.Index(fv.Len()-1), props)
|
||||
case reflect.Bool:
|
||||
// Either "true", "false", 1 or 0.
|
||||
// true/1/t/True or false/f/0/False.
|
||||
switch tok.value {
|
||||
case "true", "1":
|
||||
case "true", "1", "t", "True":
|
||||
fv.SetBool(true)
|
||||
return nil
|
||||
case "false", "0":
|
||||
case "false", "0", "f", "False":
|
||||
fv.SetBool(false)
|
||||
return nil
|
||||
}
|
||||
|
@ -772,13 +872,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
|||
// UnmarshalText returns *RequiredNotSetError.
|
||||
func UnmarshalText(s string, pb Message) error {
|
||||
if um, ok := pb.(encoding.TextUnmarshaler); ok {
|
||||
err := um.UnmarshalText([]byte(s))
|
||||
return err
|
||||
return um.UnmarshalText([]byte(s))
|
||||
}
|
||||
pb.Reset()
|
||||
v := reflect.ValueOf(pb)
|
||||
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
|
||||
return pe
|
||||
}
|
||||
return nil
|
||||
return newTextParser(s).readStruct(v.Elem(), "")
|
||||
}
|
||||
|
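A minimal usage sketch for the entry point above (not part of the vendored diff); the generated package and message type are hypothetical, assumed only for illustration:

```go
package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	example "example.com/gen/examplepb" // hypothetical generated package
)

func main() {
	// UnmarshalText resets the message, then drives the text parser shown above.
	msg := &example.MyMessage{} // hypothetical generated message type
	if err := proto.UnmarshalText(`name: "badge" count: 3`, msg); err != nil {
		log.Fatalf("text unmarshal failed: %v", err)
	}
	log.Printf("parsed: %s", proto.MarshalTextString(msg))
}
```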
|
21 vendor/github.com/gorilla/context/.travis.yml (generated, vendored)
|
@ -1,8 +1,19 @@
|
|||
language: go
|
||||
sudo: false
|
||||
|
||||
go:
|
||||
- 1.3
|
||||
- 1.4
|
||||
- 1.5
|
||||
- tip
|
||||
matrix:
|
||||
include:
|
||||
- go: 1.3
|
||||
- go: 1.4
|
||||
- go: 1.5
|
||||
- go: 1.6
|
||||
- go: 1.7
|
||||
- go: tip
|
||||
allow_failures:
|
||||
- go: tip
|
||||
|
||||
script:
|
||||
- go get -t -v ./...
|
||||
- diff -u <(echo -n) <(gofmt -d .)
|
||||
- go vet $(go list ./... | grep -v /vendor/)
|
||||
- go test -v -race ./...
|
||||
|
|
3 vendor/github.com/gorilla/context/README.md (generated, vendored)
|
@ -4,4 +4,7 @@ context
|
|||
|
||||
gorilla/context is a general purpose registry for global request variables.
|
||||
|
||||
> Note: gorilla/context, having been born well before `context.Context` existed, does not play well
|
||||
> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`.
|
||||
|
||||
Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
|
||||
|
|
6 vendor/github.com/gorilla/context/doc.go (generated, vendored)
|
@ -5,6 +5,12 @@
|
|||
/*
|
||||
Package context stores values shared during a request lifetime.
|
||||
|
||||
Note: gorilla/context, having been born well before `context.Context` existed,
|
||||
does not play well with the shallow copying of the request that
|
||||
[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext)
|
||||
(added to net/http Go 1.7 onwards) performs. You should either use *just*
|
||||
gorilla/context, or moving forward, the new `http.Request.Context()`.
|
||||
|
||||
For example, a router can set variables extracted from the URL and later
|
||||
application handlers can access those values, or it can be used to store
|
||||
sessions values to be saved at the end of a request. There are several
|
||||
|
|
25 vendor/github.com/gorilla/mux/.travis.yml (generated, vendored)
|
@ -1,8 +1,23 @@
|
|||
language: go
|
||||
sudo: false
|
||||
|
||||
go:
|
||||
- 1.3
|
||||
- 1.4
|
||||
- 1.5
|
||||
- tip
|
||||
matrix:
|
||||
include:
|
||||
- go: 1.5.x
|
||||
- go: 1.6.x
|
||||
- go: 1.7.x
|
||||
- go: 1.8.x
|
||||
- go: 1.9.x
|
||||
- go: 1.10.x
|
||||
- go: tip
|
||||
allow_failures:
|
||||
- go: tip
|
||||
|
||||
install:
|
||||
- # Skip
|
||||
|
||||
script:
|
||||
- go get -t -v ./...
|
||||
- diff -u <(echo -n) <(gofmt -d .)
|
||||
- go tool vet .
|
||||
- go test -v -race ./...
|
||||
|
|
11 vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md (generated, vendored, new file)
|
@ -0,0 +1,11 @@
|
|||
**What version of Go are you running?** (Paste the output of `go version`)
|
||||
|
||||
|
||||
**What version of gorilla/mux are you at?** (Paste the output of `git rev-parse HEAD` inside `$GOPATH/src/github.com/gorilla/mux`)
|
||||
|
||||
|
||||
**Describe your problem** (and what you have tried so far)
|
||||
|
||||
|
||||
**Paste a minimal, runnable, reproduction of your issue below** (use backticks to format it)
|
||||
|
654 vendor/github.com/gorilla/mux/README.md (generated, vendored)
|
@ -1,218 +1,632 @@
|
|||
mux
|
||||
===
|
||||
# gorilla/mux
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
|
||||
[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux)
|
||||
[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
|
||||
[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
|
||||
|
||||
Package gorilla/mux implements a request router and dispatcher.
|
||||
![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
|
||||
|
||||
The name mux stands for "HTTP request multiplexer". Like the standard
|
||||
http.ServeMux, mux.Router matches incoming requests against a list of
|
||||
registered routes and calls a handler for the route that matches the URL
|
||||
or other conditions. The main features are:
|
||||
http://www.gorillatoolkit.org/pkg/mux
|
||||
|
||||
* Requests can be matched based on URL host, path, path prefix, schemes,
|
||||
header and query values, HTTP methods or using custom matchers.
|
||||
* URL hosts and paths can have variables with an optional regular
|
||||
expression.
|
||||
* Registered URLs can be built, or "reversed", which helps maintaining
|
||||
references to resources.
|
||||
* Routes can be used as subrouters: nested routes are only tested if the
|
||||
parent route matches. This is useful to define groups of routes that
|
||||
share common conditions like a host, a path prefix or other repeated
|
||||
attributes. As a bonus, this optimizes request matching.
|
||||
* It implements the http.Handler interface so it is compatible with the
|
||||
standard http.ServeMux.
|
||||
Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
|
||||
their respective handler.
|
||||
|
||||
The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
|
||||
|
||||
* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
|
||||
* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
|
||||
* URL hosts, paths and query values can have variables with an optional regular expression.
|
||||
* Registered URLs can be built, or "reversed", which helps maintaining references to resources.
|
||||
* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
|
||||
|
||||
---
|
||||
|
||||
* [Install](#install)
|
||||
* [Examples](#examples)
|
||||
* [Matching Routes](#matching-routes)
|
||||
* [Static Files](#static-files)
|
||||
* [Registered URLs](#registered-urls)
|
||||
* [Walking Routes](#walking-routes)
|
||||
* [Graceful Shutdown](#graceful-shutdown)
|
||||
* [Middleware](#middleware)
|
||||
* [Testing Handlers](#testing-handlers)
|
||||
* [Full Example](#full-example)
|
||||
|
||||
---
|
||||
|
||||
## Install
|
||||
|
||||
With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
|
||||
|
||||
```sh
|
||||
go get -u github.com/gorilla/mux
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
Let's start registering a couple of URL paths and handlers:
|
||||
|
||||
func main() {
|
||||
```go
|
||||
func main() {
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/", HomeHandler)
|
||||
r.HandleFunc("/products", ProductsHandler)
|
||||
r.HandleFunc("/articles", ArticlesHandler)
|
||||
http.Handle("/", r)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Here we register three routes mapping URL paths to handlers. This is
|
||||
equivalent to how http.HandleFunc() works: if an incoming request URL matches
|
||||
one of the paths, the corresponding handler is called passing
|
||||
(http.ResponseWriter, *http.Request) as parameters.
|
||||
Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
|
||||
|
||||
Paths can have variables. They are defined using the format {name} or
|
||||
{name:pattern}. If a regular expression pattern is not defined, the matched
|
||||
variable will be anything until the next slash. For example:
|
||||
Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
|
||||
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/products/{key}", ProductHandler)
|
||||
r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
|
||||
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/products/{key}", ProductHandler)
|
||||
r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
|
||||
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
|
||||
```
|
||||
|
||||
The names are used to create a map of route variables which can be retrieved
|
||||
calling mux.Vars():
|
||||
The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
|
||||
|
||||
vars := mux.Vars(request)
|
||||
category := vars["category"]
|
||||
```go
|
||||
func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintf(w, "Category: %v\n", vars["category"])
|
||||
}
|
||||
```
|
||||
|
||||
And this is all you need to know about the basic usage. More advanced options
|
||||
are explained below.
|
||||
And this is all you need to know about the basic usage. More advanced options are explained below.
|
||||
|
||||
Routes can also be restricted to a domain or subdomain. Just define a host
|
||||
pattern to be matched. They can also have variables:
|
||||
### Matching Routes
|
||||
|
||||
r := mux.NewRouter()
|
||||
// Only matches if domain is "www.example.com".
|
||||
r.Host("www.example.com")
|
||||
// Matches a dynamic subdomain.
|
||||
r.Host("{subdomain:[a-z]+}.domain.com")
|
||||
Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
|
||||
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
// Only matches if domain is "www.example.com".
|
||||
r.Host("www.example.com")
|
||||
// Matches a dynamic subdomain.
|
||||
r.Host("{subdomain:[a-z]+}.domain.com")
|
||||
```
|
||||
|
||||
There are several other matchers that can be added. To match path prefixes:
|
||||
|
||||
r.PathPrefix("/products/")
|
||||
```go
|
||||
r.PathPrefix("/products/")
|
||||
```
|
||||
|
||||
...or HTTP methods:
|
||||
|
||||
r.Methods("GET", "POST")
|
||||
```go
|
||||
r.Methods("GET", "POST")
|
||||
```
|
||||
|
||||
...or URL schemes:
|
||||
|
||||
r.Schemes("https")
|
||||
```go
|
||||
r.Schemes("https")
|
||||
```
|
||||
|
||||
...or header values:
|
||||
|
||||
r.Headers("X-Requested-With", "XMLHttpRequest")
|
||||
```go
|
||||
r.Headers("X-Requested-With", "XMLHttpRequest")
|
||||
```
|
||||
|
||||
...or query values:
|
||||
|
||||
r.Queries("key", "value")
|
||||
```go
|
||||
r.Queries("key", "value")
|
||||
```
|
||||
|
||||
...or to use a custom matcher function:
|
||||
|
||||
r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
|
||||
```go
|
||||
r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
|
||||
return r.ProtoMajor == 0
|
||||
})
|
||||
})
|
||||
```
|
||||
|
||||
...and finally, it is possible to combine several matchers in a single route:
|
||||
|
||||
r.HandleFunc("/products", ProductsHandler).
|
||||
```go
|
||||
r.HandleFunc("/products", ProductsHandler).
|
||||
Host("www.example.com").
|
||||
Methods("GET").
|
||||
Schemes("http")
|
||||
```
|
||||
|
||||
Setting the same matching conditions again and again can be boring, so we have
|
||||
a way to group several routes that share the same requirements.
|
||||
We call it "subrouting".
|
||||
Routes are tested in the order they were added to the router. If two routes match, the first one wins:
|
||||
|
||||
For example, let's say we have several URLs that should only match when the
|
||||
host is `www.example.com`. Create a route for that host and get a "subrouter"
|
||||
from it:
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/specific", specificHandler)
|
||||
r.PathPrefix("/").Handler(catchAllHandler)
|
||||
```
|
||||
|
||||
r := mux.NewRouter()
|
||||
s := r.Host("www.example.com").Subrouter()
|
||||
Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
|
||||
|
||||
For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
|
||||
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
s := r.Host("www.example.com").Subrouter()
|
||||
```
|
||||
|
||||
Then register routes in the subrouter:
|
||||
|
||||
s.HandleFunc("/products/", ProductsHandler)
|
||||
s.HandleFunc("/products/{key}", ProductHandler)
|
||||
s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
|
||||
```go
|
||||
s.HandleFunc("/products/", ProductsHandler)
|
||||
s.HandleFunc("/products/{key}", ProductHandler)
|
||||
s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
|
||||
```
|
||||
|
||||
The three URL paths we registered above will only be tested if the domain is
|
||||
`www.example.com`, because the subrouter is tested first. This is not
|
||||
only convenient, but also optimizes request matching. You can create
|
||||
subrouters combining any attribute matchers accepted by a route.
|
||||
The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
|
||||
|
||||
Subrouters can be used to create domain or path "namespaces": you define
|
||||
subrouters in a central place and then parts of the app can register its
|
||||
paths relatively to a given subrouter.
|
||||
Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter.
|
||||
|
||||
There's one more thing about subroutes. When a subrouter has a path prefix,
|
||||
the inner routes use it as base for their paths:
|
||||
There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths:
|
||||
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
s := r.PathPrefix("/products").Subrouter()
|
||||
// "/products/"
|
||||
s.HandleFunc("/", ProductsHandler)
|
||||
// "/products/{key}/"
|
||||
s.HandleFunc("/{key}/", ProductHandler)
|
||||
// "/products/{key}/details"
|
||||
s.HandleFunc("/{key}/details", ProductDetailsHandler)
|
||||
```
|
||||
|
||||
|
||||
### Static Files
|
||||
|
||||
Note that the path provided to `PathPrefix()` represents a "wildcard": calling
|
||||
`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
|
||||
request that matches "/static/\*". This makes it easy to serve static files with mux:
|
||||
|
||||
```go
|
||||
func main() {
|
||||
var dir string
|
||||
|
||||
flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
|
||||
flag.Parse()
|
||||
r := mux.NewRouter()
|
||||
s := r.PathPrefix("/products").Subrouter()
|
||||
// "/products/"
|
||||
s.HandleFunc("/", ProductsHandler)
|
||||
// "/products/{key}/"
|
||||
s.HandleFunc("/{key}/", ProductHandler)
|
||||
// "/products/{key}/details"
|
||||
s.HandleFunc("/{key}/details", ProductDetailsHandler)
|
||||
|
||||
// This will serve files under http://localhost:8000/static/<filename>
|
||||
r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
|
||||
|
||||
srv := &http.Server{
|
||||
Handler: r,
|
||||
Addr: "127.0.0.1:8000",
|
||||
// Good practice: enforce timeouts for servers you create!
|
||||
WriteTimeout: 15 * time.Second,
|
||||
ReadTimeout: 15 * time.Second,
|
||||
}
|
||||
|
||||
log.Fatal(srv.ListenAndServe())
|
||||
}
|
||||
```
|
||||
|
||||
### Registered URLs
|
||||
|
||||
Now let's see how to build registered URLs.
|
||||
|
||||
Routes can be named. All routes that define a name can have their URLs built,
|
||||
or "reversed". We define a name calling Name() on a route. For example:
|
||||
Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
|
||||
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
|
||||
Name("article")
|
||||
```
|
||||
|
||||
To build a URL, get the route and call the URL() method, passing a sequence of
|
||||
key/value pairs for the route variables. For the previous route, we would do:
|
||||
To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
|
||||
|
||||
url, err := r.Get("article").URL("category", "technology", "id", "42")
|
||||
```go
|
||||
url, err := r.Get("article").URL("category", "technology", "id", "42")
|
||||
```
|
||||
|
||||
...and the result will be a url.URL with the following path:
|
||||
...and the result will be a `url.URL` with the following path:
|
||||
|
||||
"/articles/technology/42"
|
||||
```
|
||||
"/articles/technology/42"
|
||||
```
|
||||
|
||||
This also works for host variables:
|
||||
This also works for host and query value variables:
|
||||
|
||||
r := mux.NewRouter()
|
||||
r.Host("{subdomain}.domain.com").
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
r.Host("{subdomain}.domain.com").
|
||||
Path("/articles/{category}/{id:[0-9]+}").
|
||||
Queries("filter", "{filter}").
|
||||
HandlerFunc(ArticleHandler).
|
||||
Name("article")
|
||||
|
||||
// url.String() will be "http://news.domain.com/articles/technology/42"
|
||||
url, err := r.Get("article").URL("subdomain", "news",
|
||||
// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
|
||||
url, err := r.Get("article").URL("subdomain", "news",
|
||||
"category", "technology",
|
||||
"id", "42")
|
||||
"id", "42",
|
||||
"filter", "gorilla")
|
||||
```
|
||||
|
||||
All variables defined in the route are required, and their values must
|
||||
conform to the corresponding patterns. These requirements guarantee that a
|
||||
generated URL will always match a registered route -- the only exception is
|
||||
for explicitly defined "build-only" routes which never match.
|
||||
All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
|
||||
|
||||
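As a small sketch of the "build-only" exception mentioned above (not part of the upstream README; the route name is made up), a route marked with `BuildOnly()` can be reversed with `URL()` but never matches an incoming request:

```go
r := mux.NewRouter()
// Reversible via r.Get("catalog").URL("key", "42"), but never matched:
r.Path("/products/{key}").Name("catalog").BuildOnly()
```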
Regex support also exists for matching Headers within a route. For example, we could do:
|
||||
|
||||
r.HeadersRegexp("Content-Type", "application/(text|json)")
|
||||
```go
|
||||
r.HeadersRegexp("Content-Type", "application/(text|json)")
|
||||
```
|
||||
|
||||
...and the route will match both requests with a Content-Type of `application/json` as well as
|
||||
`application/text`
|
||||
...and the route will match both requests with a Content-Type of `application/json` as well as `application/text`
|
||||
|
||||
There's also a way to build only the URL host or path for a route:
|
||||
use the methods URLHost() or URLPath() instead. For the previous route,
|
||||
we would do:
|
||||
There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
|
||||
|
||||
// "http://news.domain.com/"
|
||||
host, err := r.Get("article").URLHost("subdomain", "news")
|
||||
```go
|
||||
// "http://news.domain.com/"
|
||||
host, err := r.Get("article").URLHost("subdomain", "news")
|
||||
|
||||
// "/articles/technology/42"
|
||||
path, err := r.Get("article").URLPath("category", "technology", "id", "42")
|
||||
// "/articles/technology/42"
|
||||
path, err := r.Get("article").URLPath("category", "technology", "id", "42")
|
||||
```
|
||||
|
||||
And if you use subrouters, host and path defined separately can be built
|
||||
as well:
|
||||
And if you use subrouters, host and path defined separately can be built as well:
|
||||
|
||||
r := mux.NewRouter()
|
||||
s := r.Host("{subdomain}.domain.com").Subrouter()
|
||||
s.Path("/articles/{category}/{id:[0-9]+}").
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
s := r.Host("{subdomain}.domain.com").Subrouter()
|
||||
s.Path("/articles/{category}/{id:[0-9]+}").
|
||||
HandlerFunc(ArticleHandler).
|
||||
Name("article")
|
||||
|
||||
// "http://news.domain.com/articles/technology/42"
|
||||
url, err := r.Get("article").URL("subdomain", "news",
|
||||
// "http://news.domain.com/articles/technology/42"
|
||||
url, err := r.Get("article").URL("subdomain", "news",
|
||||
"category", "technology",
|
||||
"id", "42")
|
||||
```
|
||||
|
||||
### Walking Routes
|
||||
|
||||
The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example,
|
||||
the following prints all of the registered routes:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
func handler(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
func main() {
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/", handler)
|
||||
r.HandleFunc("/products", handler).Methods("POST")
|
||||
r.HandleFunc("/articles", handler).Methods("GET")
|
||||
r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
|
||||
r.HandleFunc("/authors", handler).Queries("surname", "{surname}")
|
||||
err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
|
||||
pathTemplate, err := route.GetPathTemplate()
|
||||
if err == nil {
|
||||
fmt.Println("ROUTE:", pathTemplate)
|
||||
}
|
||||
pathRegexp, err := route.GetPathRegexp()
|
||||
if err == nil {
|
||||
fmt.Println("Path regexp:", pathRegexp)
|
||||
}
|
||||
queriesTemplates, err := route.GetQueriesTemplates()
|
||||
if err == nil {
|
||||
fmt.Println("Queries templates:", strings.Join(queriesTemplates, ","))
|
||||
}
|
||||
queriesRegexps, err := route.GetQueriesRegexp()
|
||||
if err == nil {
|
||||
fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ","))
|
||||
}
|
||||
methods, err := route.GetMethods()
|
||||
if err == nil {
|
||||
fmt.Println("Methods:", strings.Join(methods, ","))
|
||||
}
|
||||
fmt.Println()
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
|
||||
http.Handle("/", r)
|
||||
}
|
||||
```
|
||||
|
||||
### Graceful Shutdown
|
||||
|
||||
Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var wait time.Duration
|
||||
flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m")
|
||||
flag.Parse()
|
||||
|
||||
r := mux.NewRouter()
|
||||
// Add your routes as needed
|
||||
|
||||
srv := &http.Server{
|
||||
Addr: "0.0.0.0:8080",
|
||||
// Good practice to set timeouts to avoid Slowloris attacks.
|
||||
WriteTimeout: time.Second * 15,
|
||||
ReadTimeout: time.Second * 15,
|
||||
IdleTimeout: time.Second * 60,
|
||||
Handler: r, // Pass our instance of gorilla/mux in.
|
||||
}
|
||||
|
||||
// Run our server in a goroutine so that it doesn't block.
|
||||
go func() {
|
||||
if err := srv.ListenAndServe(); err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}()
|
||||
|
||||
c := make(chan os.Signal, 1)
|
||||
// We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C)
|
||||
// SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught.
|
||||
signal.Notify(c, os.Interrupt)
|
||||
|
||||
// Block until we receive our signal.
|
||||
<-c
|
||||
|
||||
// Create a deadline to wait for.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), wait)
|
||||
defer cancel()
|
||||
// Doesn't block if no connections, but will otherwise wait
|
||||
// until the timeout deadline.
|
||||
srv.Shutdown(ctx)
|
||||
// Optionally, you could run srv.Shutdown in a goroutine and block on
|
||||
// <-ctx.Done() if your application should wait for other services
|
||||
// to finalize based on context cancellation.
|
||||
log.Println("shutting down")
|
||||
os.Exit(0)
|
||||
}
|
||||
```
|
||||
|
||||
### Middleware
|
||||
|
||||
Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters.
|
||||
Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking.
|
||||
|
||||
Mux middlewares are defined using the de facto standard type:
|
||||
|
||||
```go
|
||||
type MiddlewareFunc func(http.Handler) http.Handler
|
||||
```
|
||||
|
||||
Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able access variables from the context where they are created, while retaining the signature enforced by the receivers.
|
||||
|
||||
A very basic middleware which logs the URI of the request being handled could be written as:
|
||||
|
||||
```go
|
||||
func loggingMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Do stuff here
|
||||
log.Println(r.RequestURI)
|
||||
// Call the next handler, which can be another middleware in the chain, or the final handler.
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
Middlewares can be added to a router using `Router.Use()`:
|
||||
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/", handler)
|
||||
r.Use(loggingMiddleware)
|
||||
```
|
||||
|
||||
A more complex authentication middleware, which maps session token to users, could be written as:
|
||||
|
||||
```go
|
||||
// Define our struct
|
||||
type authenticationMiddleware struct {
|
||||
tokenUsers map[string]string
|
||||
}
|
||||
|
||||
// Initialize it somewhere
|
||||
func (amw *authenticationMiddleware) Populate() {
|
||||
amw.tokenUsers["00000000"] = "user0"
|
||||
amw.tokenUsers["aaaaaaaa"] = "userA"
|
||||
amw.tokenUsers["05f717e5"] = "randomUser"
|
||||
amw.tokenUsers["deadbeef"] = "user0"
|
||||
}
|
||||
|
||||
// Middleware function, which will be called for each request
|
||||
func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
token := r.Header.Get("X-Session-Token")
|
||||
|
||||
if user, found := amw.tokenUsers[token]; found {
|
||||
// We found the token in our map
|
||||
log.Printf("Authenticated user %s\n", user)
|
||||
// Pass down the request to the next middleware (or final handler)
|
||||
next.ServeHTTP(w, r)
|
||||
} else {
|
||||
// Write an error and stop the handler chain
|
||||
http.Error(w, "Forbidden", http.StatusForbidden)
|
||||
}
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/", handler)
|
||||
|
||||
amw := authenticationMiddleware{}
|
||||
amw.Populate()
|
||||
|
||||
r.Use(amw.Middleware)
|
||||
```
|
||||
|
||||
Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it.
|
||||
|
||||
### Testing Handlers
|
||||
|
||||
Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_.
|
||||
|
||||
First, our simple HTTP handler:
|
||||
|
||||
```go
|
||||
// endpoints.go
|
||||
package main
|
||||
|
||||
func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// A very simple health check.
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
// In the future we could report back on the status of our DB, or our cache
|
||||
// (e.g. Redis) by performing a simple PING, and include them in the response.
|
||||
io.WriteString(w, `{"alive": true}`)
|
||||
}
|
||||
|
||||
func main() {
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/health", HealthCheckHandler)
|
||||
|
||||
log.Fatal(http.ListenAndServe("localhost:8080", r))
|
||||
}
|
||||
```
|
||||
|
||||
Our test code:
|
||||
|
||||
```go
|
||||
// endpoints_test.go
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestHealthCheckHandler(t *testing.T) {
|
||||
// Create a request to pass to our handler. We don't have any query parameters for now, so we'll
|
||||
// pass 'nil' as the third parameter.
|
||||
req, err := http.NewRequest("GET", "/health", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
|
||||
rr := httptest.NewRecorder()
|
||||
handler := http.HandlerFunc(HealthCheckHandler)
|
||||
|
||||
// Our handlers satisfy http.Handler, so we can call their ServeHTTP method
|
||||
// directly and pass in our Request and ResponseRecorder.
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
// Check the status code is what we expect.
|
||||
if status := rr.Code; status != http.StatusOK {
|
||||
t.Errorf("handler returned wrong status code: got %v want %v",
|
||||
status, http.StatusOK)
|
||||
}
|
||||
|
||||
// Check the response body is what we expect.
|
||||
expected := `{"alive": true}`
|
||||
if rr.Body.String() != expected {
|
||||
t.Errorf("handler returned unexpected body: got %v want %v",
|
||||
rr.Body.String(), expected)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
In the case that our routes have [variables](#examples), we can pass those in the request. We could write
|
||||
[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple
|
||||
possible route variables as needed.
|
||||
|
||||
```go
|
||||
// endpoints.go
|
||||
func main() {
|
||||
r := mux.NewRouter()
|
||||
// A route with a route variable:
|
||||
r.HandleFunc("/metrics/{type}", MetricsHandler)
|
||||
|
||||
log.Fatal(http.ListenAndServe("localhost:8080", r))
|
||||
}
|
||||
```
|
||||
|
||||
Our test file, with a table-driven test of `routeVariables`:
|
||||
|
||||
```go
|
||||
// endpoints_test.go
|
||||
func TestMetricsHandler(t *testing.T) {
|
||||
tt := []struct{
|
||||
routeVariable string
|
||||
shouldPass bool
|
||||
}{
|
||||
{"goroutines", true},
|
||||
{"heap", true},
|
||||
{"counters", true},
|
||||
{"queries", true},
|
||||
{"adhadaeqm3k", false},
|
||||
}
|
||||
|
||||
for _, tc := range tt {
|
||||
path := fmt.Sprintf("/metrics/%s", tc.routeVariable)
|
||||
req, err := http.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
// Need to create a router that we can pass the request through so that the vars will be added to the context
|
||||
router := mux.NewRouter()
|
||||
router.HandleFunc("/metrics/{type}", MetricsHandler)
|
||||
router.ServeHTTP(rr, req)
|
||||
|
||||
// In this case, our MetricsHandler returns a non-200 response
|
||||
// for a route variable it doesn't know about.
|
||||
if rr.Code == http.StatusOK && !tc.shouldPass {
|
||||
t.Errorf("handler should have failed on routeVariable %s: got %v want %v",
|
||||
tc.routeVariable, rr.Code, http.StatusOK)
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Full Example
|
||||
|
||||
Here's a complete, runnable example of a small mux based server:
|
||||
Here's a complete, runnable example of a small `mux` based server:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"log"
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
|
@ -226,7 +640,7 @@ func main() {
|
|||
r.HandleFunc("/", YourHandler)
|
||||
|
||||
// Bind to a port and pass our router in
|
||||
http.ListenAndServe(":8000", r)
|
||||
log.Fatal(http.ListenAndServe(":8000", r))
|
||||
}
|
||||
```
|
||||
|
||||
|
|
26 vendor/github.com/gorilla/mux/context_gorilla.go (generated, vendored, new file)
|
@ -0,0 +1,26 @@
|
|||
// +build !go1.7
|
||||
|
||||
package mux
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gorilla/context"
|
||||
)
|
||||
|
||||
func contextGet(r *http.Request, key interface{}) interface{} {
|
||||
return context.Get(r, key)
|
||||
}
|
||||
|
||||
func contextSet(r *http.Request, key, val interface{}) *http.Request {
|
||||
if val == nil {
|
||||
return r
|
||||
}
|
||||
|
||||
context.Set(r, key, val)
|
||||
return r
|
||||
}
|
||||
|
||||
func contextClear(r *http.Request) {
|
||||
context.Clear(r)
|
||||
}
|
24 vendor/github.com/gorilla/mux/context_native.go (generated, vendored, new file)
|
@ -0,0 +1,24 @@
|
|||
// +build go1.7
|
||||
|
||||
package mux
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func contextGet(r *http.Request, key interface{}) interface{} {
|
||||
return r.Context().Value(key)
|
||||
}
|
||||
|
||||
func contextSet(r *http.Request, key, val interface{}) *http.Request {
|
||||
if val == nil {
|
||||
return r
|
||||
}
|
||||
|
||||
return r.WithContext(context.WithValue(r.Context(), key, val))
|
||||
}
|
||||
|
||||
func contextClear(r *http.Request) {
|
||||
return
|
||||
}
|
112 vendor/github.com/gorilla/mux/doc.go (generated, vendored)
|
@ -3,7 +3,7 @@
|
|||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package gorilla/mux implements a request router and dispatcher.
|
||||
Package mux implements a request router and dispatcher.
|
||||
|
||||
The name mux stands for "HTTP request multiplexer". Like the standard
|
||||
http.ServeMux, mux.Router matches incoming requests against a list of
|
||||
|
@ -12,8 +12,8 @@ or other conditions. The main features are:
|
|||
|
||||
* Requests can be matched based on URL host, path, path prefix, schemes,
|
||||
header and query values, HTTP methods or using custom matchers.
|
||||
* URL hosts and paths can have variables with an optional regular
|
||||
expression.
|
||||
* URL hosts, paths and query values can have variables with an optional
|
||||
regular expression.
|
||||
* Registered URLs can be built, or "reversed", which helps maintaining
|
||||
references to resources.
|
||||
* Routes can be used as subrouters: nested routes are only tested if the
|
||||
|
@ -47,12 +47,21 @@ variable will be anything until the next slash. For example:
|
|||
r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
|
||||
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
|
||||
|
||||
Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
|
||||
|
||||
r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
|
||||
|
||||
The names are used to create a map of route variables which can be retrieved
|
||||
calling mux.Vars():
|
||||
|
||||
vars := mux.Vars(request)
|
||||
category := vars["category"]
|
||||
|
||||
Note that if any capturing groups are present, mux will panic() during parsing. To prevent
|
||||
this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
|
||||
"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
|
||||
when capturing groups were present.
|
||||
|
||||
And this is all you need to know about the basic usage. More advanced options
|
||||
are explained below.
|
||||
|
||||
|
@ -136,6 +145,31 @@ the inner routes use it as base for their paths:
|
|||
// "/products/{key}/details"
|
||||
s.HandleFunc("/{key}/details", ProductDetailsHandler)
|
||||
|
||||
Note that the path provided to PathPrefix() represents a "wildcard": calling
|
||||
PathPrefix("/static/").Handler(...) means that the handler will be passed any
|
||||
request that matches "/static/*". This makes it easy to serve static files with mux:
|
||||
|
||||
func main() {
|
||||
var dir string
|
||||
|
||||
flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
|
||||
flag.Parse()
|
||||
r := mux.NewRouter()
|
||||
|
||||
// This will serve files under http://localhost:8000/static/<filename>
|
||||
r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
|
||||
|
||||
srv := &http.Server{
|
||||
Handler: r,
|
||||
Addr: "127.0.0.1:8000",
|
||||
// Good practice: enforce timeouts for servers you create!
|
||||
WriteTimeout: 15 * time.Second,
|
||||
ReadTimeout: 15 * time.Second,
|
||||
}
|
||||
|
||||
log.Fatal(srv.ListenAndServe())
|
||||
}
|
||||
|
||||
Now let's see how to build registered URLs.
|
||||
|
||||
Routes can be named. All routes that define a name can have their URLs built,
|
||||
|
@ -154,18 +188,20 @@ key/value pairs for the route variables. For the previous route, we would do:
|
|||
|
||||
"/articles/technology/42"
|
||||
|
||||
This also works for host variables:
|
||||
This also works for host and query value variables:
|
||||
|
||||
r := mux.NewRouter()
|
||||
r.Host("{subdomain}.domain.com").
|
||||
Path("/articles/{category}/{id:[0-9]+}").
|
||||
Queries("filter", "{filter}").
|
||||
HandlerFunc(ArticleHandler).
|
||||
Name("article")
|
||||
|
||||
// url.String() will be "http://news.domain.com/articles/technology/42"
|
||||
// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
|
||||
url, err := r.Get("article").URL("subdomain", "news",
|
||||
"category", "technology",
|
||||
"id", "42")
|
||||
"id", "42",
|
||||
"filter", "gorilla")
|
||||
|
||||
All variables defined in the route are required, and their values must
|
||||
conform to the corresponding patterns. These requirements guarantee that a
|
||||
|
@ -202,5 +238,69 @@ as well:
|
|||
url, err := r.Get("article").URL("subdomain", "news",
|
||||
"category", "technology",
|
||||
"id", "42")
|
||||
|
||||
Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking.
|
||||
|
||||
type MiddlewareFunc func(http.Handler) http.Handler
|
||||
|
||||
Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created).
|
||||
|
||||
A very basic middleware which logs the URI of the request being handled could be written as:
|
||||
|
||||
func simpleMw(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Do stuff here
|
||||
log.Println(r.RequestURI)
|
||||
// Call the next handler, which can be another middleware in the chain, or the final handler.
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
Middlewares can be added to a router using `Router.Use()`:
|
||||
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/", handler)
|
||||
r.Use(simpleMw)
|
||||
|
||||
A more complex authentication middleware, which maps session token to users, could be written as:
|
||||
|
||||
// Define our struct
|
||||
type authenticationMiddleware struct {
|
||||
tokenUsers map[string]string
|
||||
}
|
||||
|
||||
// Initialize it somewhere
|
||||
func (amw *authenticationMiddleware) Populate() {
|
||||
amw.tokenUsers["00000000"] = "user0"
|
||||
amw.tokenUsers["aaaaaaaa"] = "userA"
|
||||
amw.tokenUsers["05f717e5"] = "randomUser"
|
||||
amw.tokenUsers["deadbeef"] = "user0"
|
||||
}
|
||||
|
||||
// Middleware function, which will be called for each request
|
||||
func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
token := r.Header.Get("X-Session-Token")
|
||||
|
||||
if user, found := amw.tokenUsers[token]; found {
|
||||
// We found the token in our map
|
||||
log.Printf("Authenticated user %s\n", user)
|
||||
next.ServeHTTP(w, r)
|
||||
} else {
|
||||
http.Error(w, "Forbidden", http.StatusForbidden)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/", handler)
|
||||
|
||||
amw := authenticationMiddleware{}
|
||||
amw.Populate()
|
||||
|
||||
r.Use(amw.Middleware)
|
||||
|
||||
Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to.
|
||||
|
||||
*/
|
||||
package mux
|
||||
|
|
72 vendor/github.com/gorilla/mux/middleware.go (generated, vendored, new file)
|
@ -0,0 +1,72 @@
|
|||
package mux
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler.
|
||||
// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed
|
||||
// to it, and then calls the handler passed as parameter to the MiddlewareFunc.
|
||||
type MiddlewareFunc func(http.Handler) http.Handler
|
||||
|
||||
// middleware interface is anything which implements a MiddlewareFunc named Middleware.
|
||||
type middleware interface {
|
||||
Middleware(handler http.Handler) http.Handler
|
||||
}
|
||||
|
||||
// Middleware allows MiddlewareFunc to implement the middleware interface.
|
||||
func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler {
|
||||
return mw(handler)
|
||||
}
|
||||
|
||||
// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
|
||||
func (r *Router) Use(mwf ...MiddlewareFunc) {
|
||||
for _, fn := range mwf {
|
||||
r.middlewares = append(r.middlewares, fn)
|
||||
}
|
||||
}
|
||||
|
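// Usage sketch (added, not part of the vendored file): middlewares registered
// via Use run in registration order once a route matches, e.g. from a caller:
//
//   r := mux.NewRouter()
//   r.Use(loggingMiddleware)        // hypothetical MiddlewareFunc
//   r.Use(CORSMethodMiddleware(r))  // defined later in this file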
||||
// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
|
||||
func (r *Router) useInterface(mw middleware) {
|
||||
r.middlewares = append(r.middlewares, mw)
|
||||
}
|
||||
|
||||
// CORSMethodMiddleware sets the Access-Control-Allow-Methods response header
|
||||
// on a request, by matching routes based only on paths. It also handles
|
||||
// OPTIONS requests, by settings Access-Control-Allow-Methods, and then
|
||||
// returning without calling the next http handler.
|
||||
func CORSMethodMiddleware(r *Router) MiddlewareFunc {
|
||||
return func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
var allMethods []string
|
||||
|
||||
err := r.Walk(func(route *Route, _ *Router, _ []*Route) error {
|
||||
for _, m := range route.matchers {
|
||||
if _, ok := m.(*routeRegexp); ok {
|
||||
if m.Match(req, &RouteMatch{}) {
|
||||
methods, err := route.GetMethods()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
allMethods = append(allMethods, methods...)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
w.Header().Set("Access-Control-Allow-Methods", strings.Join(append(allMethods, "OPTIONS"), ","))
|
||||
|
||||
if req.Method == "OPTIONS" {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
next.ServeHTTP(w, req)
|
||||
})
|
||||
}
|
||||
}
|
169 vendor/github.com/gorilla/mux/mux.go (generated, vendored)
|
@@ -10,8 +10,14 @@ import (
	"net/http"
	"path"
	"regexp"
)

	"github.com/gorilla/context"
var (
	// ErrMethodMismatch is returned when the method in the request does not match
	// the method defined against the route.
	ErrMethodMismatch = errors.New("method is not allowed")
	// ErrNotFound is returned when no route match is found.
	ErrNotFound = errors.New("no matching route was found")
)

// NewRouter returns a new router instance.

@@ -40,6 +46,10 @@ func NewRouter() *Router {
type Router struct {
	// Configurable Handler to be used when no route matches.
	NotFoundHandler http.Handler

	// Configurable Handler to be used when the request method does not match the route.
	MethodNotAllowedHandler http.Handler

	// Parent route, if this is a subrouter.
	parent parentRoute
	// Routes to be matched, in order.

@@ -48,17 +58,59 @@ type Router struct {
	namedRoutes map[string]*Route
	// See Router.StrictSlash(). This defines the flag for new routes.
	strictSlash bool
	// If true, do not clear the request context after handling the request
	// See Router.SkipClean(). This defines the flag for new routes.
	skipClean bool
	// If true, do not clear the request context after handling the request.
	// This has no effect when go1.7+ is used, since the context is stored
	// on the request itself.
	KeepContext bool
	// see Router.UseEncodedPath(). This defines a flag for all routes.
	useEncodedPath bool
	// Slice of middlewares to be called after a match is found
	middlewares []middleware
}

// Match matches registered routes against the request.
// Match attempts to match the given request against the router's registered routes.
//
// If the request matches a route of this router or one of its subrouters the Route,
// Handler, and Vars fields of the the match argument are filled and this function
// returns true.
//
// If the request does not match any of this router's or its subrouters' routes
// then this function returns false. If available, a reason for the match failure
// will be filled in the match argument's MatchErr field. If the match failure type
// (eg: not found) has a registered handler, the handler is assigned to the Handler
// field of the match argument.
func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
	for _, route := range r.routes {
		if route.Match(req, match) {
			// Build middleware chain if no error was found
			if match.MatchErr == nil {
				for i := len(r.middlewares) - 1; i >= 0; i-- {
					match.Handler = r.middlewares[i].Middleware(match.Handler)
				}
			}
			return true
		}
	}

	if match.MatchErr == ErrMethodMismatch {
		if r.MethodNotAllowedHandler != nil {
			match.Handler = r.MethodNotAllowedHandler
			return true
		}

		return false
	}

	// Closest match for a router (includes sub-routers)
	if r.NotFoundHandler != nil {
		match.Handler = r.NotFoundHandler
		match.MatchErr = ErrNotFound
		return true
	}

	match.MatchErr = ErrNotFound
	return false
}
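The hunk above introduces ErrMethodMismatch and the configurable MethodNotAllowedHandler field. A minimal sketch of how an application could plug in a custom 405 response; the route, handler and port are illustrative only:

```go
package main

import (
	"net/http"

	"github.com/gorilla/mux"
)

func listItems(w http.ResponseWriter, req *http.Request) {
	w.Write([]byte("items\n"))
}

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/items", listItems).Methods(http.MethodGet)

	// A request with a known path but wrong method (e.g. POST /items) is now
	// routed here instead of the generic 404 handler.
	r.MethodNotAllowedHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
	})

	http.ListenAndServe(":8080", r)
}
```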
@@ -67,10 +119,15 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
// When there is a match, the route variables can be retrieved calling
// mux.Vars(request).
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	if !r.skipClean {
		path := req.URL.Path
		if r.useEncodedPath {
			path = req.URL.EscapedPath()
		}
		// Clean path to canonical form and redirect.
		if p := cleanPath(req.URL.Path); p != req.URL.Path {
		if p := cleanPath(path); p != path {

			// Added 3 lines (Philip Schlump) - It was droping the query string and #whatever from query.
			// Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
			// This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue:
			// http://code.google.com/p/go/issues/detail?id=5252
			url := *req.URL

@@ -81,22 +138,27 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
			w.WriteHeader(http.StatusMovedPermanently)
			return
		}
	}
	var match RouteMatch
	var handler http.Handler
	if r.Match(req, &match) {
		handler = match.Handler
		setVars(req, match.Vars)
		setCurrentRoute(req, match.Route)
		req = setVars(req, match.Vars)
		req = setCurrentRoute(req, match.Route)
	}
	if handler == nil {
		handler = r.NotFoundHandler

	if handler == nil && match.MatchErr == ErrMethodMismatch {
		handler = methodNotAllowedHandler()
	}

	if handler == nil {
		handler = http.NotFoundHandler()
	}
	}

	if !r.KeepContext {
		defer context.Clear(req)
		defer contextClear(req)
	}

	handler.ServeHTTP(w, req)
}

@@ -114,13 +176,18 @@ func (r *Router) GetRoute(name string) *Route {
// StrictSlash defines the trailing slash behavior for new routes. The initial
// value is false.
//
// When true, if the route path is "/path/", accessing "/path" will redirect
// When true, if the route path is "/path/", accessing "/path" will perform a redirect
// to the former and vice versa. In other words, your application will always
// see the path as specified in the route.
//
// When false, if the route path is "/path", accessing "/path/" will not match
// this route and vice versa.
//
// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for
// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed
// request will be made as a GET by most clients. Use middleware or client settings
// to modify this behaviour as needed.
//
// Special case: when a route sets a path prefix using the PathPrefix() method,
// strict slash is ignored for that route because the redirect behavior can't
// be determined from a prefix alone. However, any subrouters created from that

@@ -130,10 +197,41 @@ func (r *Router) StrictSlash(value bool) *Router {
	return r
}

// SkipClean defines the path cleaning behaviour for new routes. The initial
// value is false. Users should be careful about which routes are not cleaned
//
// When true, if the route path is "/path//to", it will remain with the double
// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
//
// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
// become /fetch/http/xkcd.com/534
func (r *Router) SkipClean(value bool) *Router {
	r.skipClean = value
	return r
}

// UseEncodedPath tells the router to match the encoded original path
// to the routes.
// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
//
// If not called, the router will match the unencoded path to the routes.
// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to"
func (r *Router) UseEncodedPath() *Router {
	r.useEncodedPath = true
	return r
}

// ----------------------------------------------------------------------------
// parentRoute
// ----------------------------------------------------------------------------

func (r *Router) getBuildScheme() string {
	if r.parent != nil {
		return r.parent.getBuildScheme()
	}
	return ""
}

// getNamedRoutes returns the map where named routes are registered.
func (r *Router) getNamedRoutes() map[string]*Route {
	if r.namedRoutes == nil {
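The three options documented above (StrictSlash, SkipClean, UseEncodedPath) are per-router switches that new routes inherit. A small configuration sketch based only on those doc comments; the route pattern and port are placeholders:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.StrictSlash(true) // "/files/{name}" and "/files/{name}/" redirect to one another
	r.SkipClean(true)   // keep double slashes instead of cleaning the path
	r.UseEncodedPath()  // match the encoded path, so {name} can capture "a%2Fb"

	r.HandleFunc("/files/{name}", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintln(w, mux.Vars(req)["name"])
	})

	http.ListenAndServe(":8080", r)
}
```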
@@ -167,7 +265,7 @@ func (r *Router) buildVars(m map[string]string) map[string]string {

// NewRoute registers an empty route.
func (r *Router) NewRoute() *Route {
	route := &Route{parent: r, strictSlash: r.strictSlash}
	route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath}
	r.routes = append(r.routes, route)
	return route
}

@@ -233,7 +331,7 @@ func (r *Router) Schemes(schemes ...string) *Route {
	return r.NewRoute().Schemes(schemes...)
}

// BuildVars registers a new route with a custom function for modifying
// BuildVarsFunc registers a new route with a custom function for modifying
// route variables before building a URL.
func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
	return r.NewRoute().BuildVarsFunc(f)

@@ -257,20 +355,21 @@ type WalkFunc func(route *Route, router *Router, ancestors []*Route) error

func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
	for _, t := range r.routes {
		if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" {
			continue
		}

		err := walkFn(t, r, ancestors)
		if err == SkipRouter {
			continue
		}
		if err != nil {
			return err
		}
		for _, sr := range t.matchers {
			if h, ok := sr.(*Router); ok {
				ancestors = append(ancestors, t)
				err := h.walk(walkFn, ancestors)
				if err != nil {
					return err
				}
				ancestors = ancestors[:len(ancestors)-1]
			}
		}
		if h, ok := t.handler.(*Router); ok {
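The walk helper above backs the exported Router.Walk. A short sketch that lists every registered route with its path template and methods, using the GetPathTemplate and GetMethods accessors this vendored version provides; the routes themselves are illustrative:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func ok(w http.ResponseWriter, r *http.Request) {}

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/", ok)
	r.HandleFunc("/products/{id}", ok).Methods(http.MethodGet, http.MethodPut)

	_ = r.Walk(func(route *mux.Route, _ *mux.Router, _ []*mux.Route) error {
		tpl, _ := route.GetPathTemplate()
		methods, _ := route.GetMethods() // an error simply means no methods were registered
		fmt.Println(tpl, methods)
		return nil
	})
}
```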
@@ -294,6 +393,11 @@ type RouteMatch struct {
	Route   *Route
	Handler http.Handler
	Vars    map[string]string

	// MatchErr is set to appropriate matching error
	// It is set to ErrMethodMismatch if there is a mismatch in
	// the request method and route method
	MatchErr error
}

type contextKey int

@@ -305,7 +409,7 @@ const (

// Vars returns the route variables for the current request, if any.
func Vars(r *http.Request) map[string]string {
	if rv := context.Get(r, varsKey); rv != nil {
	if rv := contextGet(r, varsKey); rv != nil {
		return rv.(map[string]string)
	}
	return nil

@@ -317,18 +421,18 @@ func Vars(r *http.Request) map[string]string {
// after the handler returns, unless the KeepContext option is set on the
// Router.
func CurrentRoute(r *http.Request) *Route {
	if rv := context.Get(r, routeKey); rv != nil {
	if rv := contextGet(r, routeKey); rv != nil {
		return rv.(*Route)
	}
	return nil
}

func setVars(r *http.Request, val interface{}) {
	context.Set(r, varsKey, val)
func setVars(r *http.Request, val interface{}) *http.Request {
	return contextSet(r, varsKey, val)
}

func setCurrentRoute(r *http.Request, val interface{}) {
	context.Set(r, routeKey, val)
func setCurrentRoute(r *http.Request, val interface{}) *http.Request {
	return contextSet(r, routeKey, val)
}

// ----------------------------------------------------------------------------
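setVars and setCurrentRoute now return a shallow request copy, which is what lets mux.Vars and mux.CurrentRoute work with the go1.7+ request context. A minimal handler sketch using both accessors; the path pattern and port are placeholders:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func articleHandler(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req) // populated via the setVars call shown above
	tpl, _ := mux.CurrentRoute(req).GetPathTemplate()
	fmt.Fprintf(w, "category=%s id=%s route=%s\n", vars["category"], vars["id"], tpl)
}

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/articles/{category}/{id:[0-9]+}", articleHandler)
	http.ListenAndServe(":8080", r)
}
```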
@@ -350,6 +454,7 @@ func cleanPath(p string) string {
	if p[len(p)-1] == '/' && np != "/" {
		np += "/"
	}

	return np
}

@@ -365,6 +470,8 @@ func uniqueVars(s1, s2 []string) error {
	return nil
}

// checkPairs returns the count of strings passed in, and an error if
// the count is not an even number.
func checkPairs(pairs ...string) (int, error) {
	length := len(pairs)
	if length%2 != 0 {

@@ -374,7 +481,8 @@ func checkPairs(pairs ...string) (int, error) {
	return length, nil
}

// mapFromPairs converts variadic string parameters to a string map.
// mapFromPairsToString converts variadic string parameters to a
// string to string map.
func mapFromPairsToString(pairs ...string) (map[string]string, error) {
	length, err := checkPairs(pairs...)
	if err != nil {

@@ -387,6 +495,8 @@ func mapFromPairsToString(pairs ...string) (map[string]string, error) {
	return m, nil
}

// mapFromPairsToRegex converts variadic string parameters to a
// string to regex map.
func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
	length, err := checkPairs(pairs...)
	if err != nil {

@@ -467,3 +577,12 @@ func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]s
	}
	return true
}

// methodNotAllowed replies to the request with an HTTP status code 405.
func methodNotAllowed(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusMethodNotAllowed)
}

// methodNotAllowedHandler returns a simple request handler
// that replies to each request with a status code 405.
func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) }
143 vendor/github.com/gorilla/mux/regexp.go generated vendored
|
@ -14,6 +14,20 @@ import (
|
|||
"strings"
|
||||
)
|
||||
|
||||
type routeRegexpOptions struct {
|
||||
strictSlash bool
|
||||
useEncodedPath bool
|
||||
}
|
||||
|
||||
type regexpType int
|
||||
|
||||
const (
|
||||
regexpTypePath regexpType = 0
|
||||
regexpTypeHost regexpType = 1
|
||||
regexpTypePrefix regexpType = 2
|
||||
regexpTypeQuery regexpType = 3
|
||||
)
|
||||
|
||||
// newRouteRegexp parses a route template and returns a routeRegexp,
|
||||
// used to match a host, a path or a query string.
|
||||
//
|
||||
|
@ -24,7 +38,7 @@ import (
|
|||
// Previously we accepted only Python-like identifiers for variable
|
||||
// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
|
||||
// name and pattern can't be empty, and names can't contain a colon.
|
||||
func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) {
|
||||
func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) {
|
||||
// Check if it is well-formed.
|
||||
idxs, errBraces := braceIndices(tpl)
|
||||
if errBraces != nil {
|
||||
|
@ -34,19 +48,18 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash
|
|||
template := tpl
|
||||
// Now let's parse it.
|
||||
defaultPattern := "[^/]+"
|
||||
if matchQuery {
|
||||
defaultPattern = "[^?&]*"
|
||||
} else if matchHost {
|
||||
if typ == regexpTypeQuery {
|
||||
defaultPattern = ".*"
|
||||
} else if typ == regexpTypeHost {
|
||||
defaultPattern = "[^.]+"
|
||||
matchPrefix = false
|
||||
}
|
||||
// Only match strict slash if not matching
|
||||
if matchPrefix || matchHost || matchQuery {
|
||||
strictSlash = false
|
||||
if typ != regexpTypePath {
|
||||
options.strictSlash = false
|
||||
}
|
||||
// Set a flag for strictSlash.
|
||||
endSlash := false
|
||||
if strictSlash && strings.HasSuffix(tpl, "/") {
|
||||
if options.strictSlash && strings.HasSuffix(tpl, "/") {
|
||||
tpl = tpl[:len(tpl)-1]
|
||||
endSlash = true
|
||||
}
|
||||
|
@ -73,14 +86,14 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash
|
|||
tpl[idxs[i]:end])
|
||||
}
|
||||
// Build the regexp pattern.
|
||||
varIdx := i / 2
|
||||
fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(varIdx), patt)
|
||||
fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt)
|
||||
|
||||
// Build the reverse template.
|
||||
fmt.Fprintf(reverse, "%s%%s", raw)
|
||||
|
||||
// Append variable name and compiled pattern.
|
||||
varsN[varIdx] = name
|
||||
varsR[varIdx], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
|
||||
varsN[i/2] = name
|
||||
varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -88,16 +101,16 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash
|
|||
// Add the remaining.
|
||||
raw := tpl[end:]
|
||||
pattern.WriteString(regexp.QuoteMeta(raw))
|
||||
if strictSlash {
|
||||
if options.strictSlash {
|
||||
pattern.WriteString("[/]?")
|
||||
}
|
||||
if matchQuery {
|
||||
if typ == regexpTypeQuery {
|
||||
// Add the default pattern if the query value is empty
|
||||
if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
|
||||
pattern.WriteString(defaultPattern)
|
||||
}
|
||||
}
|
||||
if !matchPrefix {
|
||||
if typ != regexpTypePrefix {
|
||||
pattern.WriteByte('$')
|
||||
}
|
||||
reverse.WriteString(raw)
|
||||
|
@ -109,12 +122,18 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash
|
|||
if errCompile != nil {
|
||||
return nil, errCompile
|
||||
}
|
||||
|
||||
// Check for capturing groups which used to work in older versions
|
||||
if reg.NumSubexp() != len(idxs)/2 {
|
||||
panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
|
||||
"Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
|
||||
}
|
||||
|
||||
// Done!
|
||||
return &routeRegexp{
|
||||
template: template,
|
||||
matchHost: matchHost,
|
||||
matchQuery: matchQuery,
|
||||
strictSlash: strictSlash,
|
||||
regexpType: typ,
|
||||
options: options,
|
||||
regexp: reg,
|
||||
reverse: reverse.String(),
|
||||
varsN: varsN,
|
||||
|
@ -127,12 +146,10 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash
|
|||
type routeRegexp struct {
|
||||
// The unmodified template.
|
||||
template string
|
||||
// True for host match, false for path or query string match.
|
||||
matchHost bool
|
||||
// True for query string match, false for path and host match.
|
||||
matchQuery bool
|
||||
// The strictSlash value defined on the route, but disabled if PathPrefix was used.
|
||||
strictSlash bool
|
||||
// The type of match
|
||||
regexpType regexpType
|
||||
// Options for matching
|
||||
options routeRegexpOptions
|
||||
// Expanded regexp.
|
||||
regexp *regexp.Regexp
|
||||
// Reverse template.
|
||||
|
@ -145,13 +162,17 @@ type routeRegexp struct {
|
|||
|
||||
// Match matches the regexp against the URL host or path.
|
||||
func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
|
||||
if !r.matchHost {
|
||||
if r.matchQuery {
|
||||
if r.regexpType != regexpTypeHost {
|
||||
if r.regexpType == regexpTypeQuery {
|
||||
return r.matchQueryString(req)
|
||||
} else {
|
||||
return r.regexp.MatchString(req.URL.Path)
|
||||
}
|
||||
path := req.URL.Path
|
||||
if r.options.useEncodedPath {
|
||||
path = req.URL.EscapedPath()
|
||||
}
|
||||
return r.regexp.MatchString(path)
|
||||
}
|
||||
|
||||
return r.regexp.MatchString(getHost(req))
|
||||
}
|
||||
|
||||
|
@ -163,6 +184,9 @@ func (r *routeRegexp) url(values map[string]string) (string, error) {
|
|||
if !ok {
|
||||
return "", fmt.Errorf("mux: missing route variable %q", v)
|
||||
}
|
||||
if r.regexpType == regexpTypeQuery {
|
||||
value = url.QueryEscape(value)
|
||||
}
|
||||
urlValues[k] = value
|
||||
}
|
||||
rv := fmt.Sprintf(r.reverse, urlValues...)
|
||||
|
@ -181,11 +205,11 @@ func (r *routeRegexp) url(values map[string]string) (string, error) {
|
|||
return rv, nil
|
||||
}
|
||||
|
||||
// getUrlQuery returns a single query parameter from a request URL.
|
||||
// getURLQuery returns a single query parameter from a request URL.
|
||||
// For a URL with foo=bar&baz=ding, we return only the relevant key
|
||||
// value pair for the routeRegexp.
|
||||
func (r *routeRegexp) getUrlQuery(req *http.Request) string {
|
||||
if !r.matchQuery {
|
||||
func (r *routeRegexp) getURLQuery(req *http.Request) string {
|
||||
if r.regexpType != regexpTypeQuery {
|
||||
return ""
|
||||
}
|
||||
templateKey := strings.SplitN(r.template, "=", 2)[0]
|
||||
|
@ -198,14 +222,14 @@ func (r *routeRegexp) getUrlQuery(req *http.Request) string {
|
|||
}
|
||||
|
||||
func (r *routeRegexp) matchQueryString(req *http.Request) bool {
|
||||
return r.regexp.MatchString(r.getUrlQuery(req))
|
||||
return r.regexp.MatchString(r.getURLQuery(req))
|
||||
}
|
||||
|
||||
// braceIndices returns the first level curly brace indices from a string.
|
||||
// It returns an error in case of unbalanced braces.
|
||||
func braceIndices(s string) ([]int, error) {
|
||||
var level, idx int
|
||||
idxs := make([]int, 0)
|
||||
var idxs []int
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '{':
|
||||
|
@ -246,33 +270,24 @@ type routeRegexpGroup struct {
|
|||
func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
|
||||
// Store host variables.
|
||||
if v.host != nil {
|
||||
hostVars := v.host.regexp.FindStringSubmatch(getHost(req))
|
||||
if hostVars != nil {
|
||||
subexpNames := v.host.regexp.SubexpNames()
|
||||
varName := 0
|
||||
for i, name := range subexpNames[1:] {
|
||||
if name != "" && name == varGroupName(varName) {
|
||||
m.Vars[v.host.varsN[varName]] = hostVars[i+1]
|
||||
varName++
|
||||
}
|
||||
host := getHost(req)
|
||||
matches := v.host.regexp.FindStringSubmatchIndex(host)
|
||||
if len(matches) > 0 {
|
||||
extractVars(host, matches, v.host.varsN, m.Vars)
|
||||
}
|
||||
}
|
||||
path := req.URL.Path
|
||||
if r.useEncodedPath {
|
||||
path = req.URL.EscapedPath()
|
||||
}
|
||||
// Store path variables.
|
||||
if v.path != nil {
|
||||
pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path)
|
||||
if pathVars != nil {
|
||||
subexpNames := v.path.regexp.SubexpNames()
|
||||
varName := 0
|
||||
for i, name := range subexpNames[1:] {
|
||||
if name != "" && name == varGroupName(varName) {
|
||||
m.Vars[v.path.varsN[varName]] = pathVars[i+1]
|
||||
varName++
|
||||
}
|
||||
}
|
||||
matches := v.path.regexp.FindStringSubmatchIndex(path)
|
||||
if len(matches) > 0 {
|
||||
extractVars(path, matches, v.path.varsN, m.Vars)
|
||||
// Check if we should redirect.
|
||||
if v.path.strictSlash {
|
||||
p1 := strings.HasSuffix(req.URL.Path, "/")
|
||||
if v.path.options.strictSlash {
|
||||
p1 := strings.HasSuffix(path, "/")
|
||||
p2 := strings.HasSuffix(v.path.template, "/")
|
||||
if p1 != p2 {
|
||||
u, _ := url.Parse(req.URL.String())
|
||||
|
@ -288,16 +303,10 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route)
|
|||
}
|
||||
// Store query string variables.
|
||||
for _, q := range v.queries {
|
||||
queryVars := q.regexp.FindStringSubmatch(q.getUrlQuery(req))
|
||||
if queryVars != nil {
|
||||
subexpNames := q.regexp.SubexpNames()
|
||||
varName := 0
|
||||
for i, name := range subexpNames[1:] {
|
||||
if name != "" && name == varGroupName(varName) {
|
||||
m.Vars[q.varsN[varName]] = queryVars[i+1]
|
||||
varName++
|
||||
}
|
||||
}
|
||||
queryURL := q.getURLQuery(req)
|
||||
matches := q.regexp.FindStringSubmatchIndex(queryURL)
|
||||
if len(matches) > 0 {
|
||||
extractVars(queryURL, matches, q.varsN, m.Vars)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -315,3 +324,9 @@ func getHost(r *http.Request) string {
|
|||
return host
|
||||
|
||||
}
|
||||
|
||||
func extractVars(input string, matches []int, names []string, output map[string]string) {
|
||||
for i, name := range names {
|
||||
output[name] = input[matches[2*i+2]:matches[2*i+3]]
|
||||
}
|
||||
}
|
||||
|
|
202 vendor/github.com/gorilla/mux/route.go generated vendored
|
@ -26,6 +26,13 @@ type Route struct {
|
|||
// If true, when the path pattern is "/path/", accessing "/path" will
|
||||
// redirect to the former and vice versa.
|
||||
strictSlash bool
|
||||
// If true, when the path pattern is "/path//to", accessing "/path//to"
|
||||
// will not redirect
|
||||
skipClean bool
|
||||
// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
|
||||
useEncodedPath bool
|
||||
// The scheme used when building URLs.
|
||||
buildScheme string
|
||||
// If true, this route never matches: it is only used to build URLs.
|
||||
buildOnly bool
|
||||
// The name used to build URLs.
|
||||
|
@ -36,17 +43,44 @@ type Route struct {
|
|||
buildVarsFunc BuildVarsFunc
|
||||
}
|
||||
|
||||
// SkipClean reports whether path cleaning is enabled for this route via
|
||||
// Router.SkipClean.
|
||||
func (r *Route) SkipClean() bool {
|
||||
return r.skipClean
|
||||
}
|
||||
|
||||
// Match matches the route against the request.
|
||||
func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
|
||||
if r.buildOnly || r.err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
var matchErr error
|
||||
|
||||
// Match everything.
|
||||
for _, m := range r.matchers {
|
||||
if matched := m.Match(req, match); !matched {
|
||||
if _, ok := m.(methodMatcher); ok {
|
||||
matchErr = ErrMethodMismatch
|
||||
continue
|
||||
}
|
||||
matchErr = nil
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if matchErr != nil {
|
||||
match.MatchErr = matchErr
|
||||
return false
|
||||
}
|
||||
|
||||
if match.MatchErr == ErrMethodMismatch {
|
||||
// We found a route which matches request method, clear MatchErr
|
||||
match.MatchErr = nil
|
||||
// Then override the mis-matched handler
|
||||
match.Handler = r.handler
|
||||
}
|
||||
|
||||
// Yay, we have a match. Let's collect some info about it.
|
||||
if match.Route == nil {
|
||||
match.Route = r
|
||||
|
@ -57,6 +91,7 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
|
|||
if match.Vars == nil {
|
||||
match.Vars = make(map[string]string)
|
||||
}
|
||||
|
||||
// Set variables.
|
||||
if r.regexp != nil {
|
||||
r.regexp.setMatch(req, match, r)
|
||||
|
@ -138,20 +173,23 @@ func (r *Route) addMatcher(m matcher) *Route {
|
|||
}
|
||||
|
||||
// addRegexpMatcher adds a host or path matcher and builder to a route.
|
||||
func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
|
||||
func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error {
|
||||
if r.err != nil {
|
||||
return r.err
|
||||
}
|
||||
r.regexp = r.getRegexpGroup()
|
||||
if !matchHost && !matchQuery {
|
||||
if len(tpl) == 0 || tpl[0] != '/' {
|
||||
if typ == regexpTypePath || typ == regexpTypePrefix {
|
||||
if len(tpl) > 0 && tpl[0] != '/' {
|
||||
return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
|
||||
}
|
||||
if r.regexp.path != nil {
|
||||
tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
|
||||
}
|
||||
}
|
||||
rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash)
|
||||
rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{
|
||||
strictSlash: r.strictSlash,
|
||||
useEncodedPath: r.useEncodedPath,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -160,7 +198,7 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery
|
|||
return err
|
||||
}
|
||||
}
|
||||
if matchHost {
|
||||
if typ == regexpTypeHost {
|
||||
if r.regexp.path != nil {
|
||||
if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
|
||||
return err
|
||||
|
@ -173,7 +211,7 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery
|
|||
return err
|
||||
}
|
||||
}
|
||||
if matchQuery {
|
||||
if typ == regexpTypeQuery {
|
||||
r.regexp.queries = append(r.regexp.queries, rr)
|
||||
} else {
|
||||
r.regexp.path = rr
|
||||
|
@ -217,14 +255,16 @@ func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
|
|||
return matchMapWithRegex(m, r.Header, true)
|
||||
}
|
||||
|
||||
// Regular expressions can be used with headers as well.
|
||||
// It accepts a sequence of key/value pairs, where the value has regex support. For example
|
||||
// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
|
||||
// support. For example:
|
||||
//
|
||||
// r := mux.NewRouter()
|
||||
// r.HeadersRegexp("Content-Type", "application/(text|json)",
|
||||
// "X-Requested-With", "XMLHttpRequest")
|
||||
//
|
||||
// The above route will only match if both the request header matches both regular expressions.
|
||||
// It the value is an empty string, it will match any value if the key is set.
|
||||
// If the value is an empty string, it will match any value if the key is set.
|
||||
// Use the start and end of string anchors (^ and $) to match an exact value.
|
||||
func (r *Route) HeadersRegexp(pairs ...string) *Route {
|
||||
if r.err == nil {
|
||||
var headers map[string]*regexp.Regexp
|
||||
|
@ -254,7 +294,7 @@ func (r *Route) HeadersRegexp(pairs ...string) *Route {
|
|||
// Variable names must be unique in a given route. They can be retrieved
|
||||
// calling mux.Vars(request).
|
||||
func (r *Route) Host(tpl string) *Route {
|
||||
r.err = r.addRegexpMatcher(tpl, true, false, false)
|
||||
r.err = r.addRegexpMatcher(tpl, regexpTypeHost)
|
||||
return r
|
||||
}
|
||||
|
||||
|
@ -263,6 +303,7 @@ func (r *Route) Host(tpl string) *Route {
|
|||
// MatcherFunc is the function signature used by custom matchers.
|
||||
type MatcherFunc func(*http.Request, *RouteMatch) bool
|
||||
|
||||
// Match returns the match for a given request.
|
||||
func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
|
||||
return m(r, match)
|
||||
}
|
||||
|
@ -313,7 +354,7 @@ func (r *Route) Methods(methods ...string) *Route {
|
|||
// Variable names must be unique in a given route. They can be retrieved
|
||||
// calling mux.Vars(request).
|
||||
func (r *Route) Path(tpl string) *Route {
|
||||
r.err = r.addRegexpMatcher(tpl, false, false, false)
|
||||
r.err = r.addRegexpMatcher(tpl, regexpTypePath)
|
||||
return r
|
||||
}
|
||||
|
||||
|
@ -329,7 +370,7 @@ func (r *Route) Path(tpl string) *Route {
|
|||
// Also note that the setting of Router.StrictSlash() has no effect on routes
|
||||
// with a PathPrefix matcher.
|
||||
func (r *Route) PathPrefix(tpl string) *Route {
|
||||
r.err = r.addRegexpMatcher(tpl, false, true, false)
|
||||
r.err = r.addRegexpMatcher(tpl, regexpTypePrefix)
|
||||
return r
|
||||
}
|
||||
|
||||
|
@ -360,7 +401,7 @@ func (r *Route) Queries(pairs ...string) *Route {
|
|||
return nil
|
||||
}
|
||||
for i := 0; i < length; i += 2 {
|
||||
if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil {
|
||||
if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil {
|
||||
return r
|
||||
}
|
||||
}
|
||||
|
@ -383,6 +424,9 @@ func (r *Route) Schemes(schemes ...string) *Route {
|
|||
for k, v := range schemes {
|
||||
schemes[k] = strings.ToLower(v)
|
||||
}
|
||||
if r.buildScheme == "" && len(schemes) > 0 {
|
||||
r.buildScheme = schemes[0]
|
||||
}
|
||||
return r.addMatcher(schemeMatcher(schemes))
|
||||
}
|
||||
|
||||
|
@ -466,22 +510,33 @@ func (r *Route) URL(pairs ...string) (*url.URL, error) {
|
|||
return nil, err
|
||||
}
|
||||
var scheme, host, path string
|
||||
queries := make([]string, 0, len(r.regexp.queries))
|
||||
if r.regexp.host != nil {
|
||||
// Set a default scheme.
|
||||
scheme = "http"
|
||||
if host, err = r.regexp.host.url(values); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
scheme = "http"
|
||||
if s := r.getBuildScheme(); s != "" {
|
||||
scheme = s
|
||||
}
|
||||
}
|
||||
if r.regexp.path != nil {
|
||||
if path, err = r.regexp.path.url(values); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
for _, q := range r.regexp.queries {
|
||||
var query string
|
||||
if query, err = q.url(values); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
queries = append(queries, query)
|
||||
}
|
||||
return &url.URL{
|
||||
Scheme: scheme,
|
||||
Host: host,
|
||||
Path: path,
|
||||
RawQuery: strings.Join(queries, "&"),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -503,10 +558,14 @@ func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &url.URL{
|
||||
u := &url.URL{
|
||||
Scheme: "http",
|
||||
Host: host,
|
||||
}, nil
|
||||
}
|
||||
if s := r.getBuildScheme(); s != "" {
|
||||
u.Scheme = s
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
|
||||
// URLPath builds the path part of the URL for a route. See Route.URL().
|
||||
|
@ -532,6 +591,104 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
// GetPathTemplate returns the template used to build the
|
||||
// route match.
|
||||
// This is useful for building simple REST API documentation and for instrumentation
|
||||
// against third-party services.
|
||||
// An error will be returned if the route does not define a path.
|
||||
func (r *Route) GetPathTemplate() (string, error) {
|
||||
if r.err != nil {
|
||||
return "", r.err
|
||||
}
|
||||
if r.regexp == nil || r.regexp.path == nil {
|
||||
return "", errors.New("mux: route doesn't have a path")
|
||||
}
|
||||
return r.regexp.path.template, nil
|
||||
}
|
||||
|
||||
// GetPathRegexp returns the expanded regular expression used to match route path.
|
||||
// This is useful for building simple REST API documentation and for instrumentation
|
||||
// against third-party services.
|
||||
// An error will be returned if the route does not define a path.
|
||||
func (r *Route) GetPathRegexp() (string, error) {
|
||||
if r.err != nil {
|
||||
return "", r.err
|
||||
}
|
||||
if r.regexp == nil || r.regexp.path == nil {
|
||||
return "", errors.New("mux: route does not have a path")
|
||||
}
|
||||
return r.regexp.path.regexp.String(), nil
|
||||
}
|
||||
|
||||
// GetQueriesRegexp returns the expanded regular expressions used to match the
|
||||
// route queries.
|
||||
// This is useful for building simple REST API documentation and for instrumentation
|
||||
// against third-party services.
|
||||
// An error will be returned if the route does not have queries.
|
||||
func (r *Route) GetQueriesRegexp() ([]string, error) {
|
||||
if r.err != nil {
|
||||
return nil, r.err
|
||||
}
|
||||
if r.regexp == nil || r.regexp.queries == nil {
|
||||
return nil, errors.New("mux: route doesn't have queries")
|
||||
}
|
||||
var queries []string
|
||||
for _, query := range r.regexp.queries {
|
||||
queries = append(queries, query.regexp.String())
|
||||
}
|
||||
return queries, nil
|
||||
}
|
||||
|
||||
// GetQueriesTemplates returns the templates used to build the
|
||||
// query matching.
|
||||
// This is useful for building simple REST API documentation and for instrumentation
|
||||
// against third-party services.
|
||||
// An error will be returned if the route does not define queries.
|
||||
func (r *Route) GetQueriesTemplates() ([]string, error) {
|
||||
if r.err != nil {
|
||||
return nil, r.err
|
||||
}
|
||||
if r.regexp == nil || r.regexp.queries == nil {
|
||||
return nil, errors.New("mux: route doesn't have queries")
|
||||
}
|
||||
var queries []string
|
||||
for _, query := range r.regexp.queries {
|
||||
queries = append(queries, query.template)
|
||||
}
|
||||
return queries, nil
|
||||
}
|
||||
|
||||
// GetMethods returns the methods the route matches against
|
||||
// This is useful for building simple REST API documentation and for instrumentation
|
||||
// against third-party services.
|
||||
// An error will be returned if route does not have methods.
|
||||
func (r *Route) GetMethods() ([]string, error) {
|
||||
if r.err != nil {
|
||||
return nil, r.err
|
||||
}
|
||||
for _, m := range r.matchers {
|
||||
if methods, ok := m.(methodMatcher); ok {
|
||||
return []string(methods), nil
|
||||
}
|
||||
}
|
||||
return nil, errors.New("mux: route doesn't have methods")
|
||||
}
|
||||
|
||||
// GetHostTemplate returns the template used to build the
|
||||
// route match.
|
||||
// This is useful for building simple REST API documentation and for instrumentation
|
||||
// against third-party services.
|
||||
// An error will be returned if the route does not define a host.
|
||||
func (r *Route) GetHostTemplate() (string, error) {
|
||||
if r.err != nil {
|
||||
return "", r.err
|
||||
}
|
||||
if r.regexp == nil || r.regexp.host == nil {
|
||||
return "", errors.New("mux: route doesn't have a host")
|
||||
}
|
||||
return r.regexp.host.template, nil
|
||||
}
|
||||
|
||||
// prepareVars converts the route variable pairs into a map. If the route has a
|
||||
// BuildVarsFunc, it is invoked.
|
||||
func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
|
||||
|
@ -558,11 +715,22 @@ func (r *Route) buildVars(m map[string]string) map[string]string {
|
|||
|
||||
// parentRoute allows routes to know about parent host and path definitions.
|
||||
type parentRoute interface {
|
||||
getBuildScheme() string
|
||||
getNamedRoutes() map[string]*Route
|
||||
getRegexpGroup() *routeRegexpGroup
|
||||
buildVars(map[string]string) map[string]string
|
||||
}
|
||||
|
||||
func (r *Route) getBuildScheme() string {
|
||||
if r.buildScheme != "" {
|
||||
return r.buildScheme
|
||||
}
|
||||
if r.parent != nil {
|
||||
return r.parent.getBuildScheme()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// getNamedRoutes returns the map where named routes are registered.
|
||||
func (r *Route) getNamedRoutes() map[string]*Route {
|
||||
if r.parent == nil {
|
||||
|
|
19 vendor/github.com/gorilla/mux/test_helpers.go generated vendored Normal file
@@ -0,0 +1,19 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package mux

import "net/http"

// SetURLVars sets the URL variables for the given request, to be accessed via
// mux.Vars for testing route behaviour. Arguments are not modified, a shallow
// copy is returned.
//
// This API should only be used for testing purposes; it provides a way to
// inject variables into the request context. Alternatively, URL variables
// can be set by making a route that captures the required variables,
// starting a server and sending the request to that server.
func SetURLVars(r *http.Request, val map[string]string) *http.Request {
	return setVars(r, val)
}
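SetURLVars is the test helper added by this new file. A hedged sketch of how a handler test might use it; the handler, variable name and expected output are invented for illustration:

```go
package mux_test

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/gorilla/mux"
)

func greetHandler(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("hello " + mux.Vars(r)["name"]))
}

func TestGreetHandler(t *testing.T) {
	req := httptest.NewRequest(http.MethodGet, "/greet/anna", nil)
	// Inject the route variables directly instead of routing the request.
	req = mux.SetURLVars(req, map[string]string{"name": "anna"})

	rec := httptest.NewRecorder()
	greetHandler(rec, req)

	if got := rec.Body.String(); got != "hello anna" {
		t.Fatalf("unexpected body: %q", got)
	}
}
```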
4 vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE generated vendored
|
@ -178,7 +178,7 @@
|
|||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
|
@ -186,7 +186,7 @@
|
|||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2013 Matt T. Proud
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
|
1 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE generated vendored Normal file
|
@ -0,0 +1 @@
|
|||
Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
|
1 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore generated vendored Normal file
|
@ -0,0 +1 @@
|
|||
cover.dat
|
7 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile generated vendored Normal file
@@ -0,0 +1,7 @@
all:

cover:
	go test -cover -v -coverprofile=cover.dat ./...
	go tool cover -func cover.dat

.PHONY: cover
2 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go generated vendored
@@ -38,7 +38,7 @@ var errInvalidVarint = errors.New("invalid varint32 encountered")
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
	// Per AbstractParser#parsePartialDelimitedFrom with
	// CodedInputStream#readRawVarint32.
	headerBuf := make([]byte, binary.MaxVarintLen32)
	var headerBuf [binary.MaxVarintLen32]byte
	var bytesRead, varIntBytes int
	var messageLength uint64
	for varIntBytes == 0 { // i.e. no varint has been decoded yet.
4 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go generated vendored
@@ -33,8 +33,8 @@ func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
		return 0, err
	}

	buf := make([]byte, binary.MaxVarintLen32)
	encodedLength := binary.PutUvarint(buf, uint64(len(buffer)))
	var buf [binary.MaxVarintLen32]byte
	encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))

	sync, err := w.Write(buf[:encodedLength])
	if err != nil {
18 vendor/github.com/prometheus/client_golang/AUTHORS.md generated vendored Normal file
@@ -0,0 +1,18 @@
The Prometheus project was started by Matt T. Proud (emeritus) and
Julius Volz in 2012.

Maintainers of this repository:

* Björn Rabenstein <beorn@soundcloud.com>

The following individuals have contributed code to this repository
(listed in alphabetical order):

* Bernerd Schaefer <bj.schaefer@gmail.com>
* Björn Rabenstein <beorn@soundcloud.com>
* Daniel Bornkessel <daniel@soundcloud.com>
* Jeff Younker <jeff@drinktomi.com>
* Julius Volz <julius.volz@gmail.com>
* Matt T. Proud <matt.proud@gmail.com>
* Tobias Schmidt <ts@soundcloud.com>
5 vendor/github.com/prometheus/client_golang/NOTICE generated vendored
@@ -7,11 +7,6 @@ SoundCloud Ltd. (http://soundcloud.com/).

The following components are included in this product:

goautoneg
http://bitbucket.org/ww/goautoneg
Copyright 2011, Open Knowledge Foundation Ltd.
See README.txt for license details.

perks - a fork of https://github.com/bmizerany/perks
https://github.com/beorn7/perks
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
54 vendor/github.com/prometheus/client_golang/prometheus/README.md generated vendored
|
@ -1,53 +1 @@
|
|||
# Overview
|
||||
This is the [Prometheus](http://www.prometheus.io) telemetric
|
||||
instrumentation client [Go](http://golang.org) client library. It
|
||||
enable authors to define process-space metrics for their servers and
|
||||
expose them through a web service interface for extraction,
|
||||
aggregation, and a whole slew of other post processing techniques.
|
||||
|
||||
# Installing
|
||||
$ go get github.com/prometheus/client_golang/prometheus
|
||||
|
||||
# Example
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
|
||||
indexed = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: "my_company",
|
||||
Subsystem: "indexer",
|
||||
Name: "documents_indexed",
|
||||
Help: "The number of documents indexed.",
|
||||
})
|
||||
size = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: "my_company",
|
||||
Subsystem: "storage",
|
||||
Name: "documents_total_size_bytes",
|
||||
Help: "The total size of all documents in the storage.",
|
||||
})
|
||||
)
|
||||
|
||||
func main() {
|
||||
http.Handle("/metrics", prometheus.Handler())
|
||||
|
||||
indexed.Inc()
|
||||
size.Set(5)
|
||||
|
||||
http.ListenAndServe(":8080", nil)
|
||||
}
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(indexed)
|
||||
prometheus.MustRegister(size)
|
||||
}
|
||||
```
|
||||
|
||||
# Documentation
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang)
|
||||
See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
|
||||
|
|
52 vendor/github.com/prometheus/client_golang/prometheus/collector.go generated vendored
|
@ -15,15 +15,15 @@ package prometheus
|
|||
|
||||
// Collector is the interface implemented by anything that can be used by
|
||||
// Prometheus to collect metrics. A Collector has to be registered for
|
||||
// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
|
||||
// collection. See Registerer.Register.
|
||||
//
|
||||
// The stock metrics provided by this package (like Gauge, Counter, Summary) are
|
||||
// also Collectors (which only ever collect one metric, namely itself). An
|
||||
// implementer of Collector may, however, collect multiple metrics in a
|
||||
// coordinated fashion and/or create metrics on the fly. Examples for collectors
|
||||
// already implemented in this library are the metric vectors (i.e. collection
|
||||
// of multiple instances of the same Metric but with different label values)
|
||||
// like GaugeVec or SummaryVec, and the ExpvarCollector.
|
||||
// The stock metrics provided by this package (Gauge, Counter, Summary,
|
||||
// Histogram, Untyped) are also Collectors (which only ever collect one metric,
|
||||
// namely itself). An implementer of Collector may, however, collect multiple
|
||||
// metrics in a coordinated fashion and/or create metrics on the fly. Examples
|
||||
// for collectors already implemented in this library are the metric vectors
|
||||
// (i.e. collection of multiple instances of the same Metric but with different
|
||||
// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
|
||||
type Collector interface {
|
||||
// Describe sends the super-set of all possible descriptors of metrics
|
||||
// collected by this Collector to the provided channel and returns once
|
||||
|
@ -37,39 +37,39 @@ type Collector interface {
|
|||
// executing this method, it must send an invalid descriptor (created
|
||||
// with NewInvalidDesc) to signal the error to the registry.
|
||||
Describe(chan<- *Desc)
|
||||
// Collect is called by Prometheus when collecting metrics. The
|
||||
// implementation sends each collected metric via the provided channel
|
||||
// and returns once the last metric has been sent. The descriptor of
|
||||
// each sent metric is one of those returned by Describe. Returned
|
||||
// metrics that share the same descriptor must differ in their variable
|
||||
// label values. This method may be called concurrently and must
|
||||
// therefore be implemented in a concurrency safe way. Blocking occurs
|
||||
// at the expense of total performance of rendering all registered
|
||||
// metrics. Ideally, Collector implementations support concurrent
|
||||
// readers.
|
||||
// Collect is called by the Prometheus registry when collecting
|
||||
// metrics. The implementation sends each collected metric via the
|
||||
// provided channel and returns once the last metric has been sent. The
|
||||
// descriptor of each sent metric is one of those returned by
|
||||
// Describe. Returned metrics that share the same descriptor must differ
|
||||
// in their variable label values. This method may be called
|
||||
// concurrently and must therefore be implemented in a concurrency safe
|
||||
// way. Blocking occurs at the expense of total performance of rendering
|
||||
// all registered metrics. Ideally, Collector implementations support
|
||||
// concurrent readers.
|
||||
Collect(chan<- Metric)
|
||||
}
|
||||
|
||||
// SelfCollector implements Collector for a single Metric so that that the
|
||||
// Metric collects itself. Add it as an anonymous field to a struct that
|
||||
// implements Metric, and call Init with the Metric itself as an argument.
|
||||
type SelfCollector struct {
|
||||
// selfCollector implements Collector for a single Metric so that the Metric
|
||||
// collects itself. Add it as an anonymous field to a struct that implements
|
||||
// Metric, and call init with the Metric itself as an argument.
|
||||
type selfCollector struct {
|
||||
self Metric
|
||||
}
|
||||
|
||||
// Init provides the SelfCollector with a reference to the metric it is supposed
|
||||
// init provides the selfCollector with a reference to the metric it is supposed
|
||||
// to collect. It is usually called within the factory function to create a
|
||||
// metric. See example.
|
||||
func (c *SelfCollector) Init(self Metric) {
|
||||
func (c *selfCollector) init(self Metric) {
|
||||
c.self = self
|
||||
}
|
||||
|
||||
// Describe implements Collector.
|
||||
func (c *SelfCollector) Describe(ch chan<- *Desc) {
|
||||
func (c *selfCollector) Describe(ch chan<- *Desc) {
|
||||
ch <- c.self.Desc()
|
||||
}
|
||||
|
||||
// Collect implements Collector.
|
||||
func (c *SelfCollector) Collect(ch chan<- Metric) {
|
||||
func (c *selfCollector) Collect(ch chan<- Metric) {
|
||||
ch <- c.self
|
||||
}
|
||||
|
|
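The Collector interface documented above is what custom exporters implement; the updated comments point to NewConstMetric for metrics created on the fly during Collect. A minimal sketch of such a collector; the metric name and the constant value 42 are placeholders for data read from the instrumented system:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// queueLengthCollector builds its metric on the fly in Collect using
// prometheus.MustNewConstMetric, as the updated doc comments suggest.
type queueLengthCollector struct {
	desc *prometheus.Desc
}

func (c *queueLengthCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

func (c *queueLengthCollector) Collect(ch chan<- prometheus.Metric) {
	// 42 stands in for a value obtained from the system being instrumented.
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 42)
}

func main() {
	c := &queueLengthCollector{
		desc: prometheus.NewDesc(
			"example_queue_length",
			"Current length of the example queue.",
			nil, nil,
		),
	}
	prometheus.MustRegister(c)
}
```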
19 vendor/github.com/prometheus/client_golang/prometheus/counter.go generated vendored
|
@ -15,7 +15,6 @@ package prometheus
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"hash/fnv"
|
||||
)
|
||||
|
||||
// Counter is a Metric that represents a single numerical value that only ever
|
||||
|
@ -36,6 +35,9 @@ type Counter interface {
|
|||
// Prometheus metric. Do not use it for regular handling of a
|
||||
// Prometheus counter (as it can be used to break the contract of
|
||||
// monotonically increasing values).
|
||||
//
|
||||
// Deprecated: Use NewConstMetric to create a counter for an external
|
||||
// value. A Counter should never be set.
|
||||
Set(float64)
|
||||
// Inc increments the counter by 1.
|
||||
Inc()
|
||||
|
@ -56,7 +58,7 @@ func NewCounter(opts CounterOpts) Counter {
|
|||
opts.ConstLabels,
|
||||
)
|
||||
result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
|
||||
result.Init(result) // Init self-collection.
|
||||
result.init(result) // Init self-collection.
|
||||
return result
|
||||
}
|
||||
|
||||
|
@ -80,7 +82,7 @@ func (c *counter) Add(v float64) {
|
|||
// CounterVec embeds MetricVec. See there for a full list of methods with
|
||||
// detailed documentation.
|
||||
type CounterVec struct {
|
||||
MetricVec
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
|
||||
|
@ -94,20 +96,15 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
|
|||
opts.ConstLabels,
|
||||
)
|
||||
return &CounterVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
result := &counter{value: value{
|
||||
desc: desc,
|
||||
valType: CounterValue,
|
||||
labelPairs: makeLabelPairs(desc, lvs),
|
||||
}}
|
||||
result.Init(result) // Init self-collection.
|
||||
result.init(result) // Init self-collection.
|
||||
return result
|
||||
},
|
||||
},
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
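counter.go above switches CounterVec onto the shared *MetricVec plumbing. A short usage sketch of a CounterVec partitioned by one label, registered and incremented per request; the metric name, label and port are placeholders, and prometheus.Handler() is used as in the vendored README:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

var httpRequests = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "example_http_requests_total",
		Help: "Handled HTTP requests, partitioned by path.",
	},
	[]string{"path"},
)

func init() {
	prometheus.MustRegister(httpRequests)
}

func main() {
	http.Handle("/metrics", prometheus.Handler())
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		httpRequests.WithLabelValues(r.URL.Path).Inc()
		w.Write([]byte("ok\n"))
	})
	http.ListenAndServe(":8080", nil)
}
```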
42 vendor/github.com/prometheus/client_golang/prometheus/desc.go generated vendored
|
@ -1,10 +1,21 @@
|
|||
// Copyright 2016 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
@ -131,31 +142,24 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
|
|||
d.err = errors.New("duplicate label names")
|
||||
return d
|
||||
}
|
||||
h := fnv.New64a()
|
||||
var b bytes.Buffer // To copy string contents into, avoiding []byte allocations.
|
||||
vh := hashNew()
|
||||
for _, val := range labelValues {
|
||||
b.Reset()
|
||||
b.WriteString(val)
|
||||
b.WriteByte(separatorByte)
|
||||
h.Write(b.Bytes())
|
||||
vh = hashAdd(vh, val)
|
||||
vh = hashAddByte(vh, separatorByte)
|
||||
}
|
||||
d.id = h.Sum64()
|
||||
d.id = vh
|
||||
// Sort labelNames so that order doesn't matter for the hash.
|
||||
sort.Strings(labelNames)
|
||||
// Now hash together (in this order) the help string and the sorted
|
||||
// label names.
|
||||
h.Reset()
|
||||
b.Reset()
|
||||
b.WriteString(help)
|
||||
b.WriteByte(separatorByte)
|
||||
h.Write(b.Bytes())
|
||||
lh := hashNew()
|
||||
lh = hashAdd(lh, help)
|
||||
lh = hashAddByte(lh, separatorByte)
|
||||
for _, labelName := range labelNames {
|
||||
b.Reset()
|
||||
b.WriteString(labelName)
|
||||
b.WriteByte(separatorByte)
|
||||
h.Write(b.Bytes())
|
||||
lh = hashAdd(lh, labelName)
|
||||
lh = hashAddByte(lh, separatorByte)
|
||||
}
|
||||
d.dimHash = h.Sum64()
|
||||
d.dimHash = lh
|
||||
|
||||
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
|
||||
for n, v := range constLabels {
|
||||
|
|
174 vendor/github.com/prometheus/client_golang/prometheus/doc.go generated vendored
|
@ -11,18 +11,17 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package prometheus provides embeddable metric primitives for servers and
|
||||
// standardized exposition of telemetry through a web services interface.
|
||||
// Package prometheus provides metrics primitives to instrument code for
|
||||
// monitoring. It also offers a registry for metrics. Sub-packages allow to
|
||||
// expose the registered metrics via HTTP (package promhttp) or push them to a
|
||||
// Pushgateway (package push).
|
||||
//
|
||||
// All exported functions and methods are safe to be used concurrently unless
|
||||
// specified otherwise.
|
||||
//specified otherwise.
|
||||
//
|
||||
// To expose metrics registered with the Prometheus registry, an HTTP server
|
||||
// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
|
||||
// A Basic Example
|
||||
//
|
||||
// http.Handle("/metrics", prometheus.Handler())
|
||||
//
|
||||
// As a starting point a very basic usage example:
|
||||
// As a starting point, a very basic usage example:
|
||||
//
|
||||
// package main
|
||||
//
|
||||
|
@ -30,6 +29,7 @@
|
|||
// "net/http"
|
||||
//
|
||||
// "github.com/prometheus/client_golang/prometheus"
|
||||
// "github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
// )
|
||||
//
|
||||
// var (
|
||||
|
@ -37,73 +37,145 @@
|
|||
// Name: "cpu_temperature_celsius",
|
||||
// Help: "Current temperature of the CPU.",
|
||||
// })
|
||||
// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
// hdFailures = prometheus.NewCounterVec(
|
||||
// prometheus.CounterOpts{
|
||||
// Name: "hd_errors_total",
|
||||
// Help: "Number of hard-disk errors.",
|
||||
// })
|
||||
// },
|
||||
// []string{"device"},
|
||||
// )
|
||||
// )
|
||||
//
|
||||
// func init() {
|
||||
// // Metrics have to be registered to be exposed:
|
||||
// prometheus.MustRegister(cpuTemp)
|
||||
// prometheus.MustRegister(hdFailures)
|
||||
// }
|
||||
//
|
||||
// func main() {
|
||||
// cpuTemp.Set(65.3)
|
||||
// hdFailures.Inc()
|
||||
// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
|
||||
//
|
||||
// http.Handle("/metrics", prometheus.Handler())
|
||||
// // The Handler function provides a default handler to expose metrics
|
||||
// // via an HTTP server. "/metrics" is the usual endpoint for that.
|
||||
// http.Handle("/metrics", promhttp.Handler())
|
||||
// http.ListenAndServe(":8080", nil)
|
||||
// }
|
||||
//
|
||||
//
|
||||
// This is a complete program that exports two metrics, a Gauge and a Counter.
|
||||
// It also exports some stats about the HTTP usage of the /metrics
|
||||
// endpoint. (See the Handler function for more detail.)
|
||||
// This is a complete program that exports two metrics, a Gauge and a Counter,
|
||||
// the latter with a label attached to turn it into a (one-dimensional) vector.
|
||||
//
|
||||
// Two more advanced metric types are the Summary and Histogram.
|
||||
// Metrics
|
||||
//
|
||||
// In addition to the fundamental metric types Gauge, Counter, Summary, and
|
||||
// Histogram, a very important part of the Prometheus data model is the
|
||||
// partitioning of samples along dimensions called labels, which results in
|
||||
// The number of exported identifiers in this package might appear a bit
|
||||
// overwhelming. Hovever, in addition to the basic plumbing shown in the example
|
||||
// above, you only need to understand the different metric types and their
|
||||
// vector versions for basic usage.
|
||||
//
|
||||
// Above, you have already touched the Counter and the Gauge. There are two more
|
||||
// advanced metric types: the Summary and Histogram. A more thorough description
|
||||
// of those four metric types can be found in the Prometheus docs:
|
||||
// https://prometheus.io/docs/concepts/metric_types/
|
||||
//
|
||||
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
|
||||
// Prometheus server not to assume anything about its type.
|
||||
//
|
||||
// In addition to the fundamental metric types Gauge, Counter, Summary,
|
||||
// Histogram, and Untyped, a very important part of the Prometheus data model is
|
||||
// the partitioning of samples along dimensions called labels, which results in
|
||||
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
|
||||
// and HistogramVec.
|
||||
// HistogramVec, and UntypedVec.
|
||||
//
|
||||
// Those are all the parts needed for basic usage. Detailed documentation and
|
||||
// examples are provided below.
|
||||
// While only the fundamental metric types implement the Metric interface, both
|
||||
// the metrics and their vector versions implement the Collector interface. A
|
||||
// Collector manages the collection of a number of Metrics, but for convenience,
|
||||
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
|
||||
// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
|
||||
// SummaryVec, HistogramVec, and UntypedVec are not.
|
||||
//
|
||||
// Everything else this package offers is essentially for "power users" only. A
|
||||
// few pointers to "power user features":
|
||||
// To create instances of Metrics and their vector versions, you need a suitable
|
||||
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
|
||||
// HistogramOpts, or UntypedOpts.
|
||||
//
|
||||
// All the various ...Opts structs have a ConstLabels field for labels that
|
||||
// never change their value (which is only useful under special circumstances,
|
||||
// see documentation of the Opts type).
|
||||
// Custom Collectors and constant Metrics
|
||||
//
|
||||
// The Untyped metric behaves like a Gauge, but signals the Prometheus server
|
||||
// not to assume anything about its type.
|
||||
// While you could create your own implementations of Metric, most likely you
|
||||
// will only ever implement the Collector interface on your own. At a first
|
||||
// glance, a custom Collector seems handy to bundle Metrics for common
|
||||
// registration (with the prime example of the different metric vectors above,
|
||||
// which bundle all the metrics of the same name but with different labels).
|
||||
//
|
||||
// Functions to fine-tune how the metric registry works: EnableCollectChecks,
|
||||
// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
|
||||
// There is a more involved use case, too: If you already have metrics
|
||||
// available, created outside of the Prometheus context, you don't need the
|
||||
// interface of the various Metric types. You essentially want to mirror the
|
||||
// existing numbers into Prometheus Metrics during collection. Your own
|
||||
// implementation of the Collector interface is perfect for that. You can create
|
||||
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
|
||||
// NewConstSummary (and their respective Must… versions). That will happen in
|
||||
// the Collect method. The Describe method has to return separate Desc
|
||||
// instances, representative of the “throw-away” metrics to be created
|
||||
// later. NewDesc comes in handy to create those Desc instances.
|
||||
//
|
||||
// For custom metric collection, there are two entry points: Custom Metric
|
||||
// implementations and custom Collector implementations. A Metric is the
|
||||
// fundamental unit in the Prometheus data model: a sample at a point in time
|
||||
// together with its meta-data (like its fully-qualified name and any number of
|
||||
// pairs of label name and label value) that knows how to marshal itself into a
|
||||
// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
|
||||
// gets registered with the Prometheus registry and manages the collection of
|
||||
// one or more Metrics. Many parts of this package are building blocks for
|
||||
// Metrics and Collectors. Desc is the metric descriptor, actually used by all
|
||||
// metrics under the hood, and by Collectors to describe the Metrics to be
|
||||
// collected, but only to be dealt with by users if they implement their own
|
||||
// Metrics or Collectors. To create a Desc, the BuildFQName function will come
|
||||
// in handy. Other useful components for Metric and Collector implementation
|
||||
// include: LabelPairSorter to sort the DTO version of label pairs,
|
||||
// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
|
||||
// collection time, MetricVec to bundle custom Metrics into a metric vector
|
||||
// Collector, SelfCollector to make a custom Metric collect itself.
|
||||
// The Collector example illustrates the use case. You can also look at the
|
||||
// source code of the processCollector (mirroring process metrics), the
|
||||
// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
|
||||
// metrics) as examples that are used in this package itself.
|
||||
//
|
||||
// A good example for a custom Collector is the ExpVarCollector included in this
|
||||
// package, which exports variables exported via the "expvar" package as
|
||||
// Prometheus metrics.
|
||||
// If you just need to call a function to get a single float value to collect as
|
||||
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
|
||||
// shortcuts.
|
||||
//
|
||||
// Advanced Uses of the Registry
|
||||
//
|
||||
// While MustRegister is the by far most common way of registering a Collector,
|
||||
// sometimes you might want to handle the errors the registration might
|
||||
// cause. As suggested by the name, MustRegister panics if an error occurs. With
|
||||
// the Register function, the error is returned and can be handled.
|
||||
//
|
||||
// An error is returned if the registered Collector is incompatible or
|
||||
// inconsistent with already registered metrics. The registry aims for
|
||||
// consistency of the collected metrics according to the Prometheus data
|
||||
// model. Inconsistencies are ideally detected at registration time, not at
|
||||
// collect time. The former will usually be detected at start-up time of a
|
||||
// program, while the latter will only happen at scrape time, possibly not even
|
||||
// on the first scrape if the inconsistency only becomes relevant later. That is
|
||||
// the main reason why a Collector and a Metric have to describe themselves to
|
||||
// the registry.
|
||||
//
|
||||
// So far, everything we did operated on the so-called default registry, as it
|
||||
// can be found in the global DefaultRegistry variable. With NewRegistry, you
|
||||
// can create a custom registry, or you can even implement the Registerer or
|
||||
// Gatherer interfaces yourself. The methods Register and Unregister work in
|
||||
// the same way on a custom registry as the global functions Register and
|
||||
// Unregister on the default registry.
|
||||
//
|
||||
// There are a number of uses for custom registries: You can use registries
|
||||
// with special properties, see NewPedanticRegistry. You can avoid global state,
|
||||
// as it is imposed by the DefaultRegistry. You can use multiple registries at
|
||||
// the same time to expose different metrics in different ways. You can use
|
||||
// separate registries for testing purposes.
|
||||
//
|
||||
// Also note that the DefaultRegistry comes registered with a Collector for Go
|
||||
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
|
||||
// NewProcessCollector). With a custom registry, you are in control and decide
|
||||
// yourself about the Collectors to register.
|
||||
//
|
||||
// HTTP Exposition
|
||||
//
|
||||
// The Registry implements the Gatherer interface. The caller of the Gather
|
||||
// method can then expose the gathered metrics in some way. Usually, the metrics
|
||||
// are served via HTTP on the /metrics endpoint. That's happening in the example
|
||||
// above. The tools to expose metrics via HTTP are in the promhttp
|
||||
// sub-package. (The top-level functions in the prometheus package are
|
||||
// deprecated.)
|
||||
//
|
||||
// Pushing to the Pushgateway
|
||||
//
|
||||
// Functions for pushing to the Pushgateway can be found in the push sub-package.
|
||||
//
|
||||
// Other Means of Exposition
|
||||
//
|
||||
// More ways of exposing metrics can easily be added. Sending metrics to
|
||||
// Graphite would be an example that will soon be implemented.
|
||||
package prometheus
|
||||
|
|
|
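To make the custom-registry discussion above concrete, the following is a minimal sketch (not taken from this diff) that assumes the promhttp sub-package exposes HandlerFor with a HandlerOpts struct, and that registers the Go and process collectors explicitly instead of relying on the DefaultRegistry:

package main

import (
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Custom registry instead of the global DefaultRegistry: only the
	// collectors registered here will show up on /metrics.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())
	reg.MustRegister(prometheus.NewProcessCollector(os.Getpid(), ""))

	// HandlerFor gathers from exactly this registry.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	http.ListenAndServe(":8080", nil)
}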
@ -18,21 +18,21 @@ import (
|
|||
"expvar"
|
||||
)
|
||||
|
||||
// ExpvarCollector collects metrics from the expvar interface. It provides a
|
||||
// quick way to expose numeric values that are already exported via expvar as
|
||||
// Prometheus metrics. Note that the data models of expvar and Prometheus are
|
||||
// fundamentally different, and that the ExpvarCollector is inherently
|
||||
// slow. Thus, the ExpvarCollector is probably great for experiments and
|
||||
// prototyping, but you should seriously consider a more direct implementation of
|
||||
// Prometheus metrics for monitoring production systems.
|
||||
//
|
||||
// Use NewExpvarCollector to create new instances.
|
||||
type ExpvarCollector struct {
|
||||
type expvarCollector struct {
|
||||
exports map[string]*Desc
|
||||
}
|
||||
|
||||
// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
|
||||
// to be registered with the Prometheus registry.
|
||||
// NewExpvarCollector returns a newly allocated expvar Collector that still has
|
||||
// to be registered with a Prometheus registry.
|
||||
//
|
||||
// An expvar Collector collects metrics from the expvar interface. It provides a
|
||||
// quick way to expose numeric values that are already exported via expvar as
|
||||
// Prometheus metrics. Note that the data models of expvar and Prometheus are
|
||||
// fundamentally different, and that the expvar Collector is inherently slower
|
||||
// than native Prometheus metrics. Thus, the expvar Collector is probably great
|
||||
// for experiments and prototyping, but you should seriously consider a more
|
||||
// direct implementation of Prometheus metrics for monitoring production
|
||||
// systems.
|
||||
//
|
||||
// The exports map has the following meaning:
|
||||
//
|
||||
|
@ -59,21 +59,21 @@ type ExpvarCollector struct {
|
|||
// sample values.
|
||||
//
|
||||
// Anything that does not fit into the scheme above is silently ignored.
|
||||
func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
|
||||
return &ExpvarCollector{
|
||||
func NewExpvarCollector(exports map[string]*Desc) Collector {
|
||||
return &expvarCollector{
|
||||
exports: exports,
|
||||
}
|
||||
}
|
||||
|
||||
// Describe implements Collector.
|
||||
func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
|
||||
func (e *expvarCollector) Describe(ch chan<- *Desc) {
|
||||
for _, desc := range e.exports {
|
||||
ch <- desc
|
||||
}
|
||||
}
|
||||
|
||||
// Collect implements Collector.
|
||||
func (e *ExpvarCollector) Collect(ch chan<- Metric) {
|
||||
func (e *expvarCollector) Collect(ch chan<- Metric) {
|
||||
for name, desc := range e.exports {
|
||||
var m Metric
|
||||
expVar := expvar.Get(name)
|
29 vendor/github.com/prometheus/client_golang/prometheus/fnv.go (generated, vendored, new file)
|
@ -0,0 +1,29 @@
package prometheus

// Inline and byte-free variant of hash/fnv's fnv64a.

const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

// hashNew initializes a new fnv64a hash value.
func hashNew() uint64 {
	return offset64
}

// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
func hashAddByte(h uint64, b byte) uint64 {
	h ^= uint64(b)
	h *= prime64
	return h
}
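A self-contained sketch (reimplementing the two helpers, since they are unexported) showing that the inline variant produces the same digest as hash/fnv while avoiding the hash.Hash64 allocation and the string-to-byte copies:

package main

import (
	"fmt"
	"hash/fnv"
)

const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

func hashNew() uint64 { return offset64 }

func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func main() {
	std := fnv.New64a()
	std.Write([]byte("device=/dev/sda"))
	// Prints true: both compute FNV-1a over the same bytes.
	fmt.Println(hashAdd(hashNew(), "device=/dev/sda") == std.Sum64())
}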
13 vendor/github.com/prometheus/client_golang/prometheus/gauge.go (generated, vendored)
|
@ -13,8 +13,6 @@
|
|||
|
||||
package prometheus
|
||||
|
||||
import "hash/fnv"
|
||||
|
||||
// Gauge is a Metric that represents a single numerical value that can
|
||||
// arbitrarily go up and down.
|
||||
//
|
||||
|
@ -60,7 +58,7 @@ func NewGauge(opts GaugeOpts) Gauge {
|
|||
// (e.g. number of operations queued, partitioned by user and operation
|
||||
// type). Create instances with NewGaugeVec.
|
||||
type GaugeVec struct {
|
||||
MetricVec
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
|
||||
|
@ -74,14 +72,9 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
|
|||
opts.ConstLabels,
|
||||
)
|
||||
return &GaugeVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newValue(desc, GaugeValue, 0, lvs...)
|
||||
},
|
||||
},
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
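A short usage sketch of the GaugeVec constructed above (metric and label names are made up for illustration):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	queueLength := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "queue_length",
			Help: "Number of queued items, partitioned by queue name.",
		},
		[]string{"queue"},
	)
	prometheus.MustRegister(queueLength)

	// WithLabelValues hashes the label values (see vec.go below) and returns
	// the child Gauge for that combination, creating it on first use.
	queueLength.WithLabelValues("email").Set(17)
}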
217 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go (generated, vendored)
|
@ -1,6 +1,7 @@
|
|||
package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"time"
|
||||
|
@ -9,27 +10,226 @@ import (
|
|||
type goCollector struct {
|
||||
goroutines Gauge
|
||||
gcDesc *Desc
|
||||
|
||||
// metrics to describe and collect
|
||||
metrics memStatsMetrics
|
||||
}
|
||||
|
||||
// NewGoCollector returns a collector which exports metrics about the current
|
||||
// go process.
|
||||
func NewGoCollector() *goCollector {
|
||||
func NewGoCollector() Collector {
|
||||
return &goCollector{
|
||||
goroutines: NewGauge(GaugeOpts{
|
||||
Name: "go_goroutines",
|
||||
Namespace: "go",
|
||||
Name: "goroutines",
|
||||
Help: "Number of goroutines that currently exist.",
|
||||
}),
|
||||
gcDesc: NewDesc(
|
||||
"go_gc_duration_seconds",
|
||||
"A summary of the GC invocation durations.",
|
||||
nil, nil),
|
||||
metrics: memStatsMetrics{
|
||||
{
|
||||
desc: NewDesc(
|
||||
memstatNamespace("alloc_bytes"),
|
||||
"Number of bytes allocated and still in use.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("alloc_bytes_total"),
|
||||
"Total number of bytes allocated, even if freed.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
|
||||
valType: CounterValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("sys_bytes"),
|
||||
"Number of bytes obtained by system. Sum of all system allocations.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("lookups_total"),
|
||||
"Total number of pointer lookups.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
|
||||
valType: CounterValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("mallocs_total"),
|
||||
"Total number of mallocs.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
|
||||
valType: CounterValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("frees_total"),
|
||||
"Total number of frees.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
|
||||
valType: CounterValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_alloc_bytes"),
|
||||
"Number of heap bytes allocated and still in use.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_sys_bytes"),
|
||||
"Number of heap bytes obtained from system.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_idle_bytes"),
|
||||
"Number of heap bytes waiting to be used.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_inuse_bytes"),
|
||||
"Number of heap bytes that are in use.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_released_bytes_total"),
|
||||
"Total number of heap bytes released to OS.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
|
||||
valType: CounterValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_objects"),
|
||||
"Number of allocated objects.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("stack_inuse_bytes"),
|
||||
"Number of bytes in use by the stack allocator.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("stack_sys_bytes"),
|
||||
"Number of bytes obtained from system for stack allocator.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("mspan_inuse_bytes"),
|
||||
"Number of bytes in use by mspan structures.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("mspan_sys_bytes"),
|
||||
"Number of bytes used for mspan structures obtained from system.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("mcache_inuse_bytes"),
|
||||
"Number of bytes in use by mcache structures.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("mcache_sys_bytes"),
|
||||
"Number of bytes used for mcache structures obtained from system.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("buck_hash_sys_bytes"),
|
||||
"Number of bytes used by the profiling bucket hash table.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("gc_sys_bytes"),
|
||||
"Number of bytes used for garbage collection system metadata.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("other_sys_bytes"),
|
||||
"Number of bytes used for other system allocations.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("next_gc_bytes"),
|
||||
"Number of heap bytes when next garbage collection will take place.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("last_gc_time_seconds"),
|
||||
"Number of seconds since 1970 of last garbage collection.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
|
||||
valType: GaugeValue,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func memstatNamespace(s string) string {
|
||||
return fmt.Sprintf("go_memstats_%s", s)
|
||||
}
|
||||
|
||||
// Describe returns all descriptions of the collector.
|
||||
func (c *goCollector) Describe(ch chan<- *Desc) {
|
||||
ch <- c.goroutines.Desc()
|
||||
ch <- c.gcDesc
|
||||
|
||||
for _, i := range c.metrics {
|
||||
ch <- i.desc
|
||||
}
|
||||
}
|
||||
|
||||
// Collect returns the current state of all metrics of the collector.
|
||||
|
@ -47,4 +247,17 @@ func (c *goCollector) Collect(ch chan<- Metric) {
|
|||
}
|
||||
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
|
||||
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
|
||||
|
||||
ms := &runtime.MemStats{}
|
||||
runtime.ReadMemStats(ms)
|
||||
for _, i := range c.metrics {
|
||||
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
|
||||
}
|
||||
}
|
||||
|
||||
// memStatsMetrics provide description, value, and value type for memstat metrics.
|
||||
type memStatsMetrics []struct {
|
||||
desc *Desc
|
||||
eval func(*runtime.MemStats) float64
|
||||
valType ValueType
|
||||
}
|
||||
|
|
24 vendor/github.com/prometheus/client_golang/prometheus/histogram.go (generated, vendored)
|
@ -15,7 +15,6 @@ package prometheus
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"math"
|
||||
"sort"
|
||||
"sync/atomic"
|
||||
|
@ -52,11 +51,11 @@ type Histogram interface {
|
|||
// bucket of a histogram ("le" -> "less or equal").
|
||||
const bucketLabel = "le"
|
||||
|
||||
// DefBuckets are the default Histogram buckets. The default buckets are
|
||||
// tailored to broadly measure the response time (in seconds) of a network
|
||||
// service. Most likely, however, you will be required to define buckets
|
||||
// customized to your use case.
|
||||
var (
|
||||
// DefBuckets are the default Histogram buckets. The default buckets are
|
||||
// tailored to broadly measure the response time (in seconds) of a
|
||||
// network service. Most likely, however, you will be required to define
|
||||
// buckets customized to your use case.
|
||||
DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
|
||||
|
||||
errBucketLabelNotAllowed = fmt.Errorf(
|
||||
|
@ -211,7 +210,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
|
|||
// Finally we know the final length of h.upperBounds and can make counts.
|
||||
h.counts = make([]uint64, len(h.upperBounds))
|
||||
|
||||
h.Init(h) // Init self-collection.
|
||||
h.init(h) // Init self-collection.
|
||||
return h
|
||||
}
|
||||
|
||||
|
@ -223,7 +222,7 @@ type histogram struct {
|
|||
sumBits uint64
|
||||
count uint64
|
||||
|
||||
SelfCollector
|
||||
selfCollector
|
||||
// Note that there is no mutex required.
|
||||
|
||||
desc *Desc
|
||||
|
@ -288,7 +287,7 @@ func (h *histogram) Write(out *dto.Metric) error {
|
|||
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
||||
// instances with NewHistogramVec.
|
||||
type HistogramVec struct {
|
||||
MetricVec
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
|
||||
|
@ -302,14 +301,9 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
|
|||
opts.ConstLabels,
|
||||
)
|
||||
return &HistogramVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newHistogram(desc, opts, lvs...)
|
||||
},
|
||||
},
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
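To illustrate the advice above about replacing DefBuckets with buckets tailored to the use case, a hedged sketch using ExponentialBuckets (the metric name and bucket layout are illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "backend_request_duration_seconds",
		Help:    "Backend request latency.",
		Buckets: prometheus.ExponentialBuckets(0.001, 2, 12), // 1ms up to ~2s
	})
	prometheus.MustRegister(latency)
	latency.Observe(0.042)
}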
151 vendor/github.com/prometheus/client_golang/prometheus/http.go (generated, vendored)
|
@ -15,14 +15,114 @@ package prometheus
|
|||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/common/expfmt"
|
||||
)
|
||||
|
||||
// TODO(beorn7): Remove this whole file. It is a partial mirror of
|
||||
// promhttp/http.go (to avoid circular import chains) where everything HTTP
|
||||
// related should live. The functions here are just for avoiding
|
||||
// breakage. Everything is deprecated.
|
||||
|
||||
const (
|
||||
contentTypeHeader = "Content-Type"
|
||||
contentLengthHeader = "Content-Length"
|
||||
contentEncodingHeader = "Content-Encoding"
|
||||
acceptEncodingHeader = "Accept-Encoding"
|
||||
)
|
||||
|
||||
var bufPool sync.Pool
|
||||
|
||||
func getBuf() *bytes.Buffer {
|
||||
buf := bufPool.Get()
|
||||
if buf == nil {
|
||||
return &bytes.Buffer{}
|
||||
}
|
||||
return buf.(*bytes.Buffer)
|
||||
}
|
||||
|
||||
func giveBuf(buf *bytes.Buffer) {
|
||||
buf.Reset()
|
||||
bufPool.Put(buf)
|
||||
}
|
||||
|
||||
// Handler returns an HTTP handler for the DefaultGatherer. It is
|
||||
// already instrumented with InstrumentHandler (using "prometheus" as handler
|
||||
// name).
|
||||
//
|
||||
// Deprecated: Please note the issues described in the doc comment of
|
||||
// InstrumentHandler. You might want to consider using promhttp.Handler instead
|
||||
// (which is non instrumented).
|
||||
func Handler() http.Handler {
|
||||
return InstrumentHandler("prometheus", UninstrumentedHandler())
|
||||
}
|
||||
|
||||
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
|
||||
//
|
||||
// Deprecated: Use promhttp.Handler instead. See there for further documentation.
|
||||
func UninstrumentedHandler() http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
mfs, err := DefaultGatherer.Gather()
|
||||
if err != nil {
|
||||
http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
contentType := expfmt.Negotiate(req.Header)
|
||||
buf := getBuf()
|
||||
defer giveBuf(buf)
|
||||
writer, encoding := decorateWriter(req, buf)
|
||||
enc := expfmt.NewEncoder(writer, contentType)
|
||||
var lastErr error
|
||||
for _, mf := range mfs {
|
||||
if err := enc.Encode(mf); err != nil {
|
||||
lastErr = err
|
||||
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
if closer, ok := writer.(io.Closer); ok {
|
||||
closer.Close()
|
||||
}
|
||||
if lastErr != nil && buf.Len() == 0 {
|
||||
http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
header := w.Header()
|
||||
header.Set(contentTypeHeader, string(contentType))
|
||||
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
|
||||
if encoding != "" {
|
||||
header.Set(contentEncodingHeader, encoding)
|
||||
}
|
||||
w.Write(buf.Bytes())
|
||||
})
|
||||
}
|
||||
|
||||
// decorateWriter wraps a writer to handle gzip compression if requested. It
|
||||
// returns the decorated writer and the appropriate "Content-Encoding" header
|
||||
// (which is empty if no compression is enabled).
|
||||
func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
|
||||
header := request.Header.Get(acceptEncodingHeader)
|
||||
parts := strings.Split(header, ",")
|
||||
for _, part := range parts {
|
||||
part := strings.TrimSpace(part)
|
||||
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
|
||||
return gzip.NewWriter(writer), "gzip"
|
||||
}
|
||||
}
|
||||
return writer, ""
|
||||
}
|
||||
|
||||
var instLabels = []string{"method", "code"}
|
||||
|
||||
type nower interface {
|
||||
|
@ -57,12 +157,34 @@ func nowSeries(t ...time.Time) nower {
|
|||
// has a constant label named "handler" with the provided handlerName as
|
||||
// value. http_requests_total is a metric vector partitioned by HTTP method
|
||||
// (label name "method") and HTTP status code (label name "code").
|
||||
//
|
||||
// Deprecated: InstrumentHandler has several issues:
|
||||
//
|
||||
// - It uses Summaries rather than Histograms. Summaries are not useful if
|
||||
// aggregation across multiple instances is required.
|
||||
//
|
||||
// - It uses microseconds as unit, which is deprecated and should be replaced by
|
||||
// seconds.
|
||||
//
|
||||
// - The size of the request is calculated in a separate goroutine. Since this
|
||||
// calculator requires access to the request header, it creates a race with
|
||||
// any writes to the header performed during request handling.
|
||||
// httputil.ReverseProxy is a prominent example for a handler
|
||||
// performing such writes.
|
||||
//
|
||||
// Upcoming versions of this package will provide ways of instrumenting HTTP
|
||||
// handlers that are more flexible and have fewer issues. Please prefer direct
|
||||
// instrumentation in the meantime.
|
||||
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
|
||||
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
|
||||
}
|
||||
|
||||
// InstrumentHandlerFunc wraps the given function for instrumentation. It
|
||||
// otherwise works in the same way as InstrumentHandler.
|
||||
// otherwise works in the same way as InstrumentHandler (and shares the same
|
||||
// issues).
|
||||
//
|
||||
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
|
||||
// InstrumentHandler is.
|
||||
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
||||
return InstrumentHandlerFuncWithOpts(
|
||||
SummaryOpts{
|
||||
|
@ -73,13 +195,13 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
|
|||
)
|
||||
}
|
||||
|
||||
// InstrumentHandlerWithOpts works like InstrumentHandler but provides more
|
||||
// flexibility (at the cost of a more complex call syntax). As
|
||||
// InstrumentHandler, this function registers four metric collectors, but it
|
||||
// uses the provided SummaryOpts to create them. However, the fields "Name" and
|
||||
// "Help" in the SummaryOpts are ignored. "Name" is replaced by
|
||||
// "requests_total", "request_duration_microseconds", "request_size_bytes", and
|
||||
// "response_size_bytes", respectively. "Help" is replaced by an appropriate
|
||||
// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
|
||||
// issues) but provides more flexibility (at the cost of a more complex call
|
||||
// syntax). As InstrumentHandler, this function registers four metric
|
||||
// collectors, but it uses the provided SummaryOpts to create them. However, the
|
||||
// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
|
||||
// by "requests_total", "request_duration_microseconds", "request_size_bytes",
|
||||
// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
|
||||
// help string. The names of the variable labels of the http_requests_total
|
||||
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
|
||||
//
|
||||
|
@ -98,13 +220,20 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
|
|||
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
|
||||
// and all its fields are set to the equally named fields in the provided
|
||||
// SummaryOpts.
|
||||
//
|
||||
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
|
||||
// InstrumentHandler is.
|
||||
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
|
||||
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
|
||||
}
|
||||
|
||||
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides
|
||||
// more flexibility (at the cost of a more complex call syntax). See
|
||||
// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used.
|
||||
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
|
||||
// the same issues) but provides more flexibility (at the cost of a more complex
|
||||
// call syntax). See InstrumentHandlerWithOpts for details how the provided
|
||||
// SummaryOpts are used.
|
||||
//
|
||||
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
|
||||
// as InstrumentHandler is.
|
||||
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
||||
reqCnt := NewCounterVec(
|
||||
CounterOpts{
|
||||
|
|
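Since InstrumentHandler and its variants are deprecated above in favour of direct instrumentation, here is a minimal sketch of what that can look like (handler and metric names are hypothetical):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var httpRequests = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "myapp_http_requests_total",
		Help: "HTTP requests processed, partitioned by handler.",
	},
	[]string{"handler"},
)

// instrument wraps a handler and counts its invocations directly, avoiding
// the Summary and microsecond issues listed for InstrumentHandler.
func instrument(name string, h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		httpRequests.WithLabelValues(name).Inc()
		h.ServeHTTP(w, r)
	})
}

func main() {
	prometheus.MustRegister(httpRequests)
	http.Handle("/", instrument("root", http.FileServer(http.Dir("."))))
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}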
34 vendor/github.com/prometheus/client_golang/prometheus/metric.go (generated, vendored)
|
@ -22,10 +22,8 @@ import (
|
|||
const separatorByte byte = 255
|
||||
|
||||
// A Metric models a single sample value with its meta data being exported to
|
||||
// Prometheus. Implementers of Metric in this package inclued Gauge, Counter,
|
||||
// Untyped, and Summary. Users can implement their own Metric types, but that
|
||||
// should be rarely needed. See the example for SelfCollector, which is also an
|
||||
// example for a user-implemented Metric.
|
||||
// Prometheus. Implementations of Metric in this package are Gauge, Counter,
|
||||
// Histogram, Summary, and Untyped.
|
||||
type Metric interface {
|
||||
// Desc returns the descriptor for the Metric. This method idempotently
|
||||
// returns the same descriptor throughout the lifetime of the
|
||||
|
@ -36,21 +34,23 @@ type Metric interface {
|
|||
// Write encodes the Metric into a "Metric" Protocol Buffer data
|
||||
// transmission object.
|
||||
//
|
||||
// Implementers of custom Metric types must observe concurrency safety
|
||||
// as reads of this metric may occur at any time, and any blocking
|
||||
// occurs at the expense of total performance of rendering all
|
||||
// registered metrics. Ideally Metric implementations should support
|
||||
// concurrent readers.
|
||||
// Metric implementations must observe concurrency safety as reads of
|
||||
// this metric may occur at any time, and any blocking occurs at the
|
||||
// expense of total performance of rendering all registered
|
||||
// metrics. Ideally, Metric implementations should support concurrent
|
||||
// readers.
|
||||
//
|
||||
// The Prometheus client library attempts to minimize memory allocations
|
||||
// and will provide a pre-existing reset dto.Metric pointer. Prometheus
|
||||
// may recycle the dto.Metric proto message, so Metric implementations
|
||||
// should just populate the provided dto.Metric and then should not keep
|
||||
// any reference to it.
|
||||
//
|
||||
// While populating dto.Metric, labels must be sorted lexicographically.
|
||||
// (Implementers may find LabelPairSorter useful for that.)
|
||||
// While populating dto.Metric, it is the responsibility of the
|
||||
// implementation to ensure validity of the Metric protobuf (like valid
|
||||
// UTF-8 strings or syntactically valid metric and label names). It is
|
||||
// recommended to sort labels lexicographically. (Implementers may find
|
||||
// LabelPairSorter useful for that.) Callers of Write should still make
|
||||
// sure of sorting if they depend on it.
|
||||
Write(*dto.Metric) error
|
||||
// TODO(beorn7): The original rationale of passing in a pre-allocated
|
||||
// dto.Metric protobuf to save allocations has disappeared. The
|
||||
// signature of this method should be changed to "Write() (*dto.Metric,
|
||||
// error)".
|
||||
}
|
||||
|
||||
// Opts bundles the options for creating most Metric types. Each metric
|
||||
|
|
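The Write and Desc contract above is what NewConstMetric and its relatives take care of; a hedged sketch of the "mirror an existing number" pattern from the package comment, with a hypothetical queue-length source:

package main

import "github.com/prometheus/client_golang/prometheus"

type queueLengthCollector struct {
	desc   *prometheus.Desc
	length func() float64 // the number that already exists outside Prometheus
}

func (c queueLengthCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c queueLengthCollector) Collect(ch chan<- prometheus.Metric) {
	// Throw-away Metric created at collect time, as described in doc.go.
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, c.length())
}

func main() {
	prometheus.MustRegister(queueLengthCollector{
		desc:   prometheus.NewDesc("queue_length", "Current queue length.", nil, nil),
		length: func() float64 { return 7 },
	})
}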
4 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go (generated, vendored)
|
@ -28,7 +28,7 @@ type processCollector struct {
|
|||
// NewProcessCollector returns a collector which exports the current state of
|
||||
// process metrics including cpu, memory and file descriptor usage as well as
|
||||
// the process start time for the given process id under the given namespace.
|
||||
func NewProcessCollector(pid int, namespace string) *processCollector {
|
||||
func NewProcessCollector(pid int, namespace string) Collector {
|
||||
return NewProcessCollectorPIDFn(
|
||||
func() (int, error) { return pid, nil },
|
||||
namespace,
|
||||
|
@ -43,7 +43,7 @@ func NewProcessCollector(pid int, namespace string) *processCollector {
|
|||
func NewProcessCollectorPIDFn(
|
||||
pidFn func() (int, error),
|
||||
namespace string,
|
||||
) *processCollector {
|
||||
) Collector {
|
||||
c := processCollector{
|
||||
pidFn: pidFn,
|
||||
collectFn: func(chan<- Metric) {},
|
||||
|
|
65 vendor/github.com/prometheus/client_golang/prometheus/push.go (generated, vendored)
|
@ -1,65 +0,0 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Copyright (c) 2013, The Prometheus Authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be found
|
||||
// in the LICENSE file.
|
||||
|
||||
package prometheus
|
||||
|
||||
// Push triggers a metric collection by the default registry and pushes all
|
||||
// collected metrics to the Pushgateway specified by addr. See the Pushgateway
|
||||
// documentation for detailed implications of the job and instance
|
||||
// parameter. instance can be left empty. You can use just host:port or ip:port
|
||||
// as url, in which case 'http://' is added automatically. You can also include
|
||||
// the schema in the URL. However, do not include the '/metrics/jobs/...' part.
|
||||
//
|
||||
// Note that all previously pushed metrics with the same job and instance will
|
||||
// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
|
||||
// to push to the Pushgateway.)
|
||||
func Push(job, instance, url string) error {
|
||||
return defRegistry.Push(job, instance, url, "PUT")
|
||||
}
|
||||
|
||||
// PushAdd works like Push, but only previously pushed metrics with the same
|
||||
// name (and the same job and instance) will be replaced. (It uses HTTP method
|
||||
// 'POST' to push to the Pushgateway.)
|
||||
func PushAdd(job, instance, url string) error {
|
||||
return defRegistry.Push(job, instance, url, "POST")
|
||||
}
|
||||
|
||||
// PushCollectors works like Push, but it does not collect from the default
|
||||
// registry. Instead, it collects from the provided collectors. It is a
|
||||
// convenient way to push only a few metrics.
|
||||
func PushCollectors(job, instance, url string, collectors ...Collector) error {
|
||||
return pushCollectors(job, instance, url, "PUT", collectors...)
|
||||
}
|
||||
|
||||
// PushAddCollectors works like PushAdd, but it does not collect from the
|
||||
// default registry. Instead, it collects from the provided collectors. It is a
|
||||
// convenient way to push only a few metrics.
|
||||
func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
|
||||
return pushCollectors(job, instance, url, "POST", collectors...)
|
||||
}
|
||||
|
||||
func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
|
||||
r := newRegistry()
|
||||
for _, collector := range collectors {
|
||||
if _, err := r.Register(collector); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return r.Push(job, instance, url, method)
|
||||
}
|
944 vendor/github.com/prometheus/client_golang/prometheus/registry.go (generated, vendored): diff suppressed because it is too large
28 vendor/github.com/prometheus/client_golang/prometheus/summary.go (generated, vendored)
|
@ -15,7 +15,6 @@ package prometheus
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"math"
|
||||
"sort"
|
||||
"sync"
|
||||
|
@ -54,8 +53,8 @@ type Summary interface {
|
|||
Observe(float64)
|
||||
}
|
||||
|
||||
// DefObjectives are the default Summary quantile values.
|
||||
var (
|
||||
// DefObjectives are the default Summary quantile values.
|
||||
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
|
||||
|
||||
errQuantileLabelNotAllowed = fmt.Errorf(
|
||||
|
@ -140,11 +139,11 @@ type SummaryOpts struct {
|
|||
BufCap uint32
|
||||
}
|
||||
|
||||
// TODO: Great fuck-up with the sliding-window decay algorithm... The Merge
|
||||
// method of perk/quantile is actually not working as advertised - and it might
|
||||
// be unfixable, as the underlying algorithm is apparently not capable of
|
||||
// merging summaries in the first place. To avoid using Merge, we are currently
|
||||
// adding observations to _each_ age bucket, i.e. the effort to add a sample is
|
||||
// Great fuck-up with the sliding-window decay algorithm... The Merge method of
|
||||
// perk/quantile is actually not working as advertised - and it might be
|
||||
// unfixable, as the underlying algorithm is apparently not capable of merging
|
||||
// summaries in the first place. To avoid using Merge, we are currently adding
|
||||
// observations to _each_ age bucket, i.e. the effort to add a sample is
|
||||
// essentially multiplied by the number of age buckets. When rotating age
|
||||
// buckets, we empty the previous head stream. On scrape time, we simply take
|
||||
// the quantiles from the head stream (no merging required). Result: More effort
|
||||
|
@ -228,12 +227,12 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
|||
}
|
||||
sort.Float64s(s.sortedObjectives)
|
||||
|
||||
s.Init(s) // Init self-collection.
|
||||
s.init(s) // Init self-collection.
|
||||
return s
|
||||
}
|
||||
|
||||
type summary struct {
|
||||
SelfCollector
|
||||
selfCollector
|
||||
|
||||
bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
|
||||
mtx sync.Mutex // Protects every other moving part.
|
||||
|
@ -391,7 +390,7 @@ func (s quantSort) Less(i, j int) bool {
|
|||
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
||||
// instances with NewSummaryVec.
|
||||
type SummaryVec struct {
|
||||
MetricVec
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
|
||||
|
@ -405,14 +404,9 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
|
|||
opts.ConstLabels,
|
||||
)
|
||||
return &SummaryVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newSummary(desc, opts, lvs...)
|
||||
},
|
||||
},
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
13 vendor/github.com/prometheus/client_golang/prometheus/untyped.go (generated, vendored)
|
@ -13,8 +13,6 @@
|
|||
|
||||
package prometheus
|
||||
|
||||
import "hash/fnv"
|
||||
|
||||
// Untyped is a Metric that represents a single numerical value that can
|
||||
// arbitrarily go up and down.
|
||||
//
|
||||
|
@ -58,7 +56,7 @@ func NewUntyped(opts UntypedOpts) Untyped {
|
|||
// labels. This is used if you want to count the same thing partitioned by
|
||||
// various dimensions. Create instances with NewUntypedVec.
|
||||
type UntypedVec struct {
|
||||
MetricVec
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
|
||||
|
@ -72,14 +70,9 @@ func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
|
|||
opts.ConstLabels,
|
||||
)
|
||||
return &UntypedVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newValue(desc, UntypedValue, 0, lvs...)
|
||||
},
|
||||
},
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
8 vendor/github.com/prometheus/client_golang/prometheus/value.go (generated, vendored)
|
@ -48,7 +48,7 @@ type value struct {
|
|||
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||
valBits uint64
|
||||
|
||||
SelfCollector
|
||||
selfCollector
|
||||
|
||||
desc *Desc
|
||||
valType ValueType
|
||||
|
@ -68,7 +68,7 @@ func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...strin
|
|||
valBits: math.Float64bits(val),
|
||||
labelPairs: makeLabelPairs(desc, labelValues),
|
||||
}
|
||||
result.Init(result)
|
||||
result.init(result)
|
||||
return result
|
||||
}
|
||||
|
||||
|
@ -113,7 +113,7 @@ func (v *value) Write(out *dto.Metric) error {
|
|||
// library to back the implementations of CounterFunc, GaugeFunc, and
|
||||
// UntypedFunc.
|
||||
type valueFunc struct {
|
||||
SelfCollector
|
||||
selfCollector
|
||||
|
||||
desc *Desc
|
||||
valType ValueType
|
||||
|
@ -134,7 +134,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val
|
|||
function: function,
|
||||
labelPairs: makeLabelPairs(desc, nil),
|
||||
}
|
||||
result.Init(result)
|
||||
result.init(result)
|
||||
return result
|
||||
}
|
||||
|
||||
|
|
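The valueFunc type above backs GaugeFunc, CounterFunc, and UntypedFunc; a small sketch of the GaugeFunc shortcut (the metric name is illustrative):

package main

import (
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	prometheus.MustRegister(prometheus.NewGaugeFunc(
		prometheus.GaugeOpts{
			Name: "goroutines_current",
			Help: "Number of goroutines, evaluated lazily at scrape time.",
		},
		func() float64 { return float64(runtime.NumGoroutine()) },
	))
}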
251 vendor/github.com/prometheus/client_golang/prometheus/vec.go (generated, vendored)
|
@ -14,10 +14,10 @@
|
|||
package prometheus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"hash"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
// MetricVec is a Collector to bundle metrics of the same name that
|
||||
|
@ -26,17 +26,32 @@ import (
|
|||
// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
|
||||
// provided in this package.
|
||||
type MetricVec struct {
|
||||
mtx sync.RWMutex // Protects not only children, but also hash and buf.
|
||||
children map[uint64]Metric
|
||||
mtx sync.RWMutex // Protects the children.
|
||||
children map[uint64][]metricWithLabelValues
|
||||
desc *Desc
|
||||
|
||||
// hash is our own hash instance to avoid repeated allocations.
|
||||
hash hash.Hash64
|
||||
// buf is used to copy string contents into it for hashing,
|
||||
// again to avoid allocations.
|
||||
buf bytes.Buffer
|
||||
|
||||
newMetric func(labelValues ...string) Metric
|
||||
hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling
|
||||
hashAddByte func(h uint64, b byte) uint64
|
||||
}
|
||||
|
||||
// newMetricVec returns an initialized MetricVec. The concrete value is
|
||||
// returned for embedding into another struct.
|
||||
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
|
||||
return &MetricVec{
|
||||
children: map[uint64][]metricWithLabelValues{},
|
||||
desc: desc,
|
||||
newMetric: newMetric,
|
||||
hashAdd: hashAdd,
|
||||
hashAddByte: hashAddByte,
|
||||
}
|
||||
}
|
||||
|
||||
// metricWithLabelValues provides the metric and its label values for
|
||||
// disambiguation on hash collision.
|
||||
type metricWithLabelValues struct {
|
||||
values []string
|
||||
metric Metric
|
||||
}
|
||||
|
||||
// Describe implements Collector. The length of the returned slice
|
||||
|
@ -50,8 +65,10 @@ func (m *MetricVec) Collect(ch chan<- Metric) {
|
|||
m.mtx.RLock()
|
||||
defer m.mtx.RUnlock()
|
||||
|
||||
for _, metric := range m.children {
|
||||
ch <- metric
|
||||
for _, metrics := range m.children {
|
||||
for _, metric := range metrics {
|
||||
ch <- metric.metric
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -80,14 +97,12 @@ func (m *MetricVec) Collect(ch chan<- Metric) {
|
|||
// with a performance overhead (for creating and processing the Labels map).
|
||||
// See also the GaugeVec example.
|
||||
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
h, err := m.hashLabelValues(lvs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m.getOrCreateMetric(h, lvs...), nil
|
||||
|
||||
return m.getOrCreateMetricWithLabelValues(h, lvs), nil
|
||||
}
|
||||
|
||||
// GetMetricWith returns the Metric for the given Labels map (the label names
|
||||
|
@ -103,18 +118,12 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
|
|||
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
|
||||
// methods.
|
||||
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
h, err := m.hashLabels(labels)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lvs := make([]string, len(labels))
|
||||
for i, label := range m.desc.variableLabels {
|
||||
lvs[i] = labels[label]
|
||||
}
|
||||
return m.getOrCreateMetric(h, lvs...), nil
|
||||
|
||||
return m.getOrCreateMetricWithLabels(h, labels), nil
|
||||
}
|
||||
|
||||
// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
|
||||
|
@ -162,11 +171,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
|
|||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if _, has := m.children[h]; !has {
|
||||
return false
|
||||
}
|
||||
delete(m.children, h)
|
||||
return true
|
||||
return m.deleteByHashWithLabelValues(h, lvs)
|
||||
}
|
||||
|
||||
// Delete deletes the metric where the variable labels are the same as those
|
||||
|
@ -187,10 +192,50 @@ func (m *MetricVec) Delete(labels Labels) bool {
|
|||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if _, has := m.children[h]; !has {
|
||||
|
||||
return m.deleteByHashWithLabels(h, labels)
|
||||
}
|
||||
|
||||
// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
|
||||
// there are multiple matches in the bucket, use lvs to select a metric and
|
||||
// remove only that metric.
|
||||
func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
|
||||
metrics, ok := m.children[h]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
i := m.findMetricWithLabelValues(metrics, lvs)
|
||||
if i >= len(metrics) {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(metrics) > 1 {
|
||||
m.children[h] = append(metrics[:i], metrics[i+1:]...)
|
||||
} else {
|
||||
delete(m.children, h)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteByHashWithLabels removes the metric from the hash bucket h. If there
|
||||
// are multiple matches in the bucket, use lvs to select a metric and remove
|
||||
// only that metric.
|
||||
func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
|
||||
metrics, ok := m.children[h]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
i := m.findMetricWithLabels(metrics, labels)
|
||||
if i >= len(metrics) {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(metrics) > 1 {
|
||||
m.children[h] = append(metrics[:i], metrics[i+1:]...)
|
||||
} else {
|
||||
delete(m.children, h)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
|
@ -208,40 +253,152 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
|
|||
if len(vals) != len(m.desc.variableLabels) {
|
||||
return 0, errInconsistentCardinality
|
||||
}
|
||||
m.hash.Reset()
|
||||
h := hashNew()
|
||||
for _, val := range vals {
|
||||
m.buf.Reset()
|
||||
m.buf.WriteString(val)
|
||||
m.hash.Write(m.buf.Bytes())
|
||||
h = m.hashAdd(h, val)
|
||||
h = m.hashAddByte(h, model.SeparatorByte)
|
||||
}
|
||||
return m.hash.Sum64(), nil
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
|
||||
if len(labels) != len(m.desc.variableLabels) {
|
||||
return 0, errInconsistentCardinality
|
||||
}
|
||||
m.hash.Reset()
|
||||
h := hashNew()
|
||||
for _, label := range m.desc.variableLabels {
|
||||
val, ok := labels[label]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("label name %q missing in label map", label)
|
||||
}
|
||||
m.buf.Reset()
|
||||
m.buf.WriteString(val)
|
||||
m.hash.Write(m.buf.Bytes())
|
||||
h = m.hashAdd(h, val)
|
||||
h = m.hashAddByte(h, model.SeparatorByte)
|
||||
}
|
||||
return m.hash.Sum64(), nil
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {
|
||||
metric, ok := m.children[hash]
|
||||
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
|
||||
// or creates it and returns the new one.
|
||||
//
|
||||
// This function holds the mutex.
|
||||
func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
|
||||
m.mtx.RLock()
|
||||
metric, ok := m.getMetricWithLabelValues(hash, lvs)
|
||||
m.mtx.RUnlock()
|
||||
if ok {
|
||||
return metric
|
||||
}
|
||||
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
metric, ok = m.getMetricWithLabelValues(hash, lvs)
|
||||
if !ok {
|
||||
// Copy labelValues. Otherwise, they would be allocated even if we don't go
|
||||
// down this code path.
|
||||
copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...)
|
||||
metric = m.newMetric(copiedLabelValues...)
|
||||
m.children[hash] = metric
|
||||
// Copy to avoid allocation in case we don't go down this code path.
|
||||
copiedLVs := make([]string, len(lvs))
|
||||
copy(copiedLVs, lvs)
|
||||
metric = m.newMetric(copiedLVs...)
|
||||
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
|
||||
}
|
||||
return metric
|
||||
}
|
||||
|
||||
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
|
||||
// or creates it and returns the new one.
|
||||
//
|
||||
// This function holds the mutex.
|
||||
func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
|
||||
m.mtx.RLock()
|
||||
metric, ok := m.getMetricWithLabels(hash, labels)
|
||||
m.mtx.RUnlock()
|
||||
if ok {
|
||||
return metric
|
||||
}
|
||||
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
metric, ok = m.getMetricWithLabels(hash, labels)
|
||||
if !ok {
|
||||
lvs := m.extractLabelValues(labels)
|
||||
metric = m.newMetric(lvs...)
|
||||
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
|
||||
}
|
||||
return metric
|
||||
}
|
||||
|
||||
// getMetricWithLabelValues gets a metric while handling possible collisions in
|
||||
// the hash space. Must be called while holding read mutex.
|
||||
func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
|
||||
metrics, ok := m.children[h]
|
||||
if ok {
|
||||
if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
|
||||
return metrics[i].metric, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// getMetricWithLabels gets a metric while handling possible collisions in
|
||||
// the hash space. Must be called while holding read mutex.
|
||||
func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
|
||||
metrics, ok := m.children[h]
|
||||
if ok {
|
||||
if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
|
||||
return metrics[i].metric, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// findMetricWithLabelValues returns the index of the matching metric or
|
||||
// len(metrics) if not found.
|
||||
func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
|
||||
for i, metric := range metrics {
|
||||
if m.matchLabelValues(metric.values, lvs) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return len(metrics)
|
||||
}
|
||||
|
||||
// findMetricWithLabels returns the index of the matching metric or len(metrics)
|
||||
// if not found.
|
||||
func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
|
||||
for i, metric := range metrics {
|
||||
if m.matchLabels(metric.values, labels) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return len(metrics)
|
||||
}
|
||||
|
||||
func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
|
||||
if len(values) != len(lvs) {
|
||||
return false
|
||||
}
|
||||
for i, v := range values {
|
||||
if v != lvs[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
|
||||
if len(labels) != len(values) {
|
||||
return false
|
||||
}
|
||||
for i, k := range m.desc.variableLabels {
|
||||
if values[i] != labels[k] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *MetricVec) extractLabelValues(labels Labels) []string {
|
||||
labelValues := make([]string, len(labels))
|
||||
for i, k := range m.desc.variableLabels {
|
||||
labelValues[i] = labels[k]
|
||||
}
|
||||
return labelValues
|
||||
}
|
||||
|
|
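Editor's note, not part of the diff: the hunks above replace the flat children[hash] = metric mapping with a slice of metricWithLabelValues per hash, so that two label-value sets that collide under the same FNV-1a hash still resolve to distinct child metrics (findMetricWithLabelValues / matchLabelValues do the disambiguation). A minimal standalone sketch of that bucket pattern, with deliberately simplified, hypothetical types (a metric reduced to a string, no locking):

package main

import "fmt"

// Simplified stand-ins for the real prometheus types.
type metricWithLabelValues struct {
	values []string
	metric string
}

type vec struct {
	children map[uint64][]metricWithLabelValues
}

func matchLabelValues(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

// getOrCreate mirrors the collision-tolerant lookup in the hunk above:
// the hash selects a bucket, the label values disambiguate within it.
func (v *vec) getOrCreate(hash uint64, lvs []string) string {
	for _, m := range v.children[hash] {
		if matchLabelValues(m.values, lvs) {
			return m.metric
		}
	}
	copied := append([]string(nil), lvs...) // copy, as the diff does, so callers may reuse lvs
	metric := fmt.Sprintf("metric%v", copied)
	v.children[hash] = append(v.children[hash], metricWithLabelValues{values: copied, metric: metric})
	return metric
}

func main() {
	v := &vec{children: map[uint64][]metricWithLabelValues{}}
	// Pretend both label sets hash to the same value to show the bucket behaviour.
	fmt.Println(v.getOrCreate(42, []string{"GET", "/"}))
	fmt.Println(v.getOrCreate(42, []string{"POST", "/"}))
	fmt.Println(v.getOrCreate(42, []string{"GET", "/"})) // returns the first metric again
}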
vendor/github.com/prometheus/client_model/ruby/LICENSE (generated, vendored, new file; 201 lines)

@@ -0,0 +1,201 @@
[Full standard text of the Apache License, Version 2.0, January 2004 — http://www.apache.org/licenses/]
vendor/github.com/prometheus/common/LICENSE (generated, vendored, new file; 201 lines)

@@ -0,0 +1,201 @@
[Same full standard text of the Apache License, Version 2.0, January 2004 — http://www.apache.org/licenses/]
vendor/github.com/prometheus/common/NOTICE (generated, vendored, new file; 5 lines)

@@ -0,0 +1,5 @@
Common libraries shared by Prometheus Go components.
Copyright 2015 The Prometheus Authors

This product includes software developed at
SoundCloud Ltd. (http://soundcloud.com/).
vendor/github.com/prometheus/common/expfmt/decode.go (generated, vendored; 94 changed lines)
|
@ -31,6 +31,7 @@ type Decoder interface {
|
|||
Decode(*dto.MetricFamily) error
|
||||
}
|
||||
|
||||
// DecodeOptions contains options used by the Decoder and in sample extraction.
|
||||
type DecodeOptions struct {
|
||||
// Timestamp is added to each value from the stream that has no explicit timestamp set.
|
||||
Timestamp model.Time
|
||||
|
@ -46,10 +47,7 @@ func ResponseFormat(h http.Header) Format {
|
|||
return FmtUnknown
|
||||
}
|
||||
|
||||
const (
|
||||
textType = "text/plain"
|
||||
jsonType = "application/json"
|
||||
)
|
||||
const textType = "text/plain"
|
||||
|
||||
switch mediatype {
|
||||
case ProtoType:
|
||||
|
@ -66,22 +64,6 @@ func ResponseFormat(h http.Header) Format {
|
|||
return FmtUnknown
|
||||
}
|
||||
return FmtText
|
||||
|
||||
case jsonType:
|
||||
var prometheusAPIVersion string
|
||||
|
||||
if params["schema"] == "prometheus/telemetry" && params["version"] != "" {
|
||||
prometheusAPIVersion = params["version"]
|
||||
} else {
|
||||
prometheusAPIVersion = h.Get("X-Prometheus-API-Version")
|
||||
}
|
||||
|
||||
switch prometheusAPIVersion {
|
||||
case "0.0.2", "":
|
||||
return fmtJSON2
|
||||
default:
|
||||
return FmtUnknown
|
||||
}
|
||||
}
|
||||
|
||||
return FmtUnknown
|
||||
|
@ -93,8 +75,6 @@ func NewDecoder(r io.Reader, format Format) Decoder {
|
|||
switch format {
|
||||
case FmtProtoDelim:
|
||||
return &protoDecoder{r: r}
|
||||
case fmtJSON2:
|
||||
return newJSON2Decoder(r)
|
||||
}
|
||||
return &textDecoder{r: r}
|
||||
}
|
||||
|
@ -107,10 +87,32 @@ type protoDecoder struct {
|
|||
// Decode implements the Decoder interface.
|
||||
func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
|
||||
_, err := pbutil.ReadDelimited(d.r, v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
|
||||
return fmt.Errorf("invalid metric name %q", v.GetName())
|
||||
}
|
||||
for _, m := range v.GetMetric() {
|
||||
if m == nil {
|
||||
continue
|
||||
}
|
||||
for _, l := range m.GetLabel() {
|
||||
if l == nil {
|
||||
continue
|
||||
}
|
||||
if !model.LabelValue(l.GetValue()).IsValid() {
|
||||
return fmt.Errorf("invalid label value %q", l.GetValue())
|
||||
}
|
||||
if !model.LabelName(l.GetName()).IsValid() {
|
||||
return fmt.Errorf("invalid label name %q", l.GetName())
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// textDecoder implements the Decoder interface for the text protcol.
|
||||
// textDecoder implements the Decoder interface for the text protocol.
|
||||
type textDecoder struct {
|
||||
r io.Reader
|
||||
p TextParser
|
||||
|
@ -141,6 +143,8 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// SampleDecoder wraps a Decoder to extract samples from the metric families
|
||||
// decoded by the wrapped Decoder.
|
||||
type SampleDecoder struct {
|
||||
Dec Decoder
|
||||
Opts *DecodeOptions
|
||||
|
@ -148,37 +152,51 @@ type SampleDecoder struct {
|
|||
f dto.MetricFamily
|
||||
}
|
||||
|
||||
// Decode calls the Decode method of the wrapped Decoder and then extracts the
|
||||
// samples from the decoded MetricFamily into the provided model.Vector.
|
||||
func (sd *SampleDecoder) Decode(s *model.Vector) error {
|
||||
if err := sd.Dec.Decode(&sd.f); err != nil {
|
||||
err := sd.Dec.Decode(&sd.f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*s = extractSamples(&sd.f, sd.Opts)
|
||||
return nil
|
||||
*s, err = extractSamples(&sd.f, sd.Opts)
|
||||
return err
|
||||
}
|
||||
|
||||
// Extract samples builds a slice of samples from the provided metric families.
|
||||
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
|
||||
var all model.Vector
|
||||
// ExtractSamples builds a slice of samples from the provided metric
|
||||
// families. If an error occurs during sample extraction, it continues to
|
||||
// extract from the remaining metric families. The returned error is the last
|
||||
// error that has occurred.
|
||||
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
|
||||
var (
|
||||
all model.Vector
|
||||
lastErr error
|
||||
)
|
||||
for _, f := range fams {
|
||||
all = append(all, extractSamples(f, o)...)
|
||||
some, err := extractSamples(f, o)
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
return all
|
||||
all = append(all, some...)
|
||||
}
|
||||
return all, lastErr
|
||||
}
|
||||
|
||||
func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
|
||||
func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
|
||||
switch f.GetType() {
|
||||
case dto.MetricType_COUNTER:
|
||||
return extractCounter(o, f)
|
||||
return extractCounter(o, f), nil
|
||||
case dto.MetricType_GAUGE:
|
||||
return extractGauge(o, f)
|
||||
return extractGauge(o, f), nil
|
||||
case dto.MetricType_SUMMARY:
|
||||
return extractSummary(o, f)
|
||||
return extractSummary(o, f), nil
|
||||
case dto.MetricType_UNTYPED:
|
||||
return extractUntyped(o, f)
|
||||
return extractUntyped(o, f), nil
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
return extractHistogram(o, f)
|
||||
return extractHistogram(o, f), nil
|
||||
}
|
||||
panic("expfmt.extractSamples: unknown metric family type")
|
||||
return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
|
||||
}
|
||||
|
||||
func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
|
||||
|
|
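Editor's note, not part of the diff: the decode changes above make protoDecoder validate metric and label names, and change ExtractSamples to return the last extraction error instead of panicking on an unknown metric family type. A minimal usage sketch against the changed API (the metric text is made up for illustration):

package main

import (
	"fmt"
	"io"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	input := "# TYPE requests_total counter\nrequests_total{path=\"/\"} 42\n"
	dec := expfmt.NewDecoder(strings.NewReader(input), expfmt.FmtText)

	var families []*dto.MetricFamily
	for {
		mf := &dto.MetricFamily{}
		if err := dec.Decode(mf); err != nil {
			if err != io.EOF {
				fmt.Println("decode error:", err)
			}
			break // io.EOF marks the clean end of the input
		}
		families = append(families, mf)
	}

	// Post-change signature: ExtractSamples returns the last error it hit.
	samples, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{Timestamp: model.Now()}, families...)
	if err != nil {
		fmt.Println("extraction error:", err)
	}
	for _, s := range samples {
		fmt.Println(s)
	}
}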
vendor/github.com/prometheus/common/expfmt/encode.go (generated, vendored; 2 changed lines)

@@ -18,9 +18,9 @@ import (
 	"io"
 	"net/http"

-	"bitbucket.org/ww/goautoneg"
 	"github.com/golang/protobuf/proto"
 	"github.com/matttproud/golang_protobuf_extensions/pbutil"
+	"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"

 	dto "github.com/prometheus/client_model/go"
 )
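Editor's note, not part of the diff: the only change in encode.go is the goautoneg import moving to the internal vendored copy; the exported API is untouched. For context, a hedged sketch of how this package is typically used to serve metrics in a negotiated format (the handler wiring is illustrative, not code from this repository):

package main

import (
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func metricsHandler(families []*dto.MetricFamily) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		format := expfmt.Negotiate(r.Header) // picks FmtProtoDelim, FmtText, ... from the Accept header
		w.Header().Set("Content-Type", string(format))
		enc := expfmt.NewEncoder(w, format)
		for _, mf := range families {
			if err := enc.Encode(mf); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
	}
}

func main() {
	http.Handle("/metrics", metricsHandler(nil))
	if err := http.ListenAndServe(":8080", nil); err != nil {
		panic(err)
	}
}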
vendor/github.com/prometheus/common/expfmt/expfmt.go (generated, vendored; 10 changed lines)
|
@ -11,27 +11,25 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// A package for reading and writing Prometheus metrics.
|
||||
// Package expfmt contains tools for reading and writing Prometheus metrics.
|
||||
package expfmt
|
||||
|
||||
// Format specifies the HTTP content type of the different wire protocols.
|
||||
type Format string
|
||||
|
||||
// Constants to assemble the Content-Type values for the different wire protocols.
|
||||
const (
|
||||
TextVersion = "0.0.4"
|
||||
|
||||
ProtoType = `application/vnd.google.protobuf`
|
||||
ProtoProtocol = `io.prometheus.client.MetricFamily`
|
||||
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
|
||||
|
||||
// The Content-Type values for the different wire protocols.
|
||||
FmtUnknown Format = `<unknown>`
|
||||
FmtText Format = `text/plain; version=` + TextVersion
|
||||
FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
|
||||
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
|
||||
FmtProtoText Format = ProtoFmt + ` encoding=text`
|
||||
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
|
||||
|
||||
// fmtJSON2 is hidden as it is deprecated.
|
||||
fmtJSON2 Format = `application/json; version=0.0.2`
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
vendor/github.com/prometheus/common/expfmt/fuzz.go (generated, vendored; 4 changed lines)

@@ -20,8 +20,8 @@ import "bytes"

 // Fuzz text metric parser with with github.com/dvyukov/go-fuzz:
 //
-// go-fuzz-build github.com/prometheus/client_golang/text
-// go-fuzz -bin text-fuzz.zip -workdir fuzz
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
 //
 // Further input samples should go in the folder fuzz/corpus.
 func Fuzz(in []byte) int {
vendor/github.com/prometheus/common/expfmt/json_decode.go (generated, vendored; 162 changed lines — file removed)
|
@ -1,162 +0,0 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
type json2Decoder struct {
|
||||
dec *json.Decoder
|
||||
fams []*dto.MetricFamily
|
||||
}
|
||||
|
||||
func newJSON2Decoder(r io.Reader) Decoder {
|
||||
return &json2Decoder{
|
||||
dec: json.NewDecoder(r),
|
||||
}
|
||||
}
|
||||
|
||||
type histogram002 struct {
|
||||
Labels model.LabelSet `json:"labels"`
|
||||
Values map[string]float64 `json:"value"`
|
||||
}
|
||||
|
||||
type counter002 struct {
|
||||
Labels model.LabelSet `json:"labels"`
|
||||
Value float64 `json:"value"`
|
||||
}
|
||||
|
||||
func protoLabelSet(base, ext model.LabelSet) []*dto.LabelPair {
|
||||
labels := base.Clone().Merge(ext)
|
||||
delete(labels, model.MetricNameLabel)
|
||||
|
||||
names := make([]string, 0, len(labels))
|
||||
for ln := range labels {
|
||||
names = append(names, string(ln))
|
||||
}
|
||||
sort.Strings(names)
|
||||
|
||||
pairs := make([]*dto.LabelPair, 0, len(labels))
|
||||
|
||||
for _, ln := range names {
|
||||
lv := labels[model.LabelName(ln)]
|
||||
|
||||
pairs = append(pairs, &dto.LabelPair{
|
||||
Name: proto.String(ln),
|
||||
Value: proto.String(string(lv)),
|
||||
})
|
||||
}
|
||||
|
||||
return pairs
|
||||
}
|
||||
|
||||
func (d *json2Decoder) more() error {
|
||||
var entities []struct {
|
||||
BaseLabels model.LabelSet `json:"baseLabels"`
|
||||
Docstring string `json:"docstring"`
|
||||
Metric struct {
|
||||
Type string `json:"type"`
|
||||
Values json.RawMessage `json:"value"`
|
||||
} `json:"metric"`
|
||||
}
|
||||
|
||||
if err := d.dec.Decode(&entities); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, e := range entities {
|
||||
f := &dto.MetricFamily{
|
||||
Name: proto.String(string(e.BaseLabels[model.MetricNameLabel])),
|
||||
Help: proto.String(e.Docstring),
|
||||
Type: dto.MetricType_UNTYPED.Enum(),
|
||||
Metric: []*dto.Metric{},
|
||||
}
|
||||
|
||||
d.fams = append(d.fams, f)
|
||||
|
||||
switch e.Metric.Type {
|
||||
case "counter", "gauge":
|
||||
var values []counter002
|
||||
|
||||
if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
|
||||
return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
|
||||
}
|
||||
|
||||
for _, ctr := range values {
|
||||
f.Metric = append(f.Metric, &dto.Metric{
|
||||
Label: protoLabelSet(e.BaseLabels, ctr.Labels),
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(ctr.Value),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
case "histogram":
|
||||
var values []histogram002
|
||||
|
||||
if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
|
||||
return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
|
||||
}
|
||||
|
||||
for _, hist := range values {
|
||||
quants := make([]string, 0, len(values))
|
||||
for q := range hist.Values {
|
||||
quants = append(quants, q)
|
||||
}
|
||||
|
||||
sort.Strings(quants)
|
||||
|
||||
for _, q := range quants {
|
||||
value := hist.Values[q]
|
||||
// The correct label is "quantile" but to not break old expressions
|
||||
// this remains "percentile"
|
||||
hist.Labels["percentile"] = model.LabelValue(q)
|
||||
|
||||
f.Metric = append(f.Metric, &dto.Metric{
|
||||
Label: protoLabelSet(e.BaseLabels, hist.Labels),
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(value),
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown metric type %q", e.Metric.Type)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode implements the Decoder interface.
|
||||
func (d *json2Decoder) Decode(v *dto.MetricFamily) error {
|
||||
if len(d.fams) == 0 {
|
||||
if err := d.more(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
*v = *d.fams[0]
|
||||
d.fams = d.fams[1:]
|
||||
|
||||
return nil
|
||||
}
|
vendor/github.com/prometheus/common/expfmt/text_create.go (generated, vendored; 30 changed lines)
|
@ -14,7 +14,6 @@
|
|||
package expfmt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
|
@ -26,9 +25,12 @@ import (
|
|||
|
||||
// MetricFamilyToText converts a MetricFamily proto message into text format and
|
||||
// writes the resulting lines to 'out'. It returns the number of bytes written
|
||||
// and any error encountered. This function does not perform checks on the
|
||||
// content of the metric and label names, i.e. invalid metric or label names
|
||||
// and any error encountered. The output will have the same order as the input,
|
||||
// no further sorting is performed. Furthermore, this function assumes the input
|
||||
// is already sanitized and does not perform any sanity checks. If the input
|
||||
// contains duplicate metrics or invalid metric or label names, the conversion
|
||||
// will result in invalid text format output.
|
||||
//
|
||||
// This method fulfills the type 'prometheus.encoder'.
|
||||
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
||||
var written int
|
||||
|
@ -285,21 +287,17 @@ func labelPairsToText(
|
|||
return written, nil
|
||||
}
|
||||
|
||||
var (
|
||||
escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)
|
||||
escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
|
||||
)
|
||||
|
||||
// escapeString replaces '\' by '\\', new line character by '\n', and - if
|
||||
// includeDoubleQuote is true - '"' by '\"'.
|
||||
func escapeString(v string, includeDoubleQuote bool) string {
|
||||
result := bytes.NewBuffer(make([]byte, 0, len(v)))
|
||||
for _, c := range v {
|
||||
switch {
|
||||
case c == '\\':
|
||||
result.WriteString(`\\`)
|
||||
case includeDoubleQuote && c == '"':
|
||||
result.WriteString(`\"`)
|
||||
case c == '\n':
|
||||
result.WriteString(`\n`)
|
||||
default:
|
||||
result.WriteRune(c)
|
||||
if includeDoubleQuote {
|
||||
return escapeWithDoubleQuote.Replace(v)
|
||||
}
|
||||
}
|
||||
return result.String()
|
||||
|
||||
return escape.Replace(v)
|
||||
}
|
||||
|
|
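Editor's note, not part of the diff: the hunks above tighten MetricFamilyToText's documentation (the caller must pass sanitized, deduplicated input) and replace the per-rune escape loop with strings.NewReplacer. A minimal sketch of calling the function with a hand-built family (names and values are made up for illustration):

package main

import (
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("build_info"),
		Help: proto.String("Build information, value is always 1."),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("version"),
				Value: proto.String("1.0.0"),
			}},
			Gauge: &dto.Gauge{Value: proto.Float64(1)},
		}},
	}
	// Writes the text exposition format, e.g.:
	//   # HELP build_info Build information, value is always 1.
	//   # TYPE build_info gauge
	//   build_info{version="1.0.0"} 1
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}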
vendor/github.com/prometheus/common/expfmt/text_parse.go (generated, vendored; 17 changed lines)
|
@ -47,7 +47,7 @@ func (e ParseError) Error() string {
|
|||
}
|
||||
|
||||
// TextParser is used to parse the simple and flat text-based exchange format. Its
|
||||
// nil value is ready to use.
|
||||
// zero value is ready to use.
|
||||
type TextParser struct {
|
||||
metricFamiliesByName map[string]*dto.MetricFamily
|
||||
buf *bufio.Reader // Where the parsed input is read through.
|
||||
|
@ -108,6 +108,13 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF
|
|||
delete(p.metricFamiliesByName, k)
|
||||
}
|
||||
}
|
||||
// If p.err is io.EOF now, we have run into a premature end of the input
|
||||
// stream. Turn this error into something nicer and more
|
||||
// meaningful. (io.EOF is often used as a signal for the legitimate end
|
||||
// of an input stream.)
|
||||
if p.err == io.EOF {
|
||||
p.parseError("unexpected end of input stream")
|
||||
}
|
||||
return p.metricFamiliesByName, p.err
|
||||
}
|
||||
|
||||
|
@ -308,6 +315,10 @@ func (p *TextParser) startLabelValue() stateFn {
|
|||
if p.readTokenAsLabelValue(); p.err != nil {
|
||||
return nil
|
||||
}
|
||||
if !model.LabelValue(p.currentToken.String()).IsValid() {
|
||||
p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
|
||||
return nil
|
||||
}
|
||||
p.currentLabelPair.Value = proto.String(p.currentToken.String())
|
||||
// Special treatment of summaries:
|
||||
// - Quantile labels are special, will result in dto.Quantile later.
|
||||
|
@ -545,8 +556,8 @@ func (p *TextParser) readTokenUntilWhitespace() {
|
|||
// byte considered is the byte already read (now in p.currentByte). The first
|
||||
// newline byte encountered is still copied into p.currentByte, but not into
|
||||
// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
|
||||
// recognized: '\\' tranlates into '\', and '\n' into a line-feed character. All
|
||||
// other escape sequences are invalid and cause an error.
|
||||
// recognized: '\\' translates into '\', and '\n' into a line-feed character.
|
||||
// All other escape sequences are invalid and cause an error.
|
||||
func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
|
||||
p.currentToken.Reset()
|
||||
escaped := false
|
||||
|
|
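Editor's note, not part of the diff: the parser changes above turn a premature io.EOF into a proper "unexpected end of input stream" parse error and validate label values as UTF-8. A minimal sketch of driving the parser, with made-up input:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	input := "# TYPE requests_total counter\nrequests_total{path=\"/\"} 42\n"

	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		// A truncated exposition now yields "unexpected end of input stream"
		// instead of a bare io.EOF.
		fmt.Println("parse error:", err)
		return
	}
	for name, mf := range families {
		fmt.Println(name, mf.GetType(), len(mf.GetMetric()))
	}
}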
vendor/github.com/prometheus/common/model/alert.go (generated, vendored, new file; 136 lines)
|
@ -0,0 +1,136 @@
|
|||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
type AlertStatus string
|
||||
|
||||
const (
|
||||
AlertFiring AlertStatus = "firing"
|
||||
AlertResolved AlertStatus = "resolved"
|
||||
)
|
||||
|
||||
// Alert is a generic representation of an alert in the Prometheus eco-system.
|
||||
type Alert struct {
|
||||
// Label value pairs for purpose of aggregation, matching, and disposition
|
||||
// dispatching. This must minimally include an "alertname" label.
|
||||
Labels LabelSet `json:"labels"`
|
||||
|
||||
// Extra key/value information which does not define alert identity.
|
||||
Annotations LabelSet `json:"annotations"`
|
||||
|
||||
// The known time range for this alert. Both ends are optional.
|
||||
StartsAt time.Time `json:"startsAt,omitempty"`
|
||||
EndsAt time.Time `json:"endsAt,omitempty"`
|
||||
GeneratorURL string `json:"generatorURL"`
|
||||
}
|
||||
|
||||
// Name returns the name of the alert. It is equivalent to the "alertname" label.
|
||||
func (a *Alert) Name() string {
|
||||
return string(a.Labels[AlertNameLabel])
|
||||
}
|
||||
|
||||
// Fingerprint returns a unique hash for the alert. It is equivalent to
|
||||
// the fingerprint of the alert's label set.
|
||||
func (a *Alert) Fingerprint() Fingerprint {
|
||||
return a.Labels.Fingerprint()
|
||||
}
|
||||
|
||||
func (a *Alert) String() string {
|
||||
s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
|
||||
if a.Resolved() {
|
||||
return s + "[resolved]"
|
||||
}
|
||||
return s + "[active]"
|
||||
}
|
||||
|
||||
// Resolved returns true iff the activity interval ended in the past.
|
||||
func (a *Alert) Resolved() bool {
|
||||
return a.ResolvedAt(time.Now())
|
||||
}
|
||||
|
||||
// ResolvedAt returns true iff the activity interval ended before
|
||||
// the given timestamp.
|
||||
func (a *Alert) ResolvedAt(ts time.Time) bool {
|
||||
if a.EndsAt.IsZero() {
|
||||
return false
|
||||
}
|
||||
return !a.EndsAt.After(ts)
|
||||
}
|
||||
|
||||
// Status returns the status of the alert.
|
||||
func (a *Alert) Status() AlertStatus {
|
||||
if a.Resolved() {
|
||||
return AlertResolved
|
||||
}
|
||||
return AlertFiring
|
||||
}
|
||||
|
||||
// Validate checks whether the alert data is inconsistent.
|
||||
func (a *Alert) Validate() error {
|
||||
if a.StartsAt.IsZero() {
|
||||
return fmt.Errorf("start time missing")
|
||||
}
|
||||
if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
|
||||
return fmt.Errorf("start time must be before end time")
|
||||
}
|
||||
if err := a.Labels.Validate(); err != nil {
|
||||
return fmt.Errorf("invalid label set: %s", err)
|
||||
}
|
||||
if len(a.Labels) == 0 {
|
||||
return fmt.Errorf("at least one label pair required")
|
||||
}
|
||||
if err := a.Annotations.Validate(); err != nil {
|
||||
return fmt.Errorf("invalid annotations: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Alerts is a list of alerts that can be sorted in chronological order.
|
||||
type Alerts []*Alert
|
||||
|
||||
func (as Alerts) Len() int { return len(as) }
|
||||
func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
|
||||
|
||||
func (as Alerts) Less(i, j int) bool {
|
||||
if as[i].StartsAt.Before(as[j].StartsAt) {
|
||||
return true
|
||||
}
|
||||
if as[i].EndsAt.Before(as[j].EndsAt) {
|
||||
return true
|
||||
}
|
||||
return as[i].Fingerprint() < as[j].Fingerprint()
|
||||
}
|
||||
|
||||
// HasFiring returns true iff one of the alerts is not resolved.
|
||||
func (as Alerts) HasFiring() bool {
|
||||
for _, a := range as {
|
||||
if !a.Resolved() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Status returns StatusFiring iff at least one of the alerts is firing.
|
||||
func (as Alerts) Status() AlertStatus {
|
||||
if as.HasFiring() {
|
||||
return AlertFiring
|
||||
}
|
||||
return AlertResolved
|
||||
}
|
vendor/github.com/prometheus/common/model/fnv.go (generated, vendored, new file; 42 lines)
|
@ -0,0 +1,42 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
// Inline and byte-free variant of hash/fnv's fnv64a.
|
||||
|
||||
const (
|
||||
offset64 = 14695981039346656037
|
||||
prime64 = 1099511628211
|
||||
)
|
||||
|
||||
// hashNew initializes a new fnv64a hash value.
|
||||
func hashNew() uint64 {
|
||||
return offset64
|
||||
}
|
||||
|
||||
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
|
||||
func hashAdd(h uint64, s string) uint64 {
|
||||
for i := 0; i < len(s); i++ {
|
||||
h ^= uint64(s[i])
|
||||
h *= prime64
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
|
||||
func hashAddByte(h uint64, b byte) uint64 {
|
||||
h ^= uint64(b)
|
||||
h *= prime64
|
||||
return h
|
||||
}
|
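Editor's note, not part of the diff: fnv.go is new and provides the allocation-free FNV-1a helpers that signature.go switches to further down. They are unexported, so this sketch re-implements them locally (an assumption, not the package API) to show the byte sequence LabelsToSignature feeds into the hash:

package main

import "fmt"

const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

// hashAdd folds a string into an fnv64a hash value, byte by byte.
func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

// hashAddByte folds a single byte into an fnv64a hash value.
func hashAddByte(h uint64, b byte) uint64 {
	h ^= uint64(b)
	h *= prime64
	return h
}

func main() {
	// Same sequence LabelsToSignature hashes: name, separator, value,
	// separator — for each label in sorted order.
	sum := uint64(offset64)
	for _, kv := range [][2]string{{"instance", "localhost:9090"}, {"job", "prometheus"}} {
		sum = hashAdd(sum, kv[0])
		sum = hashAddByte(sum, 255) // model.SeparatorByte
		sum = hashAdd(sum, kv[1])
		sum = hashAddByte(sum, 255)
	}
	fmt.Printf("signature: %x\n", sum)
}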
vendor/github.com/prometheus/common/model/labels.go (generated, vendored; 32 changed lines)
|
@ -17,8 +17,8 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -80,20 +80,37 @@ const (
|
|||
QuantileLabel = "quantile"
|
||||
)
|
||||
|
||||
// LabelNameRE is a regular expression matching valid label names.
|
||||
// LabelNameRE is a regular expression matching valid label names. Note that the
|
||||
// IsValid method of LabelName performs the same check but faster than a match
|
||||
// with this regular expression.
|
||||
var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
|
||||
|
||||
// A LabelName is a key for a LabelSet or Metric. It has a value associated
|
||||
// therewith.
|
||||
type LabelName string
|
||||
|
||||
// IsValid is true iff the label name matches the pattern of LabelNameRE. This
|
||||
// method, however, does not use LabelNameRE for the check but a much faster
|
||||
// hardcoded implementation.
|
||||
func (ln LabelName) IsValid() bool {
|
||||
if len(ln) == 0 {
|
||||
return false
|
||||
}
|
||||
for i, b := range ln {
|
||||
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
var s string
|
||||
if err := unmarshal(&s); err != nil {
|
||||
return err
|
||||
}
|
||||
if !LabelNameRE.MatchString(s) {
|
||||
if !LabelName(s).IsValid() {
|
||||
return fmt.Errorf("%q is not a valid label name", s)
|
||||
}
|
||||
*ln = LabelName(s)
|
||||
|
@ -106,7 +123,7 @@ func (ln *LabelName) UnmarshalJSON(b []byte) error {
|
|||
if err := json.Unmarshal(b, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
if !LabelNameRE.MatchString(s) {
|
||||
if !LabelName(s).IsValid() {
|
||||
return fmt.Errorf("%q is not a valid label name", s)
|
||||
}
|
||||
*ln = LabelName(s)
|
||||
|
@ -139,6 +156,11 @@ func (l LabelNames) String() string {
|
|||
// A LabelValue is an associated value for a LabelName.
|
||||
type LabelValue string
|
||||
|
||||
// IsValid returns true iff the string is a valid UTF8.
|
||||
func (lv LabelValue) IsValid() bool {
|
||||
return utf8.ValidString(string(lv))
|
||||
}
|
||||
|
||||
// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
|
||||
type LabelValues []LabelValue
|
||||
|
||||
|
@ -147,7 +169,7 @@ func (l LabelValues) Len() int {
|
|||
}
|
||||
|
||||
func (l LabelValues) Less(i, j int) bool {
|
||||
return sort.StringsAreSorted([]string{string(l[i]), string(l[j])})
|
||||
return string(l[i]) < string(l[j])
|
||||
}
|
||||
|
||||
func (l LabelValues) Swap(i, j int) {
|
||||
|
|
vendor/github.com/prometheus/common/model/labelset.go (generated, vendored; 18 changed lines)
|
@ -27,6 +27,21 @@ import (
|
|||
// match.
|
||||
type LabelSet map[LabelName]LabelValue
|
||||
|
||||
// Validate checks whether all names and values in the label set
|
||||
// are valid.
|
||||
func (ls LabelSet) Validate() error {
|
||||
for ln, lv := range ls {
|
||||
if !ln.IsValid() {
|
||||
return fmt.Errorf("invalid name %q", ln)
|
||||
}
|
||||
if !lv.IsValid() {
|
||||
return fmt.Errorf("invalid value %q", lv)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Equal returns true iff both label sets have exactly the same key/value pairs.
|
||||
func (ls LabelSet) Equal(o LabelSet) bool {
|
||||
if len(ls) != len(o) {
|
||||
return false
|
||||
|
@ -90,6 +105,7 @@ func (ls LabelSet) Before(o LabelSet) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// Clone returns a copy of the label set.
|
||||
func (ls LabelSet) Clone() LabelSet {
|
||||
lsn := make(LabelSet, len(ls))
|
||||
for ln, lv := range ls {
|
||||
|
@ -144,7 +160,7 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error {
|
|||
// LabelName as a string and does not call its UnmarshalJSON method.
|
||||
// Thus, we have to replicate the behavior here.
|
||||
for ln := range m {
|
||||
if !LabelNameRE.MatchString(string(ln)) {
|
||||
if !ln.IsValid() {
|
||||
return fmt.Errorf("%q is not a valid label name", ln)
|
||||
}
|
||||
}
|
||||
|
|
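Editor's note, not part of the diff: LabelSet gains a Validate method, Clone is now documented, and UnmarshalJSON uses the faster IsValid check instead of the regexp. A small usage sketch with made-up labels:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	ls := model.LabelSet{"job": "api", "instance": "localhost:9090"}
	if err := ls.Validate(); err != nil {
		panic(err)
	}
	// Clone+Merge leaves the receiver untouched and returns a combined set.
	merged := ls.Clone().Merge(model.LabelSet{"env": "prod"})
	fmt.Println(len(ls), len(merged)) // 2 3

	bad := model.LabelSet{"0bad": "x"}
	fmt.Println(bad.Validate()) // invalid name "0bad"
}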
vendor/github.com/prometheus/common/model/metric.go (generated, vendored; 26 changed lines)
|
@ -15,11 +15,18 @@ package model
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var separator = []byte{0}
|
||||
var (
|
||||
separator = []byte{0}
|
||||
// MetricNameRE is a regular expression matching valid metric
|
||||
// names. Note that the IsValidMetricName function performs the same
|
||||
// check but faster than a match with this regular expression.
|
||||
MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
|
||||
)
|
||||
|
||||
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
|
||||
// a singleton and refers to one and only one stream of samples.
|
||||
|
@ -37,7 +44,7 @@ func (m Metric) Before(o Metric) bool {
|
|||
|
||||
// Clone returns a copy of the Metric.
|
||||
func (m Metric) Clone() Metric {
|
||||
clone := Metric{}
|
||||
clone := make(Metric, len(m))
|
||||
for k, v := range m {
|
||||
clone[k] = v
|
||||
}
|
||||
|
@ -79,3 +86,18 @@ func (m Metric) Fingerprint() Fingerprint {
|
|||
func (m Metric) FastFingerprint() Fingerprint {
|
||||
return LabelSet(m).FastFingerprint()
|
||||
}
|
||||
|
||||
// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
|
||||
// This function, however, does not use MetricNameRE for the check but a much
|
||||
// faster hardcoded implementation.
|
||||
func IsValidMetricName(n LabelValue) bool {
|
||||
if len(n) == 0 {
|
||||
return false
|
||||
}
|
||||
for i, b := range n {
|
||||
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
|
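Editor's note, not part of the diff: metric.go now exposes MetricNameRE plus a hand-rolled IsValidMetricName that avoids the regexp, and labels.go above adds the analogous LabelName.IsValid. A quick sketch of both checks:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.IsValidMetricName(model.LabelValue("http_requests_total"))) // true
	fmt.Println(model.IsValidMetricName(model.LabelValue("0_requests")))          // false: may not start with a digit
	fmt.Println(model.LabelName("instance").IsValid())                            // true
	fmt.Println(model.LabelName("bad-label").IsValid())                           // false: '-' is not allowed
	// The regexp form gives the same answer, just slower:
	fmt.Println(model.MetricNameRE.MatchString("http_requests_total")) // true
}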
vendor/github.com/prometheus/common/model/model.go (generated, vendored; 2 changed lines)

@@ -12,5 +12,5 @@
 // limitations under the License.

 // Package model contains common data structures that are shared across
-// Prometheus componenets and libraries.
+// Prometheus components and libraries.
 package model
108 vendor/github.com/prometheus/common/model/signature.go generated vendored
@ -14,11 +14,7 @@
 package model
 
 import (
-    "bytes"
-    "hash"
-    "hash/fnv"
     "sort"
-    "sync"
 )
 
 // SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
@ -28,30 +24,9 @@ const SeparatorByte byte = 255
 
 var (
     // cache the signature of an empty label set.
-    emptyLabelSignature = fnv.New64a().Sum64()
-
-    hashAndBufPool sync.Pool
+    emptyLabelSignature = hashNew()
 )
 
-type hashAndBuf struct {
-    h hash.Hash64
-    b bytes.Buffer
-}
-
-func getHashAndBuf() *hashAndBuf {
-    hb := hashAndBufPool.Get()
-    if hb == nil {
-        return &hashAndBuf{h: fnv.New64a()}
-    }
-    return hb.(*hashAndBuf)
-}
-
-func putHashAndBuf(hb *hashAndBuf) {
-    hb.h.Reset()
-    hb.b.Reset()
-    hashAndBufPool.Put(hb)
-}
-
 // LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
 // given label set. (Collisions are possible but unlikely if the number of label
 // sets the function is applied to is small.)
@ -66,18 +41,14 @@ func LabelsToSignature(labels map[string]string) uint64 {
     }
     sort.Strings(labelNames)
 
-    hb := getHashAndBuf()
-    defer putHashAndBuf(hb)
-
+    sum := hashNew()
     for _, labelName := range labelNames {
-        hb.b.WriteString(labelName)
-        hb.b.WriteByte(SeparatorByte)
-        hb.b.WriteString(labels[labelName])
-        hb.b.WriteByte(SeparatorByte)
-        hb.h.Write(hb.b.Bytes())
-        hb.b.Reset()
+        sum = hashAdd(sum, labelName)
+        sum = hashAddByte(sum, SeparatorByte)
+        sum = hashAdd(sum, labels[labelName])
+        sum = hashAddByte(sum, SeparatorByte)
     }
-    return hb.h.Sum64()
+    return sum
 }
 
 // labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
@ -93,18 +64,14 @@ func labelSetToFingerprint(ls LabelSet) Fingerprint {
     }
     sort.Sort(labelNames)
 
-    hb := getHashAndBuf()
-    defer putHashAndBuf(hb)
-
+    sum := hashNew()
     for _, labelName := range labelNames {
-        hb.b.WriteString(string(labelName))
-        hb.b.WriteByte(SeparatorByte)
-        hb.b.WriteString(string(ls[labelName]))
-        hb.b.WriteByte(SeparatorByte)
-        hb.h.Write(hb.b.Bytes())
-        hb.b.Reset()
+        sum = hashAdd(sum, string(labelName))
+        sum = hashAddByte(sum, SeparatorByte)
+        sum = hashAdd(sum, string(ls[labelName]))
+        sum = hashAddByte(sum, SeparatorByte)
     }
-    return Fingerprint(hb.h.Sum64())
+    return Fingerprint(sum)
 }
 
 // labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a
@ -116,17 +83,12 @@ func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
     }
 
     var result uint64
-    hb := getHashAndBuf()
-    defer putHashAndBuf(hb)
-
     for labelName, labelValue := range ls {
-        hb.b.WriteString(string(labelName))
-        hb.b.WriteByte(SeparatorByte)
-        hb.b.WriteString(string(labelValue))
-        hb.h.Write(hb.b.Bytes())
-        result ^= hb.h.Sum64()
-        hb.h.Reset()
-        hb.b.Reset()
+        sum := hashNew()
+        sum = hashAdd(sum, string(labelName))
+        sum = hashAddByte(sum, SeparatorByte)
+        sum = hashAdd(sum, string(labelValue))
+        result ^= sum
     }
     return Fingerprint(result)
 }
@ -136,24 +98,20 @@ func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
 // specified LabelNames into the signature calculation. The labels passed in
 // will be sorted by this function.
 func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
-    if len(m) == 0 || len(labels) == 0 {
+    if len(labels) == 0 {
         return emptyLabelSignature
     }
 
     sort.Sort(LabelNames(labels))
 
-    hb := getHashAndBuf()
-    defer putHashAndBuf(hb)
-
+    sum := hashNew()
     for _, label := range labels {
-        hb.b.WriteString(string(label))
-        hb.b.WriteByte(SeparatorByte)
-        hb.b.WriteString(string(m[label]))
-        hb.b.WriteByte(SeparatorByte)
-        hb.h.Write(hb.b.Bytes())
-        hb.b.Reset()
+        sum = hashAdd(sum, string(label))
+        sum = hashAddByte(sum, SeparatorByte)
+        sum = hashAdd(sum, string(m[label]))
+        sum = hashAddByte(sum, SeparatorByte)
     }
-    return hb.h.Sum64()
+    return sum
 }
 
 // SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
@ -175,16 +133,12 @@ func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
     }
     sort.Sort(labelNames)
 
-    hb := getHashAndBuf()
-    defer putHashAndBuf(hb)
-
+    sum := hashNew()
     for _, labelName := range labelNames {
-        hb.b.WriteString(string(labelName))
-        hb.b.WriteByte(SeparatorByte)
-        hb.b.WriteString(string(m[labelName]))
-        hb.b.WriteByte(SeparatorByte)
-        hb.h.Write(hb.b.Bytes())
-        hb.b.Reset()
+        sum = hashAdd(sum, string(labelName))
+        sum = hashAddByte(sum, SeparatorByte)
+        sum = hashAdd(sum, string(m[labelName]))
+        sum = hashAddByte(sum, SeparatorByte)
     }
-    return hb.h.Sum64()
+    return sum
 }
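The rewritten signature functions drop the pooled hash.Hash64/bytes.Buffer pair in favour of hashNew, hashAdd, and hashAddByte helpers, which live elsewhere in the vendored package (its fnv.go, not shown in this excerpt). A self-contained sketch of the allocation-free FNV-1a folding those helpers are assumed to implement:

package main

import "fmt"

// FNV-1a (64 bit) constants; these mirror what the unexported helpers used in
// the diff are expected to rely on, but that file is not part of this excerpt.
const (
    offset64 = 14695981039346656037
    prime64  = 1099511628211
)

func hashNew() uint64 { return offset64 }

func hashAdd(h uint64, s string) uint64 {
    for i := 0; i < len(s); i++ {
        h ^= uint64(s[i])
        h *= prime64
    }
    return h
}

func hashAddByte(h uint64, b byte) uint64 {
    h ^= uint64(b)
    h *= prime64
    return h
}

func main() {
    // Incrementally hash name/value pairs separated by a byte that cannot
    // occur in valid UTF-8, the same shape as the rewritten LabelsToSignature.
    sum := hashNew()
    sum = hashAdd(sum, "job")
    sum = hashAddByte(sum, 255)
    sum = hashAdd(sum, "badge-gen")
    sum = hashAddByte(sum, 255)
    fmt.Printf("signature: %x\n", sum)
}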
106 vendor/github.com/prometheus/common/model/silence.go generated vendored Normal file
@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+    "encoding/json"
+    "fmt"
+    "regexp"
+    "time"
+)
+
+// Matcher describes a matches the value of a given label.
+type Matcher struct {
+    Name    LabelName `json:"name"`
+    Value   string    `json:"value"`
+    IsRegex bool      `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+    type plain Matcher
+    if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+        return err
+    }
+
+    if len(m.Name) == 0 {
+        return fmt.Errorf("label name in matcher must not be empty")
+    }
+    if m.IsRegex {
+        if _, err := regexp.Compile(m.Value); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+// Validate returns true iff all fields of the matcher have valid values.
+func (m *Matcher) Validate() error {
+    if !m.Name.IsValid() {
+        return fmt.Errorf("invalid name %q", m.Name)
+    }
+    if m.IsRegex {
+        if _, err := regexp.Compile(m.Value); err != nil {
+            return fmt.Errorf("invalid regular expression %q", m.Value)
+        }
+    } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+        return fmt.Errorf("invalid value %q", m.Value)
+    }
+    return nil
+}
+
+// Silence defines the representation of a silence definition in the Prometheus
+// eco-system.
+type Silence struct {
+    ID uint64 `json:"id,omitempty"`
+
+    Matchers []*Matcher `json:"matchers"`
+
+    StartsAt time.Time `json:"startsAt"`
+    EndsAt   time.Time `json:"endsAt"`
+
+    CreatedAt time.Time `json:"createdAt,omitempty"`
+    CreatedBy string    `json:"createdBy"`
+    Comment   string    `json:"comment,omitempty"`
+}
+
+// Validate returns true iff all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+    if len(s.Matchers) == 0 {
+        return fmt.Errorf("at least one matcher required")
+    }
+    for _, m := range s.Matchers {
+        if err := m.Validate(); err != nil {
+            return fmt.Errorf("invalid matcher: %s", err)
+        }
+    }
+    if s.StartsAt.IsZero() {
+        return fmt.Errorf("start time missing")
+    }
+    if s.EndsAt.IsZero() {
+        return fmt.Errorf("end time missing")
+    }
+    if s.EndsAt.Before(s.StartsAt) {
+        return fmt.Errorf("start time must be before end time")
+    }
+    if s.CreatedBy == "" {
+        return fmt.Errorf("creator information missing")
+    }
+    if s.Comment == "" {
+        return fmt.Errorf("comment missing")
+    }
+    if s.CreatedAt.IsZero() {
+        return fmt.Errorf("creation timestamp missing")
+    }
+    return nil
+}
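A short sketch, not part of the diff, of building a Silence from the newly vendored type and running its Validate; every field value below is made up for illustration:

package main

import (
    "fmt"
    "time"

    "github.com/prometheus/common/model"
)

func main() {
    s := model.Silence{
        Matchers: []*model.Matcher{
            {Name: "service", Value: "badge-gen", IsRegex: false},
        },
        StartsAt:  time.Now(),
        EndsAt:    time.Now().Add(2 * time.Hour),
        CreatedAt: time.Now(),
        CreatedBy: "ops@example.com",
        Comment:   "maintenance window",
    }
    // Validate enforces the rules from the new file: at least one matcher,
    // sane start/end times, and creator/comment metadata.
    if err := s.Validate(); err != nil {
        fmt.Println("invalid silence:", err)
        return
    }
    fmt.Println("silence OK")
}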
78 vendor/github.com/prometheus/common/model/time.go generated vendored
@ -163,51 +163,85 @@ func (t *Time) UnmarshalJSON(b []byte) error {
 // This type should not propagate beyond the scope of input/output processing.
 type Duration time.Duration
 
-// StringToDuration parses a string into a time.Duration, assuming that a year
-// a day always has 24h.
+// Set implements pflag/flag.Value
+func (d *Duration) Set(s string) error {
+    var err error
+    *d, err = ParseDuration(s)
+    return err
+}
+
+// Type implements pflag.Value
+func (d *Duration) Type() string {
+    return "duration"
+}
+
+var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
+
+// ParseDuration parses a string into a time.Duration, assuming that a year
+// always has 365d, a week always has 7d, and a day always has 24h.
 func ParseDuration(durationStr string) (Duration, error) {
     matches := durationRE.FindStringSubmatch(durationStr)
     if len(matches) != 3 {
         return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
     }
-    durSeconds, _ := strconv.Atoi(matches[1])
-    dur := time.Duration(durSeconds) * time.Second
-    unit := matches[2]
-    switch unit {
+    var (
+        n, _ = strconv.Atoi(matches[1])
+        dur  = time.Duration(n) * time.Millisecond
+    )
+    switch unit := matches[2]; unit {
+    case "y":
+        dur *= 1000 * 60 * 60 * 24 * 365
+    case "w":
+        dur *= 1000 * 60 * 60 * 24 * 7
     case "d":
-        dur *= 60 * 60 * 24
+        dur *= 1000 * 60 * 60 * 24
     case "h":
-        dur *= 60 * 60
+        dur *= 1000 * 60 * 60
     case "m":
-        dur *= 60
+        dur *= 1000 * 60
     case "s":
-        dur *= 1
+        dur *= 1000
+    case "ms":
+        // Value already correct
     default:
         return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
     }
     return Duration(dur), nil
 }
 
-var durationRE = regexp.MustCompile("^([0-9]+)([ywdhms]+)$")
-
 func (d Duration) String() string {
-    seconds := int64(time.Duration(d) / time.Second)
+    var (
+        ms   = int64(time.Duration(d) / time.Millisecond)
+        unit = "ms"
+    )
+    if ms == 0 {
+        return "0s"
+    }
     factors := map[string]int64{
-        "d": 60 * 60 * 24,
-        "h": 60 * 60,
-        "m": 60,
-        "s": 1,
+        "y":  1000 * 60 * 60 * 24 * 365,
+        "w":  1000 * 60 * 60 * 24 * 7,
+        "d":  1000 * 60 * 60 * 24,
+        "h":  1000 * 60 * 60,
+        "m":  1000 * 60,
+        "s":  1000,
+        "ms": 1,
     }
-    unit := "s"
+
     switch int64(0) {
-    case seconds % factors["d"]:
+    case ms % factors["y"]:
+        unit = "y"
+    case ms % factors["w"]:
+        unit = "w"
+    case ms % factors["d"]:
         unit = "d"
-    case seconds % factors["h"]:
+    case ms % factors["h"]:
         unit = "h"
-    case seconds % factors["m"]:
+    case ms % factors["m"]:
         unit = "m"
+    case ms % factors["s"]:
+        unit = "s"
     }
-    return fmt.Sprintf("%v%v", seconds/factors[unit], unit)
+    return fmt.Sprintf("%v%v", ms/factors[unit], unit)
 }
 
 // MarshalYAML implements the yaml.Marshaler interface.
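With this change Duration is parsed and formatted with millisecond resolution and gains the y, w, and ms units, plus pflag support via Set/Type. An illustrative round-trip sketch, not part of the diff:

package main

import (
    "fmt"

    "github.com/prometheus/common/model"
)

func main() {
    for _, in := range []string{"500ms", "90s", "2h", "1w"} {
        d, err := model.ParseDuration(in)
        if err != nil {
            fmt.Println("parse error:", err)
            continue
        }
        // String picks the largest unit that divides the value evenly,
        // so "90s" stays "90s" while "120s" would come back as "2m".
        fmt.Printf("%-6s -> %s\n", in, d)
    }
}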
37 vendor/github.com/prometheus/common/model/value.go generated vendored
@ -16,11 +16,28 @@ package model
 import (
     "encoding/json"
     "fmt"
+    "math"
     "sort"
     "strconv"
     "strings"
 )
 
+var (
+    // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+    // non-existing sample pair. It is a SamplePair with timestamp Earliest and
+    // value 0.0. Note that the natural zero value of SamplePair has a timestamp
+    // of 0, which is possible to appear in a real SamplePair and thus not
+    // suitable to signal a non-existing SamplePair.
+    ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+    // ZeroSample is the pseudo zero-value of Sample used to signal a
+    // non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+    // and metric nil. Note that the natural zero value of Sample has a timestamp
+    // of 0, which is possible to appear in a real Sample and thus not suitable
+    // to signal a non-existing Sample.
+    ZeroSample = Sample{Timestamp: Earliest}
+)
+
 // A SampleValue is a representation of a value for a given sample at a given
 // time.
 type SampleValue float64
@ -43,8 +60,14 @@ func (v *SampleValue) UnmarshalJSON(b []byte) error {
     return nil
 }
 
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
 func (v SampleValue) Equal(o SampleValue) bool {
-    return v == o
+    if v == o {
+        return true
+    }
+    return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
 }
 
 func (v SampleValue) String() string {
@ -77,9 +100,9 @@ func (s *SamplePair) UnmarshalJSON(b []byte) error {
 }
 
 // Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps.
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
 func (s *SamplePair) Equal(o *SamplePair) bool {
-    return s == o || (s.Value == o.Value && s.Timestamp.Equal(o.Timestamp))
+    return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
 }
 
 func (s SamplePair) String() string {
@ -93,7 +116,8 @@ type Sample struct {
     Timestamp Time        `json:"timestamp"`
 }
 
-// Equal compares first the metrics, then the timestamp, then the value.
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality is defined by SampleValue.Equal.
 func (s *Sample) Equal(o *Sample) bool {
     if s == o {
         return true
@ -105,11 +129,8 @@ func (s *Sample) Equal(o *Sample) bool {
     if !s.Timestamp.Equal(o.Timestamp) {
         return false
     }
-    if s.Value != o.Value {
-        return false
-    }
 
-    return true
+    return s.Value.Equal(o.Value)
 }
 
 func (s Sample) String() string {
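SampleValue.Equal (and, through it, SamplePair.Equal and Sample.Equal) now treats two NaN values as equal, unlike the plain == operator on floats. A short illustrative sketch, not part of the diff:

package main

import (
    "fmt"
    "math"

    "github.com/prometheus/common/model"
)

func main() {
    nan := model.SampleValue(math.NaN())
    // The == operator follows IEEE 754 and reports NaN != NaN,
    // while the patched Equal considers the two samples identical.
    fmt.Println("nan == nan:     ", nan == nan)     // false
    fmt.Println("nan.Equal(nan): ", nan.Equal(nan)) // true
    fmt.Println("1.Equal(1):     ", model.SampleValue(1).Equal(model.SampleValue(1))) // true
}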
Some files were not shown because too many files have changed in this diff.