diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json new file mode 100644 index 0000000..369be31 --- /dev/null +++ b/Godeps/Godeps.json @@ -0,0 +1,48 @@ +{ + "ImportPath": "github.com/Luzifer/dbx-sync", + "GoVersion": "go1.7", + "GodepVersion": "v74", + "Deps": [ + { + "ImportPath": "github.com/Luzifer/rconfig", + "Comment": "v1.1.0", + "Rev": "c27bd3a64b5b19556914d9fec69922cf3852d585" + }, + { + "ImportPath": "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox", + "Rev": "fe9db01d5c86f2ea3ed2e1489012aa3f8dae576d" + }, + { + "ImportPath": "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async", + "Rev": "fe9db01d5c86f2ea3ed2e1489012aa3f8dae576d" + }, + { + "ImportPath": "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files", + "Rev": "fe9db01d5c86f2ea3ed2e1489012aa3f8dae576d" + }, + { + "ImportPath": "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/properties", + "Rev": "fe9db01d5c86f2ea3ed2e1489012aa3f8dae576d" + }, + { + "ImportPath": "github.com/spf13/pflag", + "Rev": "c7e63cf4530bcd3ba943729cee0efeff2ebea63f" + }, + { + "ImportPath": "golang.org/x/net/context", + "Rev": "f09c4662a0bd6bd8943ac7b4931e185df9471da4" + }, + { + "ImportPath": "golang.org/x/oauth2", + "Rev": "2baa8a1b9338cf13d9eeb27696d761155fa480be" + }, + { + "ImportPath": "golang.org/x/oauth2/internal", + "Rev": "2baa8a1b9338cf13d9eeb27696d761155fa480be" + }, + { + "ImportPath": "gopkg.in/yaml.v2", + "Rev": "31c299268d302dd0aa9a0dcf765a3d58971ac83f" + } + ] +} diff --git a/Godeps/Readme b/Godeps/Readme new file mode 100644 index 0000000..4cdaa53 --- /dev/null +++ b/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/vendor/github.com/Luzifer/rconfig/.travis.yml b/vendor/github.com/Luzifer/rconfig/.travis.yml new file mode 100644 index 0000000..520bedf --- /dev/null +++ b/vendor/github.com/Luzifer/rconfig/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - 1.4 + - 1.5 + - tip + +script: go test -v -race -cover ./... diff --git a/vendor/github.com/Luzifer/rconfig/History.md b/vendor/github.com/Luzifer/rconfig/History.md new file mode 100644 index 0000000..8bc33a6 --- /dev/null +++ b/vendor/github.com/Luzifer/rconfig/History.md @@ -0,0 +1,5 @@ +# 1.1.0 / 2016-06-28 + + * Support time.Duration config parameters + * Added goreportcard badge + * Added testcase for using bool with ENV and default diff --git a/vendor/github.com/Luzifer/rconfig/LICENSE b/vendor/github.com/Luzifer/rconfig/LICENSE new file mode 100644 index 0000000..4fde5d2 --- /dev/null +++ b/vendor/github.com/Luzifer/rconfig/LICENSE @@ -0,0 +1,13 @@ +Copyright 2015 Knut Ahlers + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
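The History.md entry above mentions support for `time.Duration` config parameters. A minimal usage sketch (not part of the vendored code; the `Timeout` field and tag values are illustrative, based on the rconfig API shown in the README and `config.go` below):

```go
package main

import (
	"fmt"
	"time"

	"github.com/Luzifer/rconfig"
)

type config struct {
	// The default is parsed via time.ParseDuration; an empty value falls back to 0.
	Timeout time.Duration `default:"30s" flag:"timeout" description:"Request timeout"`
}

func main() {
	cfg := config{}
	if err := rconfig.Parse(&cfg); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(cfg.Timeout) // 30s unless overridden with --timeout
}
```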
diff --git a/vendor/github.com/Luzifer/rconfig/README.md b/vendor/github.com/Luzifer/rconfig/README.md
new file mode 100644
index 0000000..67fbf87
--- /dev/null
+++ b/vendor/github.com/Luzifer/rconfig/README.md
@@ -0,0 +1,94 @@
+[![Build Status](https://travis-ci.org/Luzifer/rconfig.svg?branch=master)](https://travis-ci.org/Luzifer/rconfig)
+[![License: Apache v2.0](https://badge.luzifer.io/v1/badge?color=5d79b5&title=license&text=Apache+v2.0)](http://www.apache.org/licenses/LICENSE-2.0)
+[![Documentation](https://badge.luzifer.io/v1/badge?title=godoc&text=reference)](https://godoc.org/github.com/Luzifer/rconfig)
+[![Go Report](http://goreportcard.com/badge/Luzifer/rconfig)](http://goreportcard.com/report/Luzifer/rconfig)
+
+## Description
+
+> Package rconfig implements a CLI configuration reader with struct-embedded defaults, environment variables and POSIX-compatible flag parsing using the [pflag](https://github.com/spf13/pflag) library.
+
+## Installation
+
+Install by running:
+
+```
+go get -u github.com/Luzifer/rconfig
+```
+
+Or fetch a specific version:
+
+```
+go get -u gopkg.in/luzifer/rconfig.v1
+```
+
+Run the tests by running:
+
+```
+go test -v -race -cover github.com/Luzifer/rconfig
+```
+
+## Usage
+
+As a first step, define a struct holding your configuration:
+
+```go
+type config struct {
+  Username string `default:"unknown" flag:"user" description:"Your name"`
+  Details  struct {
+    Age int `default:"25" flag:"age" env:"age" description:"Your age"`
+  }
+}
+```
+
+Next, create an instance of that struct and let `rconfig` fill that config:
+
+```go
+var cfg config
+func init() {
+  cfg = config{}
+  rconfig.Parse(&cfg)
+}
+```
+
+You're ready to access your configuration:
+
+```go
+func main() {
+  fmt.Printf("Hello %s, happy birthday for your %dth birthday.",
+    cfg.Username,
+    cfg.Details.Age)
+}
+```
+
+### Provide variable defaults by using a file
+
+Given a file `~/.myapp.yml` containing secrets or usernames as a default configuration for your application (for the example below the username is assumed to be "luzifer"), you can use the following source code to load the defaults from that file using the `vardefault` tag in your configuration struct.
+
+The order of the directives (lower number = higher precedence):
+
+1. Flags provided on the command line
+1. Environment variables
+1. Variable defaults (`vardefault` tag in the struct)
+1. `default` tag in the struct
+
+```go
+type config struct {
+  Username string `vardefault:"username" flag:"username" description:"Your username"`
+}
+
+var cfg = config{}
+
+func init() {
+  rconfig.SetVariableDefaults(rconfig.VarDefaultsFromYAMLFile("~/.myapp.yml"))
+  rconfig.Parse(&cfg)
+}
+
+func main() {
+  fmt.Printf("Username = %s", cfg.Username)
+  // Output: Username = luzifer
+}
+```
+
+## More info
+
+You can see the full reference documentation of the rconfig package [at godoc.org](https://godoc.org/github.com/Luzifer/rconfig), or through Go's standard documentation system by running `godoc -http=:6060` and browsing to [http://localhost:6060/pkg/github.com/Luzifer/rconfig](http://localhost:6060/pkg/github.com/Luzifer/rconfig) after installation.
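To make the precedence rules from the README above concrete, here is a minimal sketch (hypothetical field and environment-variable names) combining `default`, `env` and a long/short `flag` on a single field; the flag wins over the environment variable, which wins over the default:

```go
package main

import (
	"fmt"

	"github.com/Luzifer/rconfig"
)

type config struct {
	// Resolution order: --user/-u flag > USERNAME env var > "unknown".
	Username string `default:"unknown" env:"USERNAME" flag:"user,u" description:"Your name"`
}

func main() {
	cfg := config{}
	if err := rconfig.Parse(&cfg); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(cfg.Username)
}
```

Running `USERNAME=alice ./app` would print `alice`, while `USERNAME=alice ./app --user bob` would print `bob`.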
diff --git a/vendor/github.com/Luzifer/rconfig/config.go b/vendor/github.com/Luzifer/rconfig/config.go
new file mode 100644
index 0000000..dd37238
--- /dev/null
+++ b/vendor/github.com/Luzifer/rconfig/config.go
@@ -0,0 +1,338 @@
+// Package rconfig implements a CLI configuration reader with struct-embedded
+// defaults, environment variables and POSIX-compatible flag parsing using
+// the pflag library.
+package rconfig
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/spf13/pflag"
+)
+
+var (
+	fs               *pflag.FlagSet
+	variableDefaults map[string]string
+)
+
+func init() {
+	variableDefaults = make(map[string]string)
+}
+
+// Parse takes a pointer to a struct filled with variables which should be read
+// from ENV, default or flag. The precedence is flag > ENV > default: a flag
+// specified on the CLI overrides the ENV value, and the ENV value overrides
+// the specified default.
+//
+// For your configuration struct you can use the following struct-tags to control
+// the behavior of rconfig:
+//
+//     default: Set a default value
+//     vardefault: Read the default value from the variable defaults
+//     env: Read the value from this environment variable
+//     flag: Flag to read in format "long,short" (for example "listen,l")
+//     description: A help text for Usage output to guide your users
+//
+// The format used to specify those values is shown in the example for this
+// function.
+func Parse(config interface{}) error {
+	return parse(config, nil)
+}
+
+// Args returns the non-flag command-line arguments.
+func Args() []string {
+	return fs.Args()
+}
+
+// Usage prints a basic usage message with the corresponding defaults for the
+// flags to os.Stderr. The defaults are derived from the `default` struct-tag
+// and the ENV.
+func Usage() { + if fs != nil && fs.Parsed() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + fs.PrintDefaults() + } +} + +// SetVariableDefaults presets the parser with a map of default values to be used +// when specifying the vardefault tag +func SetVariableDefaults(defaults map[string]string) { + variableDefaults = defaults +} + +func parse(in interface{}, args []string) error { + if args == nil { + args = os.Args + } + + fs = pflag.NewFlagSet(os.Args[0], pflag.ExitOnError) + if err := execTags(in, fs); err != nil { + return err + } + + return fs.Parse(args) +} + +func execTags(in interface{}, fs *pflag.FlagSet) error { + if reflect.TypeOf(in).Kind() != reflect.Ptr { + return errors.New("Calling parser with non-pointer") + } + + if reflect.ValueOf(in).Elem().Kind() != reflect.Struct { + return errors.New("Calling parser with pointer to non-struct") + } + + st := reflect.ValueOf(in).Elem() + for i := 0; i < st.NumField(); i++ { + valField := st.Field(i) + typeField := st.Type().Field(i) + + if typeField.Tag.Get("default") == "" && typeField.Tag.Get("env") == "" && typeField.Tag.Get("flag") == "" && typeField.Type.Kind() != reflect.Struct { + // None of our supported tags is present and it's not a sub-struct + continue + } + + value := varDefault(typeField.Tag.Get("vardefault"), typeField.Tag.Get("default")) + value = envDefault(typeField.Tag.Get("env"), value) + parts := strings.Split(typeField.Tag.Get("flag"), ",") + + switch typeField.Type { + case reflect.TypeOf(time.Duration(0)): + v, err := time.ParseDuration(value) + if err != nil { + if value == "" { + v = time.Duration(0) + } else { + return err + } + } + + if typeField.Tag.Get("flag") != "" { + if len(parts) == 1 { + fs.DurationVar(valField.Addr().Interface().(*time.Duration), parts[0], v, typeField.Tag.Get("description")) + } else { + fs.DurationVarP(valField.Addr().Interface().(*time.Duration), parts[0], parts[1], v, typeField.Tag.Get("description")) + } + } else { + valField.Set(reflect.ValueOf(v)) + } + continue + } + + switch typeField.Type.Kind() { + case reflect.String: + if typeField.Tag.Get("flag") != "" { + if len(parts) == 1 { + fs.StringVar(valField.Addr().Interface().(*string), parts[0], value, typeField.Tag.Get("description")) + } else { + fs.StringVarP(valField.Addr().Interface().(*string), parts[0], parts[1], value, typeField.Tag.Get("description")) + } + } else { + valField.SetString(value) + } + + case reflect.Bool: + v := value == "true" + if typeField.Tag.Get("flag") != "" { + if len(parts) == 1 { + fs.BoolVar(valField.Addr().Interface().(*bool), parts[0], v, typeField.Tag.Get("description")) + } else { + fs.BoolVarP(valField.Addr().Interface().(*bool), parts[0], parts[1], v, typeField.Tag.Get("description")) + } + } else { + valField.SetBool(v) + } + + case reflect.Int, reflect.Int8, reflect.Int32, reflect.Int64: + vt, err := strconv.ParseInt(value, 10, 64) + if err != nil { + if value == "" { + vt = 0 + } else { + return err + } + } + if typeField.Tag.Get("flag") != "" { + registerFlagInt(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description")) + } else { + valField.SetInt(vt) + } + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + vt, err := strconv.ParseUint(value, 10, 64) + if err != nil { + if value == "" { + vt = 0 + } else { + return err + } + } + if typeField.Tag.Get("flag") != "" { + registerFlagUint(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description")) + } else 
{ + valField.SetUint(vt) + } + + case reflect.Float32, reflect.Float64: + vt, err := strconv.ParseFloat(value, 64) + if err != nil { + if value == "" { + vt = 0.0 + } else { + return err + } + } + if typeField.Tag.Get("flag") != "" { + registerFlagFloat(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description")) + } else { + valField.SetFloat(vt) + } + + case reflect.Struct: + if err := execTags(valField.Addr().Interface(), fs); err != nil { + return err + } + + case reflect.Slice: + switch typeField.Type.Elem().Kind() { + case reflect.Int: + def := []int{} + for _, v := range strings.Split(value, ",") { + it, err := strconv.ParseInt(strings.TrimSpace(v), 10, 64) + if err != nil { + return err + } + def = append(def, int(it)) + } + if len(parts) == 1 { + fs.IntSliceVar(valField.Addr().Interface().(*[]int), parts[0], def, typeField.Tag.Get("description")) + } else { + fs.IntSliceVarP(valField.Addr().Interface().(*[]int), parts[0], parts[1], def, typeField.Tag.Get("description")) + } + case reflect.String: + del := typeField.Tag.Get("delimiter") + if len(del) == 0 { + del = "," + } + def := strings.Split(value, del) + if len(parts) == 1 { + fs.StringSliceVar(valField.Addr().Interface().(*[]string), parts[0], def, typeField.Tag.Get("description")) + } else { + fs.StringSliceVarP(valField.Addr().Interface().(*[]string), parts[0], parts[1], def, typeField.Tag.Get("description")) + } + } + } + } + + return nil +} + +func registerFlagFloat(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt float64, desc string) { + switch t { + case reflect.Float32: + if len(parts) == 1 { + fs.Float32Var(field.(*float32), parts[0], float32(vt), desc) + } else { + fs.Float32VarP(field.(*float32), parts[0], parts[1], float32(vt), desc) + } + case reflect.Float64: + if len(parts) == 1 { + fs.Float64Var(field.(*float64), parts[0], float64(vt), desc) + } else { + fs.Float64VarP(field.(*float64), parts[0], parts[1], float64(vt), desc) + } + } +} + +func registerFlagInt(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt int64, desc string) { + switch t { + case reflect.Int: + if len(parts) == 1 { + fs.IntVar(field.(*int), parts[0], int(vt), desc) + } else { + fs.IntVarP(field.(*int), parts[0], parts[1], int(vt), desc) + } + case reflect.Int8: + if len(parts) == 1 { + fs.Int8Var(field.(*int8), parts[0], int8(vt), desc) + } else { + fs.Int8VarP(field.(*int8), parts[0], parts[1], int8(vt), desc) + } + case reflect.Int32: + if len(parts) == 1 { + fs.Int32Var(field.(*int32), parts[0], int32(vt), desc) + } else { + fs.Int32VarP(field.(*int32), parts[0], parts[1], int32(vt), desc) + } + case reflect.Int64: + if len(parts) == 1 { + fs.Int64Var(field.(*int64), parts[0], int64(vt), desc) + } else { + fs.Int64VarP(field.(*int64), parts[0], parts[1], int64(vt), desc) + } + } +} + +func registerFlagUint(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt uint64, desc string) { + switch t { + case reflect.Uint: + if len(parts) == 1 { + fs.UintVar(field.(*uint), parts[0], uint(vt), desc) + } else { + fs.UintVarP(field.(*uint), parts[0], parts[1], uint(vt), desc) + } + case reflect.Uint8: + if len(parts) == 1 { + fs.Uint8Var(field.(*uint8), parts[0], uint8(vt), desc) + } else { + fs.Uint8VarP(field.(*uint8), parts[0], parts[1], uint8(vt), desc) + } + case reflect.Uint16: + if len(parts) == 1 { + fs.Uint16Var(field.(*uint16), parts[0], uint16(vt), desc) + } else { + fs.Uint16VarP(field.(*uint16), parts[0], parts[1], uint16(vt), 
desc) + } + case reflect.Uint32: + if len(parts) == 1 { + fs.Uint32Var(field.(*uint32), parts[0], uint32(vt), desc) + } else { + fs.Uint32VarP(field.(*uint32), parts[0], parts[1], uint32(vt), desc) + } + case reflect.Uint64: + if len(parts) == 1 { + fs.Uint64Var(field.(*uint64), parts[0], uint64(vt), desc) + } else { + fs.Uint64VarP(field.(*uint64), parts[0], parts[1], uint64(vt), desc) + } + } +} + +func envDefault(env, def string) string { + value := def + + if env != "" { + if e := os.Getenv(env); e != "" { + value = e + } + } + + return value +} + +func varDefault(name, def string) string { + value := def + + if name != "" { + if v, ok := variableDefaults[name]; ok { + value = v + } + } + + return value +} diff --git a/vendor/github.com/Luzifer/rconfig/vardefault_providers.go b/vendor/github.com/Luzifer/rconfig/vardefault_providers.go new file mode 100644 index 0000000..2199cfa --- /dev/null +++ b/vendor/github.com/Luzifer/rconfig/vardefault_providers.go @@ -0,0 +1,27 @@ +package rconfig + +import ( + "io/ioutil" + + "gopkg.in/yaml.v2" +) + +// VarDefaultsFromYAMLFile reads contents of a file and calls VarDefaultsFromYAML +func VarDefaultsFromYAMLFile(filename string) map[string]string { + data, err := ioutil.ReadFile(filename) + if err != nil { + return make(map[string]string) + } + + return VarDefaultsFromYAML(data) +} + +// VarDefaultsFromYAML creates a vardefaults map from YAML raw data +func VarDefaultsFromYAML(in []byte) map[string]string { + out := make(map[string]string) + err := yaml.Unmarshal(in, &out) + if err != nil { + return make(map[string]string) + } + return out +} diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/LICENSE b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/LICENSE new file mode 100644 index 0000000..9390069 --- /dev/null +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2009-2016 Dropbox Inc., http://www.dropbox.com/ + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async/types.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async/types.go new file mode 100644 index 0000000..19daa33 --- /dev/null +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async/types.go @@ -0,0 +1,130 @@ +// Copyright (c) Dropbox, Inc. 
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package async : has no documentation (yet)
+package async
+
+import (
+	"encoding/json"
+
+	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
+)
+
+// LaunchResultBase : Result returned by methods that launch an asynchronous
+// job. A method which may either launch an asynchronous job or complete the
+// request synchronously can use this union by extending it and adding a
+// 'complete' field with the type of the synchronous response. See
+// `LaunchEmptyResult` for an example.
+type LaunchResultBase struct {
+	dropbox.Tagged
+	// AsyncJobId : This response indicates that the processing is asynchronous.
+	// The string is an id that can be used to obtain the status of the
+	// asynchronous job.
+	AsyncJobId string `json:"async_job_id,omitempty"`
+}
+
+// Valid tag values for LaunchResultBase
+const (
+	LaunchResultBaseAsyncJobId = "async_job_id"
+)
+
+// UnmarshalJSON deserializes into a LaunchResultBase instance
+func (u *LaunchResultBase) UnmarshalJSON(body []byte) error {
+	type wrap struct {
+		dropbox.Tagged
+	}
+	var w wrap
+	if err := json.Unmarshal(body, &w); err != nil {
+		return err
+	}
+	u.Tag = w.Tag
+	switch u.Tag {
+	case "async_job_id":
+		if err := json.Unmarshal(body, &u.AsyncJobId); err != nil {
+			return err
+		}
+
+	}
+	return nil
+}
+
+// LaunchEmptyResult : Result returned by methods that may either launch an
+// asynchronous job or complete synchronously. Upon synchronous completion of
+// the job, no additional information is returned.
+type LaunchEmptyResult struct {
+	dropbox.Tagged
+}
+
+// Valid tag values for LaunchEmptyResult
+const (
+	LaunchEmptyResultComplete = "complete"
+)
+
+// PollArg : Arguments for methods that poll the status of an asynchronous job.
+type PollArg struct {
+	// AsyncJobId : Id of the asynchronous job. This is the value of a response
+	// returned from the method that launched the job.
+	AsyncJobId string `json:"async_job_id"`
+}
+
+// NewPollArg returns a new PollArg instance
+func NewPollArg(AsyncJobId string) *PollArg {
+	s := new(PollArg)
+	s.AsyncJobId = AsyncJobId
+	return s
+}
+
+// PollResultBase : Result returned by methods that poll for the status of an
+// asynchronous job. Unions that extend this union should add a 'complete'
+// field with the type of the information returned upon job completion. See
+// `PollEmptyResult` for an example.
+type PollResultBase struct {
+	dropbox.Tagged
+}
+
+// Valid tag values for PollResultBase
+const (
+	PollResultBaseInProgress = "in_progress"
+)
+
+// PollEmptyResult : Result returned by methods that poll for the status of an
+// asynchronous job. Upon completion of the job, no additional information is
+// returned.
+type PollEmptyResult struct {
+	dropbox.Tagged
+}
+
+// Valid tag values for PollEmptyResult
+const (
+	PollEmptyResultComplete = "complete"
+)
+
+// PollError : Error returned by methods for polling the status of an
+// asynchronous job.
+type PollError struct {
+	dropbox.Tagged
+}
+
+// Valid tag values for PollError
+const (
+	PollErrorInvalidAsyncJobId = "invalid_async_job_id"
+	PollErrorInternalError     = "internal_error"
+	PollErrorOther             = "other"
+)
diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/client.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/client.go
new file mode 100644
index 0000000..1442efc
--- /dev/null
+++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/client.go
@@ -0,0 +1,3476 @@
+// Copyright (c) Dropbox, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package files
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/http"
+
+	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
+	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async"
+	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/properties"
+)
+
+// Client interface describes all routes in this namespace
+type Client interface {
+	// AlphaGetMetadata : Returns the metadata for a file or folder. This is an
+	// alpha endpoint compatible with the properties API. Note: Metadata for the
+	// root folder is unsupported.
+	AlphaGetMetadata(arg *AlphaGetMetadataArg) (res IsMetadata, err error)
+	// AlphaUpload : Create a new file with the contents provided in the
+	// request. Note that this endpoint is part of the properties API alpha and
+	// is slightly different from `upload`. Do not use this to upload a file
+	// larger than 150 MB. Instead, create an upload session with
+	// `uploadSessionStart`.
+	AlphaUpload(arg *CommitInfoWithProperties, content io.Reader) (res *FileMetadata, err error)
+	// Copy : Copy a file or folder to a different location in the user's
+	// Dropbox. If the source path is a folder, all its contents will be copied.
+	Copy(arg *RelocationArg) (res IsMetadata, err error)
+	// CopyBatch : Copy multiple files or folders to different locations at once
+	// in the user's Dropbox. If `RelocationBatchArg.allow_shared_folder` is
+	// false, this route is atomic: if one entry fails, the whole transaction
+	// will abort. If `RelocationBatchArg.allow_shared_folder` is true,
+	// atomicity is not guaranteed, but you will be able to copy the contents of
+	// shared folders to new locations. This route will return a job ID
+	// immediately and do the async copy job in the background. Please use
+	// `copyBatchCheck` to check the job status.
+	CopyBatch(arg *RelocationBatchArg) (res *async.LaunchEmptyResult, err error)
+	// CopyBatchCheck : Returns the status of an asynchronous job for
+	// `copyBatch`. On success, it returns a list of results for each entry.
+	CopyBatchCheck(arg *async.PollArg) (res *RelocationBatchJobStatus, err error)
+	// CopyReferenceGet : Get a copy reference to a file or folder. This
+	// reference string can be used to save that file or folder to another
+	// user's Dropbox by passing it to `copyReferenceSave`.
+	CopyReferenceGet(arg *GetCopyReferenceArg) (res *GetCopyReferenceResult, err error)
+	// CopyReferenceSave : Save a copy reference returned by `copyReferenceGet`
+	// to the user's Dropbox.
+	CopyReferenceSave(arg *SaveCopyReferenceArg) (res *SaveCopyReferenceResult, err error)
+	// CreateFolder : Create a folder at a given path.
+	CreateFolder(arg *CreateFolderArg) (res *FolderMetadata, err error)
+	// Delete : Delete the file or folder at a given path. If the path is a
+	// folder, all its contents will be deleted too. A successful response
+	// indicates that the file or folder was deleted. The returned metadata will
+	// be the corresponding `FileMetadata` or `FolderMetadata` for the item at
+	// the time of deletion, and not a `DeletedMetadata` object.
+	Delete(arg *DeleteArg) (res IsMetadata, err error)
+	// DeleteBatch : Delete multiple files/folders at once. This route is
+	// asynchronous: it returns a job ID immediately and runs the delete batch
+	// asynchronously. Use `deleteBatchCheck` to check the job status.
+	DeleteBatch(arg *DeleteBatchArg) (res *async.LaunchEmptyResult, err error)
+	// DeleteBatchCheck : Returns the status of an asynchronous job for
+	// `deleteBatch`. On success, it returns a list of results for each entry.
+	DeleteBatchCheck(arg *async.PollArg) (res *DeleteBatchJobStatus, err error)
+	// Download : Download a file from a user's Dropbox.
+	Download(arg *DownloadArg) (res *FileMetadata, content io.ReadCloser, err error)
+	// GetMetadata : Returns the metadata for a file or folder. Note: Metadata
+	// for the root folder is unsupported.
+	GetMetadata(arg *GetMetadataArg) (res IsMetadata, err error)
+	// GetPreview : Get a preview for a file. Currently previews are only
+	// generated for files with the following extensions: .doc, .docx, .docm,
+	// .ppt, .pps, .ppsx, .ppsm, .pptx, .pptm, .xls, .xlsx, .xlsm, .rtf.
+	GetPreview(arg *PreviewArg) (res *FileMetadata, content io.ReadCloser, err error)
+	// GetTemporaryLink : Get a temporary link to stream content of a file. This
+	// link will expire in four hours, after which you will get 410 Gone. The
+	// Content-Type of the link is determined automatically by the file's MIME
+	// type.
+	GetTemporaryLink(arg *GetTemporaryLinkArg) (res *GetTemporaryLinkResult, err error)
+	// GetThumbnail : Get a thumbnail for an image.
+	// This method currently supports files with the following file extensions:
+	// jpg, jpeg, png, tiff, tif, gif and bmp. Photos that are larger than 20MB
+	// in size won't be converted to a thumbnail.
+	GetThumbnail(arg *ThumbnailArg) (res *FileMetadata, content io.ReadCloser, err error)
+	// ListFolder : Starts returning the contents of a folder. If the result's
+	// `ListFolderResult.has_more` field is true, call `listFolderContinue` with
+	// the returned `ListFolderResult.cursor` to retrieve more entries. If you
+	// set `ListFolderArg.recursive` to true to keep a local cache of the
+	// contents of a Dropbox account, iterate through each entry in order and
+	// process them as follows to keep your local state in sync: For each
+	// `FileMetadata`, store the new entry at the given path in your local
+	// state. If the required parent folders don't exist yet, create them. If
+	// there's already something else at the given path, replace it and remove
+	// all its children. For each `FolderMetadata`, store the new entry at the
+	// given path in your local state. If the required parent folders don't
+	// exist yet, create them. If there's already something else at the given
+	// path, replace it but leave the children as they are. Check the new
+	// entry's `FolderSharingInfo.read_only` and set all its children's
+	// read-only statuses to match. For each `DeletedMetadata`, if your local
+	// state has something at the given path, remove it and all its children. If
+	// there's nothing at the given path, ignore this entry.
+	ListFolder(arg *ListFolderArg) (res *ListFolderResult, err error)
+	// ListFolderContinue : Once a cursor has been retrieved from `listFolder`,
+	// use this to paginate through all files and retrieve updates to the
+	// folder, following the same rules as documented for `listFolder`.
+	ListFolderContinue(arg *ListFolderContinueArg) (res *ListFolderResult, err error)
+	// ListFolderGetLatestCursor : A way to quickly get a cursor for the
+	// folder's state. Unlike `listFolder`, `listFolderGetLatestCursor` doesn't
+	// return any entries. This endpoint is for apps which only need to know
+	// about new files and modifications and don't need to know about files
+	// that already exist in Dropbox.
+	ListFolderGetLatestCursor(arg *ListFolderArg) (res *ListFolderGetLatestCursorResult, err error)
+	// ListFolderLongpoll : A longpoll endpoint to wait for changes on an
+	// account. In conjunction with `listFolderContinue`, this call gives you a
+	// low-latency way to monitor an account for file changes. The connection
+	// will block until there are changes available or a timeout occurs. This
+	// endpoint is useful mostly for client-side apps. If you're looking for
+	// server-side notifications, check out our `webhooks documentation`.
+	ListFolderLongpoll(arg *ListFolderLongpollArg) (res *ListFolderLongpollResult, err error)
+	// ListRevisions : Return revisions of a file.
+	ListRevisions(arg *ListRevisionsArg) (res *ListRevisionsResult, err error)
+	// Move : Move a file or folder to a different location in the user's
+	// Dropbox. If the source path is a folder, all its contents will be moved.
+	Move(arg *RelocationArg) (res IsMetadata, err error)
+	// MoveBatch : Move multiple files or folders to different locations at once
+	// in the user's Dropbox. This route is 'all or nothing', which means if one
+	// entry fails, the whole transaction will abort. This route will return a
+	// job ID immediately and do the async move job in the background.
+	// Please use `moveBatchCheck` to check the job status.
+	MoveBatch(arg *RelocationBatchArg) (res *async.LaunchEmptyResult, err error)
+	// MoveBatchCheck : Returns the status of an asynchronous job for
+	// `moveBatch`. On success, it returns a list of results for each entry.
+	MoveBatchCheck(arg *async.PollArg) (res *RelocationBatchJobStatus, err error)
+	// PermanentlyDelete : Permanently delete the file or folder at a given path
+	// (see https://www.dropbox.com/en/help/40). Note: This endpoint is only
+	// available for Dropbox Business apps.
+	PermanentlyDelete(arg *DeleteArg) (err error)
+	// PropertiesAdd : Add custom properties to a file using a filled property
+	// template. See properties/template/add to create new property templates.
+	PropertiesAdd(arg *PropertyGroupWithPath) (err error)
+	// PropertiesOverwrite : Overwrite custom properties from a specified
+	// template associated with a file.
+	PropertiesOverwrite(arg *PropertyGroupWithPath) (err error)
+	// PropertiesRemove : Remove all custom properties from a specified template
+	// associated with a file. To remove specific property key value pairs, see
+	// `propertiesUpdate`. To update a property template, see
+	// properties/template/update. Property templates can't be removed once
+	// created.
+	PropertiesRemove(arg *RemovePropertiesArg) (err error)
+	// PropertiesTemplateGet : Get the schema for a specified template.
+	PropertiesTemplateGet(arg *properties.GetPropertyTemplateArg) (res *properties.GetPropertyTemplateResult, err error)
+	// PropertiesTemplateList : Get the property template identifiers for a
+	// user. To get the schema of each template use `propertiesTemplateGet`.
+	PropertiesTemplateList() (res *properties.ListPropertyTemplateIds, err error)
+	// PropertiesUpdate : Add, update or remove custom properties from a
+	// specified template associated with a file. Fields that already exist and
+	// are not described in the request will not be modified.
+	PropertiesUpdate(arg *UpdatePropertyGroupArg) (err error)
+	// Restore : Restore a file to a specific revision.
+	Restore(arg *RestoreArg) (res *FileMetadata, err error)
+	// SaveUrl : Save a specified URL into a file in the user's Dropbox. If the
+	// given path already exists, the file will be renamed to avoid the conflict
+	// (e.g. myfile (1).txt).
+	SaveUrl(arg *SaveUrlArg) (res *SaveUrlResult, err error)
+	// SaveUrlCheckJobStatus : Check the status of a `saveUrl` job.
+	SaveUrlCheckJobStatus(arg *async.PollArg) (res *SaveUrlJobStatus, err error)
+	// Search : Searches for files and folders. Note: Recent changes may not
+	// immediately be reflected in search results due to a short delay in
+	// indexing.
+	Search(arg *SearchArg) (res *SearchResult, err error)
+	// Upload : Create a new file with the contents provided in the request. Do
+	// not use this to upload a file larger than 150 MB. Instead, create an
+	// upload session with `uploadSessionStart`.
+	Upload(arg *CommitInfo, content io.Reader) (res *FileMetadata, err error)
+	// UploadSessionAppend : Append more data to an upload session. A single
+	// request should not upload more than 150 MB of file contents.
+	UploadSessionAppend(arg *UploadSessionCursor, content io.Reader) (err error)
+	// UploadSessionAppendV2 : Append more data to an upload session. When the
+	// parameter close is set, this call will close the session. A single
+	// request should not upload more than 150 MB of file contents.
+	UploadSessionAppendV2(arg *UploadSessionAppendArg, content io.Reader) (err error)
+	// UploadSessionFinish : Finish an upload session and save the uploaded data
+	// to the given file path. A single request should not upload more than 150
+	// MB of file contents.
+	UploadSessionFinish(arg *UploadSessionFinishArg, content io.Reader) (res *FileMetadata, err error)
+	// UploadSessionFinishBatch : This route helps you commit many files at once
+	// into a user's Dropbox. Use `uploadSessionStart` and
+	// `uploadSessionAppendV2` to upload file contents. We recommend uploading
+	// many files in parallel to increase throughput. Once the file contents
+	// have been uploaded, rather than calling `uploadSessionFinish`, use this
+	// route to finish all your upload sessions in a single request.
+	// `UploadSessionStartArg.close` or `UploadSessionAppendArg.close` needs to
+	// be true for the last `uploadSessionStart` or `uploadSessionAppendV2`
+	// call. This route will return a job_id immediately and do the async commit
+	// job in the background. Use `uploadSessionFinishBatchCheck` to check the
+	// job status. For the same account, this route should be executed serially:
+	// you should not start the next job before the current job finishes. We
+	// allow up to 1000 entries in a single request.
+	UploadSessionFinishBatch(arg *UploadSessionFinishBatchArg) (res *async.LaunchEmptyResult, err error)
+	// UploadSessionFinishBatchCheck : Returns the status of an asynchronous job
+	// for `uploadSessionFinishBatch`. On success, it returns a list of results
+	// for each entry.
+	UploadSessionFinishBatchCheck(arg *async.PollArg) (res *UploadSessionFinishBatchJobStatus, err error)
+	// UploadSessionStart : Upload sessions allow you to upload a single file in
+	// one or more requests, for example where the size of the file is greater
+	// than 150 MB. This call starts a new upload session with the given data.
+	// You can then use `uploadSessionAppendV2` to add more data and
+	// `uploadSessionFinish` to save all the data to a file in Dropbox. A single
+	// request should not upload more than 150 MB of file contents.
+ UploadSessionStart(arg *UploadSessionStartArg, content io.Reader) (res *UploadSessionStartResult, err error) +} + +type apiImpl dropbox.Context + +//AlphaGetMetadataAPIError is an error-wrapper for the alpha/get_metadata route +type AlphaGetMetadataAPIError struct { + dropbox.APIError + EndpointError *AlphaGetMetadataError `json:"error"` +} + +func (dbx *apiImpl) AlphaGetMetadata(arg *AlphaGetMetadataArg) (res IsMetadata, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "alpha/get_metadata"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + var tmp metadataUnion + err = json.Unmarshal(body, &tmp) + if err != nil { + return + } + switch tmp.Tag { + case "file": + res = tmp.File + + case "folder": + res = tmp.Folder + + case "deleted": + res = tmp.Deleted + + } + return + } + if resp.StatusCode == http.StatusConflict { + var apiError AlphaGetMetadataAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//AlphaUploadAPIError is an error-wrapper for the alpha/upload route +type AlphaUploadAPIError struct { + dropbox.APIError + EndpointError *UploadErrorWithProperties `json:"error"` +} + +func (dbx *apiImpl) AlphaUpload(arg *CommitInfoWithProperties, content io.Reader) (res *FileMetadata, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("content", "files", "alpha/upload"), content) + if err != nil { + return + } + + req.Header.Set("Dropbox-API-Arg", string(b)) + req.Header.Set("Content-Type", "application/octet-stream") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError AlphaUploadAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = 
apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//CopyAPIError is an error-wrapper for the copy route +type CopyAPIError struct { + dropbox.APIError + EndpointError *RelocationError `json:"error"` +} + +func (dbx *apiImpl) Copy(arg *RelocationArg) (res IsMetadata, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "copy"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + var tmp metadataUnion + err = json.Unmarshal(body, &tmp) + if err != nil { + return + } + switch tmp.Tag { + case "file": + res = tmp.File + + case "folder": + res = tmp.Folder + + case "deleted": + res = tmp.Deleted + + } + return + } + if resp.StatusCode == http.StatusConflict { + var apiError CopyAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//CopyBatchAPIError is an error-wrapper for the copy_batch route +type CopyBatchAPIError struct { + dropbox.APIError + EndpointError struct{} `json:"error"` +} + +func (dbx *apiImpl) CopyBatch(arg *RelocationBatchArg) (res *async.LaunchEmptyResult, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "copy_batch"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError CopyBatchAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//CopyBatchCheckAPIError is an error-wrapper for the copy_batch/check route +type 
CopyBatchCheckAPIError struct { + dropbox.APIError + EndpointError *async.PollError `json:"error"` +} + +func (dbx *apiImpl) CopyBatchCheck(arg *async.PollArg) (res *RelocationBatchJobStatus, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "copy_batch/check"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError CopyBatchCheckAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//CopyReferenceGetAPIError is an error-wrapper for the copy_reference/get route +type CopyReferenceGetAPIError struct { + dropbox.APIError + EndpointError *GetCopyReferenceError `json:"error"` +} + +func (dbx *apiImpl) CopyReferenceGet(arg *GetCopyReferenceArg) (res *GetCopyReferenceResult, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "copy_reference/get"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError CopyReferenceGetAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//CopyReferenceSaveAPIError is an error-wrapper for the copy_reference/save route +type CopyReferenceSaveAPIError struct { + dropbox.APIError + EndpointError *SaveCopyReferenceError `json:"error"` +} + +func (dbx *apiImpl) CopyReferenceSave(arg *SaveCopyReferenceArg) (res *SaveCopyReferenceResult, err 
error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "copy_reference/save"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError CopyReferenceSaveAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//CreateFolderAPIError is an error-wrapper for the create_folder route +type CreateFolderAPIError struct { + dropbox.APIError + EndpointError *CreateFolderError `json:"error"` +} + +func (dbx *apiImpl) CreateFolder(arg *CreateFolderArg) (res *FolderMetadata, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "create_folder"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError CreateFolderAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//DeleteAPIError is an error-wrapper for the delete route +type DeleteAPIError struct { + dropbox.APIError + EndpointError *DeleteError `json:"error"` +} + +func (dbx *apiImpl) Delete(arg *DeleteArg) (res IsMetadata, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "delete"), bytes.NewReader(b)) + if err != nil { + return + } + + 
req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + var tmp metadataUnion + err = json.Unmarshal(body, &tmp) + if err != nil { + return + } + switch tmp.Tag { + case "file": + res = tmp.File + + case "folder": + res = tmp.Folder + + case "deleted": + res = tmp.Deleted + + } + return + } + if resp.StatusCode == http.StatusConflict { + var apiError DeleteAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//DeleteBatchAPIError is an error-wrapper for the delete_batch route +type DeleteBatchAPIError struct { + dropbox.APIError + EndpointError struct{} `json:"error"` +} + +func (dbx *apiImpl) DeleteBatch(arg *DeleteBatchArg) (res *async.LaunchEmptyResult, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "delete_batch"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError DeleteBatchAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//DeleteBatchCheckAPIError is an error-wrapper for the delete_batch/check route +type DeleteBatchCheckAPIError struct { + dropbox.APIError + EndpointError *async.PollError `json:"error"` +} + +func (dbx *apiImpl) DeleteBatchCheck(arg *async.PollArg) (res *DeleteBatchJobStatus, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "delete_batch/check"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + 
req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError DeleteBatchCheckAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//DownloadAPIError is an error-wrapper for the download route +type DownloadAPIError struct { + dropbox.APIError + EndpointError *DownloadError `json:"error"` +} + +func (dbx *apiImpl) Download(arg *DownloadArg) (res *FileMetadata, content io.ReadCloser, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("content", "files", "download"), nil) + if err != nil { + return + } + + req.Header.Set("Dropbox-API-Arg", string(b)) + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + body := []byte(resp.Header.Get("Dropbox-API-Result")) + content = resp.Body + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError DownloadAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//GetMetadataAPIError is an error-wrapper for the get_metadata route +type GetMetadataAPIError struct { + dropbox.APIError + EndpointError *GetMetadataError `json:"error"` +} + +func (dbx *apiImpl) GetMetadata(arg *GetMetadataArg) (res IsMetadata, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "get_metadata"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + 
+ if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + var tmp metadataUnion + err = json.Unmarshal(body, &tmp) + if err != nil { + return + } + switch tmp.Tag { + case "file": + res = tmp.File + + case "folder": + res = tmp.Folder + + case "deleted": + res = tmp.Deleted + + } + return + } + if resp.StatusCode == http.StatusConflict { + var apiError GetMetadataAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//GetPreviewAPIError is an error-wrapper for the get_preview route +type GetPreviewAPIError struct { + dropbox.APIError + EndpointError *PreviewError `json:"error"` +} + +func (dbx *apiImpl) GetPreview(arg *PreviewArg) (res *FileMetadata, content io.ReadCloser, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("content", "files", "get_preview"), nil) + if err != nil { + return + } + + req.Header.Set("Dropbox-API-Arg", string(b)) + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + body := []byte(resp.Header.Get("Dropbox-API-Result")) + content = resp.Body + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError GetPreviewAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//GetTemporaryLinkAPIError is an error-wrapper for the get_temporary_link route +type GetTemporaryLinkAPIError struct { + dropbox.APIError + EndpointError *GetTemporaryLinkError `json:"error"` +} + +func (dbx *apiImpl) GetTemporaryLink(arg *GetTemporaryLinkArg) (res *GetTemporaryLinkResult, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "get_temporary_link"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, 
&res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError GetTemporaryLinkAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//GetThumbnailAPIError is an error-wrapper for the get_thumbnail route +type GetThumbnailAPIError struct { + dropbox.APIError + EndpointError *ThumbnailError `json:"error"` +} + +func (dbx *apiImpl) GetThumbnail(arg *ThumbnailArg) (res *FileMetadata, content io.ReadCloser, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("content", "files", "get_thumbnail"), nil) + if err != nil { + return + } + + req.Header.Set("Dropbox-API-Arg", string(b)) + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + body := []byte(resp.Header.Get("Dropbox-API-Result")) + content = resp.Body + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError GetThumbnailAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//ListFolderAPIError is an error-wrapper for the list_folder route +type ListFolderAPIError struct { + dropbox.APIError + EndpointError *ListFolderError `json:"error"` +} + +func (dbx *apiImpl) ListFolder(arg *ListFolderArg) (res *ListFolderResult, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "list_folder"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError ListFolderAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + 
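+ // A 400 response carries a plain-text summary rather than structured JSON, so the raw body is used verbatim as the error summary.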
apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//ListFolderContinueAPIError is an error-wrapper for the list_folder/continue route +type ListFolderContinueAPIError struct { + dropbox.APIError + EndpointError *ListFolderContinueError `json:"error"` +} + +func (dbx *apiImpl) ListFolderContinue(arg *ListFolderContinueArg) (res *ListFolderResult, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "list_folder/continue"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError ListFolderContinueAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//ListFolderGetLatestCursorAPIError is an error-wrapper for the list_folder/get_latest_cursor route +type ListFolderGetLatestCursorAPIError struct { + dropbox.APIError + EndpointError *ListFolderError `json:"error"` +} + +func (dbx *apiImpl) ListFolderGetLatestCursor(arg *ListFolderArg) (res *ListFolderGetLatestCursorResult, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "list_folder/get_latest_cursor"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError ListFolderGetLatestCursorAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + 
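+ // Any other status (auth, rate limiting, server errors) was decoded above as a generic dropbox.APIError and is returned as-is.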
err = apiError + return +} + +//ListFolderLongpollAPIError is an error-wrapper for the list_folder/longpoll route +type ListFolderLongpollAPIError struct { + dropbox.APIError + EndpointError *ListFolderLongpollError `json:"error"` +} + +func (dbx *apiImpl) ListFolderLongpoll(arg *ListFolderLongpollArg) (res *ListFolderLongpollResult, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("notify", "files", "list_folder/longpoll"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Del("Authorization") + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError ListFolderLongpollAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//ListRevisionsAPIError is an error-wrapper for the list_revisions route +type ListRevisionsAPIError struct { + dropbox.APIError + EndpointError *ListRevisionsError `json:"error"` +} + +func (dbx *apiImpl) ListRevisions(arg *ListRevisionsArg) (res *ListRevisionsResult, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "list_revisions"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError ListRevisionsAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//MoveAPIError is an error-wrapper for the move route +type MoveAPIError struct { + dropbox.APIError + EndpointError *RelocationError `json:"error"` +} + +func (dbx *apiImpl) Move(arg *RelocationArg) (res IsMetadata, err error) { + cli := dbx.Client + + 
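+ // move is a plain RPC endpoint: the relocation argument is JSON-encoded into the request body, and a successful response is a metadata union describing the entry at its new location.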
if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "move"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + var tmp metadataUnion + err = json.Unmarshal(body, &tmp) + if err != nil { + return + } + switch tmp.Tag { + case "file": + res = tmp.File + + case "folder": + res = tmp.Folder + + case "deleted": + res = tmp.Deleted + + } + return + } + if resp.StatusCode == http.StatusConflict { + var apiError MoveAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//MoveBatchAPIError is an error-wrapper for the move_batch route +type MoveBatchAPIError struct { + dropbox.APIError + EndpointError struct{} `json:"error"` +} + +func (dbx *apiImpl) MoveBatch(arg *RelocationBatchArg) (res *async.LaunchEmptyResult, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "move_batch"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError MoveBatchAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//MoveBatchCheckAPIError is an error-wrapper for the move_batch/check route +type MoveBatchCheckAPIError struct { + dropbox.APIError + EndpointError *async.PollError `json:"error"` +} + +func (dbx *apiImpl) MoveBatchCheck(arg *async.PollArg) (res *RelocationBatchJobStatus, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := 
http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "move_batch/check"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError MoveBatchCheckAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//PermanentlyDeleteAPIError is an error-wrapper for the permanently_delete route +type PermanentlyDeleteAPIError struct { + dropbox.APIError + EndpointError *DeleteError `json:"error"` +} + +func (dbx *apiImpl) PermanentlyDelete(arg *DeleteArg) (err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "permanently_delete"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + return + } + if resp.StatusCode == http.StatusConflict { + var apiError PermanentlyDeleteAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//PropertiesAddAPIError is an error-wrapper for the properties/add route +type PropertiesAddAPIError struct { + dropbox.APIError + EndpointError *AddPropertiesError `json:"error"` +} + +func (dbx *apiImpl) PropertiesAdd(arg *PropertyGroupWithPath) (err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "properties/add"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + 
} + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + return + } + if resp.StatusCode == http.StatusConflict { + var apiError PropertiesAddAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//PropertiesOverwriteAPIError is an error-wrapper for the properties/overwrite route +type PropertiesOverwriteAPIError struct { + dropbox.APIError + EndpointError *InvalidPropertyGroupError `json:"error"` +} + +func (dbx *apiImpl) PropertiesOverwrite(arg *PropertyGroupWithPath) (err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "properties/overwrite"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + return + } + if resp.StatusCode == http.StatusConflict { + var apiError PropertiesOverwriteAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//PropertiesRemoveAPIError is an error-wrapper for the properties/remove route +type PropertiesRemoveAPIError struct { + dropbox.APIError + EndpointError *RemovePropertiesError `json:"error"` +} + +func (dbx *apiImpl) PropertiesRemove(arg *RemovePropertiesArg) (err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "properties/remove"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + return + } + if resp.StatusCode == http.StatusConflict 
{ + var apiError PropertiesRemoveAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//PropertiesTemplateGetAPIError is an error-wrapper for the properties/template/get route +type PropertiesTemplateGetAPIError struct { + dropbox.APIError + EndpointError *properties.PropertyTemplateError `json:"error"` +} + +func (dbx *apiImpl) PropertiesTemplateGet(arg *properties.GetPropertyTemplateArg) (res *properties.GetPropertyTemplateResult, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "properties/template/get"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError PropertiesTemplateGetAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//PropertiesTemplateListAPIError is an error-wrapper for the properties/template/list route +type PropertiesTemplateListAPIError struct { + dropbox.APIError + EndpointError *properties.PropertyTemplateError `json:"error"` +} + +func (dbx *apiImpl) PropertiesTemplateList() (res *properties.ListPropertyTemplateIds, err error) { + cli := dbx.Client + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "properties/template/list"), nil) + if err != nil { + return + } + + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError PropertiesTemplateListAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + 
} + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//PropertiesUpdateAPIError is an error-wrapper for the properties/update route +type PropertiesUpdateAPIError struct { + dropbox.APIError + EndpointError *UpdatePropertiesError `json:"error"` +} + +func (dbx *apiImpl) PropertiesUpdate(arg *UpdatePropertyGroupArg) (err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "properties/update"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + return + } + if resp.StatusCode == http.StatusConflict { + var apiError PropertiesUpdateAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//RestoreAPIError is an error-wrapper for the restore route +type RestoreAPIError struct { + dropbox.APIError + EndpointError *RestoreError `json:"error"` +} + +func (dbx *apiImpl) Restore(arg *RestoreArg) (res *FileMetadata, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "restore"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError RestoreAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//SaveUrlAPIError is an error-wrapper for the save_url route +type SaveUrlAPIError struct { + dropbox.APIError + EndpointError *SaveUrlError `json:"error"` +} + +func (dbx *apiImpl) SaveUrl(arg *SaveUrlArg) (res *SaveUrlResult, err error) { + cli := dbx.Client + + if 
dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "save_url"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError SaveUrlAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//SaveUrlCheckJobStatusAPIError is an error-wrapper for the save_url/check_job_status route +type SaveUrlCheckJobStatusAPIError struct { + dropbox.APIError + EndpointError *async.PollError `json:"error"` +} + +func (dbx *apiImpl) SaveUrlCheckJobStatus(arg *async.PollArg) (res *SaveUrlJobStatus, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "save_url/check_job_status"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError SaveUrlCheckJobStatusAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//SearchAPIError is an error-wrapper for the search route +type SearchAPIError struct { + dropbox.APIError + EndpointError *SearchError `json:"error"` +} + +func (dbx *apiImpl) Search(arg *SearchArg) (res *SearchResult, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "search"), bytes.NewReader(b)) + if err != nil { + return + } + + 
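+ // Standard RPC-endpoint headers: a JSON request body, plus Dropbox-API-Select-User when acting on behalf of a specific team member.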
req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError SearchAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//UploadAPIError is an error-wrapper for the upload route +type UploadAPIError struct { + dropbox.APIError + EndpointError *UploadError `json:"error"` +} + +func (dbx *apiImpl) Upload(arg *CommitInfo, content io.Reader) (res *FileMetadata, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("content", "files", "upload"), content) + if err != nil { + return + } + + req.Header.Set("Dropbox-API-Arg", string(b)) + req.Header.Set("Content-Type", "application/octet-stream") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError UploadAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//UploadSessionAppendAPIError is an error-wrapper for the upload_session/append route +type UploadSessionAppendAPIError struct { + dropbox.APIError + EndpointError *UploadSessionLookupError `json:"error"` +} + +func (dbx *apiImpl) UploadSessionAppend(arg *UploadSessionCursor, content io.Reader) (err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("content", "files", "upload_session/append"), content) + if err != nil { + return + } + + req.Header.Set("Dropbox-API-Arg", string(b)) + req.Header.Set("Content-Type", "application/octet-stream") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + 
log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + return + } + if resp.StatusCode == http.StatusConflict { + var apiError UploadSessionAppendAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//UploadSessionAppendV2APIError is an error-wrapper for the upload_session/append_v2 route +type UploadSessionAppendV2APIError struct { + dropbox.APIError + EndpointError *UploadSessionLookupError `json:"error"` +} + +func (dbx *apiImpl) UploadSessionAppendV2(arg *UploadSessionAppendArg, content io.Reader) (err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("content", "files", "upload_session/append_v2"), content) + if err != nil { + return + } + + req.Header.Set("Dropbox-API-Arg", string(b)) + req.Header.Set("Content-Type", "application/octet-stream") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + return + } + if resp.StatusCode == http.StatusConflict { + var apiError UploadSessionAppendV2APIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//UploadSessionFinishAPIError is an error-wrapper for the upload_session/finish route +type UploadSessionFinishAPIError struct { + dropbox.APIError + EndpointError *UploadSessionFinishError `json:"error"` +} + +func (dbx *apiImpl) UploadSessionFinish(arg *UploadSessionFinishArg, content io.Reader) (res *FileMetadata, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("content", "files", "upload_session/finish"), content) + if err != nil { + return + } + + req.Header.Set("Dropbox-API-Arg", string(b)) + req.Header.Set("Content-Type", "application/octet-stream") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + 
body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError UploadSessionFinishAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//UploadSessionFinishBatchAPIError is an error-wrapper for the upload_session/finish_batch route +type UploadSessionFinishBatchAPIError struct { + dropbox.APIError + EndpointError struct{} `json:"error"` +} + +func (dbx *apiImpl) UploadSessionFinishBatch(arg *UploadSessionFinishBatchArg) (res *async.LaunchEmptyResult, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "upload_session/finish_batch"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError UploadSessionFinishBatchAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//UploadSessionFinishBatchCheckAPIError is an error-wrapper for the upload_session/finish_batch/check route +type UploadSessionFinishBatchCheckAPIError struct { + dropbox.APIError + EndpointError *async.PollError `json:"error"` +} + +func (dbx *apiImpl) UploadSessionFinishBatchCheck(arg *async.PollArg) (res *UploadSessionFinishBatchJobStatus, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("api", "files", "upload_session/finish_batch/check"), bytes.NewReader(b)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if 
dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError UploadSessionFinishBatchCheckAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +//UploadSessionStartAPIError is an error-wrapper for the upload_session/start route +type UploadSessionStartAPIError struct { + dropbox.APIError + EndpointError struct{} `json:"error"` +} + +func (dbx *apiImpl) UploadSessionStart(arg *UploadSessionStartArg, content io.Reader) (res *UploadSessionStartResult, err error) { + cli := dbx.Client + + if dbx.Config.Verbose { + log.Printf("arg: %v", arg) + } + b, err := json.Marshal(arg) + if err != nil { + return + } + + req, err := http.NewRequest("POST", (*dropbox.Context)(dbx).GenerateURL("content", "files", "upload_session/start"), content) + if err != nil { + return + } + + req.Header.Set("Dropbox-API-Arg", string(b)) + req.Header.Set("Content-Type", "application/octet-stream") + if dbx.Config.AsMemberID != "" { + req.Header.Set("Dropbox-API-Select-User", dbx.Config.AsMemberID) + } + if dbx.Config.Verbose { + log.Printf("req: %v", req) + } + resp, err := cli.Do(req) + if dbx.Config.Verbose { + log.Printf("resp: %v", resp) + } + if err != nil { + return + } + + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if dbx.Config.Verbose { + log.Printf("body: %s", body) + } + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + var apiError UploadSessionStartAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + var apiError dropbox.APIError + if resp.StatusCode == http.StatusBadRequest { + apiError.ErrorSummary = string(body) + err = apiError + return + } + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return +} + +// New returns a Client implementation for this namespace +func New(c dropbox.Config) *apiImpl { + ctx := apiImpl(dropbox.NewContext(c)) + return &ctx +} diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/metadata.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/metadata.go new file mode 100644 index 0000000..6c84a83 --- /dev/null +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/metadata.go @@ -0,0 +1,75 @@ +// Copyright (c) Dropbox, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package files + +import "encoding/json" + +type listFolderResult struct { + Entries []metadataUnion `json:"entries"` + Cursor string `json:"cursor"` + HasMore bool `json:"has_more"` +} + +// UnmarshalJSON deserializes into a ListFolderResult instance +func (r *ListFolderResult) UnmarshalJSON(b []byte) error { + var l listFolderResult + if err := json.Unmarshal(b, &l); err != nil { + return err + } + r.Cursor = l.Cursor + r.HasMore = l.HasMore + r.Entries = make([]IsMetadata, len(l.Entries)) + for i, e := range l.Entries { + switch e.Tag { + case "file": + r.Entries[i] = e.File + case "folder": + r.Entries[i] = e.Folder + case "deleted": + r.Entries[i] = e.Deleted + } + } + return nil +} + +type searchMatch struct { + MatchType *SearchMatchType `json:"match_type"` + Metadata metadataUnion `json:"metadata"` +} + +// UnmarshalJSON deserializes into a SearchMatch instance +func (s *SearchMatch) UnmarshalJSON(b []byte) error { + var m searchMatch + if err := json.Unmarshal(b, &m); err != nil { + return err + } + s.MatchType = m.MatchType + e := m.Metadata + switch e.Tag { + case "file": + s.Metadata = e.File + case "folder": + s.Metadata = e.Folder + case "deleted": + s.Metadata = e.Deleted + } + return nil +} diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/types.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/types.go new file mode 100644 index 0000000..ec5ce37 --- /dev/null +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/types.go @@ -0,0 +1,2757 @@ +// Copyright (c) Dropbox, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package files : This namespace contains endpoints and data types for basic +// file operations. 
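+// Most declarations below follow two generated shapes: plain argument/result structs whose New* constructors fill in default values, and tagged unions embedding dropbox.Tagged whose UnmarshalJSON dispatches on the ".tag" field.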
+package files + +import ( + "encoding/json" + "time" + + "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox" + "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/properties" +) + +// PropertiesError : has no documentation (yet) +type PropertiesError struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path *LookupError `json:"path,omitempty"` +} + +// Valid tag values for PropertiesError +const ( + PropertiesErrorPath = "path" +) + +// UnmarshalJSON deserializes into a PropertiesError instance +func (u *PropertiesError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path json.RawMessage `json:"path,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "path": + if err := json.Unmarshal(w.Path, &u.Path); err != nil { + return err + } + + } + return nil +} + +// InvalidPropertyGroupError : has no documentation (yet) +type InvalidPropertyGroupError struct { + dropbox.Tagged +} + +// Valid tag values for InvalidPropertyGroupError +const ( + InvalidPropertyGroupErrorPropertyFieldTooLarge = "property_field_too_large" + InvalidPropertyGroupErrorDoesNotFitTemplate = "does_not_fit_template" +) + +// AddPropertiesError : has no documentation (yet) +type AddPropertiesError struct { + dropbox.Tagged +} + +// Valid tag values for AddPropertiesError +const ( + AddPropertiesErrorPropertyGroupAlreadyExists = "property_group_already_exists" +) + +// GetMetadataArg : has no documentation (yet) +type GetMetadataArg struct { + // Path : The path of a file or folder on Dropbox. + Path string `json:"path"` + // IncludeMediaInfo : If true, `FileMetadata.media_info` is set for photo + // and video. + IncludeMediaInfo bool `json:"include_media_info"` + // IncludeDeleted : If true, `DeletedMetadata` will be returned for deleted + // file or folder, otherwise `LookupError.not_found` will be returned. + IncludeDeleted bool `json:"include_deleted"` + // IncludeHasExplicitSharedMembers : If true, the results will include a + // flag for each file indicating whether or not that file has any explicit + // members. + IncludeHasExplicitSharedMembers bool `json:"include_has_explicit_shared_members"` +} + +// NewGetMetadataArg returns a new GetMetadataArg instance +func NewGetMetadataArg(Path string) *GetMetadataArg { + s := new(GetMetadataArg) + s.Path = Path + s.IncludeMediaInfo = false + s.IncludeDeleted = false + s.IncludeHasExplicitSharedMembers = false + return s +} + +// AlphaGetMetadataArg : has no documentation (yet) +type AlphaGetMetadataArg struct { + GetMetadataArg + // IncludePropertyTemplates : If set to a valid list of template IDs, + // `FileMetadata.property_groups` is set for files with custom properties. 
+ IncludePropertyTemplates []string `json:"include_property_templates,omitempty"` +} + +// NewAlphaGetMetadataArg returns a new AlphaGetMetadataArg instance +func NewAlphaGetMetadataArg(Path string) *AlphaGetMetadataArg { + s := new(AlphaGetMetadataArg) + s.Path = Path + s.IncludeMediaInfo = false + s.IncludeDeleted = false + s.IncludeHasExplicitSharedMembers = false + return s +} + +// GetMetadataError : has no documentation (yet) +type GetMetadataError struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path *LookupError `json:"path,omitempty"` +} + +// Valid tag values for GetMetadataError +const ( + GetMetadataErrorPath = "path" +) + +// UnmarshalJSON deserializes into a GetMetadataError instance +func (u *GetMetadataError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path json.RawMessage `json:"path,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "path": + if err := json.Unmarshal(w.Path, &u.Path); err != nil { + return err + } + + } + return nil +} + +// AlphaGetMetadataError : has no documentation (yet) +type AlphaGetMetadataError struct { + dropbox.Tagged + // PropertiesError : has no documentation (yet) + PropertiesError *LookUpPropertiesError `json:"properties_error,omitempty"` +} + +// Valid tag values for AlphaGetMetadataError +const ( + AlphaGetMetadataErrorPropertiesError = "properties_error" +) + +// UnmarshalJSON deserializes into a AlphaGetMetadataError instance +func (u *AlphaGetMetadataError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // PropertiesError : has no documentation (yet) + PropertiesError json.RawMessage `json:"properties_error,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "properties_error": + if err := json.Unmarshal(w.PropertiesError, &u.PropertiesError); err != nil { + return err + } + + } + return nil +} + +// CommitInfo : has no documentation (yet) +type CommitInfo struct { + // Path : Path in the user's Dropbox to save the file. + Path string `json:"path"` + // Mode : Selects what to do if the file already exists. + Mode *WriteMode `json:"mode"` + // Autorename : If there's a conflict, as determined by `mode`, have the + // Dropbox server try to autorename the file to avoid conflict. + Autorename bool `json:"autorename"` + // ClientModified : The value to store as the `client_modified` timestamp. + // Dropbox automatically records the time at which the file was written to + // the Dropbox servers. It can also record an additional timestamp, provided + // by Dropbox desktop clients, mobile clients, and API apps of when the file + // was actually created or modified. + ClientModified time.Time `json:"client_modified,omitempty"` + // Mute : Normally, users are made aware of any file modifications in their + // Dropbox account via notifications in the client software. If true, this + // tells the clients that this modification shouldn't result in a user + // notification. 
+ Mute bool `json:"mute"` +} + +// NewCommitInfo returns a new CommitInfo instance +func NewCommitInfo(Path string) *CommitInfo { + s := new(CommitInfo) + s.Path = Path + s.Mode = &WriteMode{Tagged: dropbox.Tagged{"add"}} + s.Autorename = false + s.Mute = false + return s +} + +// CommitInfoWithProperties : has no documentation (yet) +type CommitInfoWithProperties struct { + CommitInfo + // PropertyGroups : List of custom properties to add to file. + PropertyGroups []*properties.PropertyGroup `json:"property_groups,omitempty"` +} + +// NewCommitInfoWithProperties returns a new CommitInfoWithProperties instance +func NewCommitInfoWithProperties(Path string) *CommitInfoWithProperties { + s := new(CommitInfoWithProperties) + s.Path = Path + s.Mode = &WriteMode{Tagged: dropbox.Tagged{"add"}} + s.Autorename = false + s.Mute = false + return s +} + +// CreateFolderArg : has no documentation (yet) +type CreateFolderArg struct { + // Path : Path in the user's Dropbox to create. + Path string `json:"path"` + // Autorename : If there's a conflict, have the Dropbox server try to + // autorename the folder to avoid the conflict. + Autorename bool `json:"autorename"` +} + +// NewCreateFolderArg returns a new CreateFolderArg instance +func NewCreateFolderArg(Path string) *CreateFolderArg { + s := new(CreateFolderArg) + s.Path = Path + s.Autorename = false + return s +} + +// CreateFolderError : has no documentation (yet) +type CreateFolderError struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path *WriteError `json:"path,omitempty"` +} + +// Valid tag values for CreateFolderError +const ( + CreateFolderErrorPath = "path" +) + +// UnmarshalJSON deserializes into a CreateFolderError instance +func (u *CreateFolderError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path json.RawMessage `json:"path,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "path": + if err := json.Unmarshal(w.Path, &u.Path); err != nil { + return err + } + + } + return nil +} + +// DeleteArg : has no documentation (yet) +type DeleteArg struct { + // Path : Path in the user's Dropbox to delete. + Path string `json:"path"` +} + +// NewDeleteArg returns a new DeleteArg instance +func NewDeleteArg(Path string) *DeleteArg { + s := new(DeleteArg) + s.Path = Path + return s +} + +// DeleteBatchArg : has no documentation (yet) +type DeleteBatchArg struct { + // Entries : has no documentation (yet) + Entries []*DeleteArg `json:"entries"` +} + +// NewDeleteBatchArg returns a new DeleteBatchArg instance +func NewDeleteBatchArg(Entries []*DeleteArg) *DeleteBatchArg { + s := new(DeleteBatchArg) + s.Entries = Entries + return s +} + +// DeleteBatchError : has no documentation (yet) +type DeleteBatchError struct { + dropbox.Tagged +} + +// Valid tag values for DeleteBatchError +const ( + DeleteBatchErrorTooManyWriteOperations = "too_many_write_operations" + DeleteBatchErrorOther = "other" +) + +// DeleteBatchJobStatus : has no documentation (yet) +type DeleteBatchJobStatus struct { + dropbox.Tagged + // Complete : The batch delete has finished. + Complete *DeleteBatchResult `json:"complete,omitempty"` + // Failed : The batch delete has failed. 
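+ // Sketch of consuming this union after polling the batch job (illustrative;
+ // `status` is a *DeleteBatchJobStatus from the async check route). Note the
+ // convention used throughout this file: struct payloads such as `complete`
+ // are decoded from the whole body because their fields sit inline next to
+ // the tag, while union payloads such as `failed` are decoded from their raw
+ // tag field.
+ //
+ //  switch status.Tag {
+ //  case DeleteBatchJobStatusComplete:
+ //      for _, entry := range status.Complete.Entries { _ = entry }
+ //  case DeleteBatchJobStatusFailed:
+ //      log.Printf("delete batch failed: %s", status.Failed.Tag)
+ //  }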
+ Failed *DeleteBatchError `json:"failed,omitempty"` +} + +// Valid tag values for DeleteBatchJobStatus +const ( + DeleteBatchJobStatusComplete = "complete" + DeleteBatchJobStatusFailed = "failed" + DeleteBatchJobStatusOther = "other" +) + +// UnmarshalJSON deserializes into a DeleteBatchJobStatus instance +func (u *DeleteBatchJobStatus) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Complete : The batch delete has finished. + Complete json.RawMessage `json:"complete,omitempty"` + // Failed : The batch delete has failed. + Failed json.RawMessage `json:"failed,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "complete": + if err := json.Unmarshal(body, &u.Complete); err != nil { + return err + } + + case "failed": + if err := json.Unmarshal(w.Failed, &u.Failed); err != nil { + return err + } + + } + return nil +} + +// DeleteBatchResult : has no documentation (yet) +type DeleteBatchResult struct { + // Entries : has no documentation (yet) + Entries []*DeleteBatchResultEntry `json:"entries"` +} + +// NewDeleteBatchResult returns a new DeleteBatchResult instance +func NewDeleteBatchResult(Entries []*DeleteBatchResultEntry) *DeleteBatchResult { + s := new(DeleteBatchResult) + s.Entries = Entries + return s +} + +// DeleteBatchResultEntry : has no documentation (yet) +type DeleteBatchResultEntry struct { + dropbox.Tagged + // Success : has no documentation (yet) + Success *DeleteResult `json:"success,omitempty"` + // Failure : has no documentation (yet) + Failure *DeleteError `json:"failure,omitempty"` +} + +// Valid tag values for DeleteBatchResultEntry +const ( + DeleteBatchResultEntrySuccess = "success" + DeleteBatchResultEntryFailure = "failure" +) + +// UnmarshalJSON deserializes into a DeleteBatchResultEntry instance +func (u *DeleteBatchResultEntry) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Success : has no documentation (yet) + Success json.RawMessage `json:"success,omitempty"` + // Failure : has no documentation (yet) + Failure json.RawMessage `json:"failure,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "success": + if err := json.Unmarshal(body, &u.Success); err != nil { + return err + } + + case "failure": + if err := json.Unmarshal(w.Failure, &u.Failure); err != nil { + return err + } + + } + return nil +} + +// DeleteError : has no documentation (yet) +type DeleteError struct { + dropbox.Tagged + // PathLookup : has no documentation (yet) + PathLookup *LookupError `json:"path_lookup,omitempty"` + // PathWrite : has no documentation (yet) + PathWrite *WriteError `json:"path_write,omitempty"` +} + +// Valid tag values for DeleteError +const ( + DeleteErrorPathLookup = "path_lookup" + DeleteErrorPathWrite = "path_write" + DeleteErrorOther = "other" +) + +// UnmarshalJSON deserializes into a DeleteError instance +func (u *DeleteError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // PathLookup : has no documentation (yet) + PathLookup json.RawMessage `json:"path_lookup,omitempty"` + // PathWrite : has no documentation (yet) + PathWrite json.RawMessage `json:"path_write,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "path_lookup": + if err := json.Unmarshal(w.PathLookup, &u.PathLookup); err != nil { + return err + } + + case 
"path_write": + if err := json.Unmarshal(w.PathWrite, &u.PathWrite); err != nil { + return err + } + + } + return nil +} + +// DeleteResult : has no documentation (yet) +type DeleteResult struct { + // Metadata : has no documentation (yet) + Metadata IsMetadata `json:"metadata"` +} + +// NewDeleteResult returns a new DeleteResult instance +func NewDeleteResult(Metadata IsMetadata) *DeleteResult { + s := new(DeleteResult) + s.Metadata = Metadata + return s +} + +// Metadata : Metadata for a file or folder. +type Metadata struct { + // Name : The last component of the path (including extension). This never + // contains a slash. + Name string `json:"name"` + // PathLower : The lowercased full path in the user's Dropbox. This always + // starts with a slash. This field will be null if the file or folder is not + // mounted. + PathLower string `json:"path_lower,omitempty"` + // PathDisplay : The cased path to be used for display purposes only. In + // rare instances the casing will not correctly match the user's filesystem, + // but this behavior will match the path provided in the Core API v1. + // Changes to the casing of paths won't be returned by `listFolderContinue`. + // This field will be null if the file or folder is not mounted. + PathDisplay string `json:"path_display,omitempty"` + // ParentSharedFolderId : Deprecated. Please use + // `FileSharingInfo.parent_shared_folder_id` or + // `FolderSharingInfo.parent_shared_folder_id` instead. + ParentSharedFolderId string `json:"parent_shared_folder_id,omitempty"` +} + +// NewMetadata returns a new Metadata instance +func NewMetadata(Name string) *Metadata { + s := new(Metadata) + s.Name = Name + return s +} + +// IsMetadata is the interface type for Metadata and its subtypes +type IsMetadata interface { + IsMetadata() +} + +// IsMetadata implements the IsMetadata interface +func (u *Metadata) IsMetadata() {} + +type metadataUnion struct { + dropbox.Tagged + // File : has no documentation (yet) + File *FileMetadata `json:"file,omitempty"` + // Folder : has no documentation (yet) + Folder *FolderMetadata `json:"folder,omitempty"` + // Deleted : has no documentation (yet) + Deleted *DeletedMetadata `json:"deleted,omitempty"` +} + +// Valid tag values for Metadata +const ( + MetadataFile = "file" + MetadataFolder = "folder" + MetadataDeleted = "deleted" +) + +// UnmarshalJSON deserializes into a metadataUnion instance +func (u *metadataUnion) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // File : has no documentation (yet) + File json.RawMessage `json:"file,omitempty"` + // Folder : has no documentation (yet) + Folder json.RawMessage `json:"folder,omitempty"` + // Deleted : has no documentation (yet) + Deleted json.RawMessage `json:"deleted,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "file": + if err := json.Unmarshal(body, &u.File); err != nil { + return err + } + + case "folder": + if err := json.Unmarshal(body, &u.Folder); err != nil { + return err + } + + case "deleted": + if err := json.Unmarshal(body, &u.Deleted); err != nil { + return err + } + + } + return nil +} + +// DeletedMetadata : Indicates that there used to be a file or folder at this +// path, but it no longer exists. 
+type DeletedMetadata struct { + Metadata +} + +// NewDeletedMetadata returns a new DeletedMetadata instance +func NewDeletedMetadata(Name string) *DeletedMetadata { + s := new(DeletedMetadata) + s.Name = Name + return s +} + +// Dimensions : Dimensions for a photo or video. +type Dimensions struct { + // Height : Height of the photo/video. + Height uint64 `json:"height"` + // Width : Width of the photo/video. + Width uint64 `json:"width"` +} + +// NewDimensions returns a new Dimensions instance +func NewDimensions(Height uint64, Width uint64) *Dimensions { + s := new(Dimensions) + s.Height = Height + s.Width = Width + return s +} + +// DownloadArg : has no documentation (yet) +type DownloadArg struct { + // Path : The path of the file to download. + Path string `json:"path"` + // Rev : Deprecated. Please specify revision in `path` instead. + Rev string `json:"rev,omitempty"` +} + +// NewDownloadArg returns a new DownloadArg instance +func NewDownloadArg(Path string) *DownloadArg { + s := new(DownloadArg) + s.Path = Path + return s +} + +// DownloadError : has no documentation (yet) +type DownloadError struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path *LookupError `json:"path,omitempty"` +} + +// Valid tag values for DownloadError +const ( + DownloadErrorPath = "path" + DownloadErrorOther = "other" +) + +// UnmarshalJSON deserializes into a DownloadError instance +func (u *DownloadError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path json.RawMessage `json:"path,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "path": + if err := json.Unmarshal(w.Path, &u.Path); err != nil { + return err + } + + } + return nil +} + +// FileMetadata : has no documentation (yet) +type FileMetadata struct { + Metadata + // Id : A unique identifier for the file. + Id string `json:"id"` + // ClientModified : For files, this is the modification time set by the + // desktop client when the file was added to Dropbox. Since this time is not + // verified (the Dropbox server stores whatever the desktop client sends + // up), this should only be used for display purposes (such as sorting) and + // not, for example, to determine if a file has changed or not. + ClientModified time.Time `json:"client_modified"` + // ServerModified : The last time the file was modified on Dropbox. + ServerModified time.Time `json:"server_modified"` + // Rev : A unique identifier for the current revision of a file. This field + // is the same rev as elsewhere in the API and can be used to detect changes + // and avoid conflicts. + Rev string `json:"rev"` + // Size : The file size in bytes. + Size uint64 `json:"size"` + // MediaInfo : Additional information if the file is a photo or video. + MediaInfo *MediaInfo `json:"media_info,omitempty"` + // SharingInfo : Set if this file is contained in a shared folder. + SharingInfo *FileSharingInfo `json:"sharing_info,omitempty"` + // PropertyGroups : Additional information if the file has custom properties + // with the property template specified. + PropertyGroups []*properties.PropertyGroup `json:"property_groups,omitempty"` + // HasExplicitSharedMembers : This flag will only be present if + // include_has_explicit_shared_members is true in `listFolder` or + // `getMetadata`. If this flag is present, it will be true if this file has + // any explicit shared members. 
This is different from sharing_info in that + // this could be true in the case where a file has explicit members but is + // not contained within a shared folder. + HasExplicitSharedMembers bool `json:"has_explicit_shared_members,omitempty"` +} + +// NewFileMetadata returns a new FileMetadata instance +func NewFileMetadata(Name string, Id string, ClientModified time.Time, ServerModified time.Time, Rev string, Size uint64) *FileMetadata { + s := new(FileMetadata) + s.Name = Name + s.Id = Id + s.ClientModified = ClientModified + s.ServerModified = ServerModified + s.Rev = Rev + s.Size = Size + return s +} + +// SharingInfo : Sharing info for a file or folder. +type SharingInfo struct { + // ReadOnly : True if the file or folder is inside a read-only shared + // folder. + ReadOnly bool `json:"read_only"` +} + +// NewSharingInfo returns a new SharingInfo instance +func NewSharingInfo(ReadOnly bool) *SharingInfo { + s := new(SharingInfo) + s.ReadOnly = ReadOnly + return s +} + +// FileSharingInfo : Sharing info for a file which is contained by a shared +// folder. +type FileSharingInfo struct { + SharingInfo + // ParentSharedFolderId : ID of shared folder that holds this file. + ParentSharedFolderId string `json:"parent_shared_folder_id"` + // ModifiedBy : The last user who modified the file. This field will be null + // if the user's account has been deleted. + ModifiedBy string `json:"modified_by,omitempty"` +} + +// NewFileSharingInfo returns a new FileSharingInfo instance +func NewFileSharingInfo(ReadOnly bool, ParentSharedFolderId string) *FileSharingInfo { + s := new(FileSharingInfo) + s.ReadOnly = ReadOnly + s.ParentSharedFolderId = ParentSharedFolderId + return s +} + +// FolderMetadata : has no documentation (yet) +type FolderMetadata struct { + Metadata + // Id : A unique identifier for the folder. + Id string `json:"id"` + // SharedFolderId : Deprecated. Please use `sharing_info` instead. + SharedFolderId string `json:"shared_folder_id,omitempty"` + // SharingInfo : Set if the folder is contained in a shared folder or is a + // shared folder mount point. + SharingInfo *FolderSharingInfo `json:"sharing_info,omitempty"` + // PropertyGroups : Additional information if the file has custom properties + // with the property template specified. + PropertyGroups []*properties.PropertyGroup `json:"property_groups,omitempty"` +} + +// NewFolderMetadata returns a new FolderMetadata instance +func NewFolderMetadata(Name string, Id string) *FolderMetadata { + s := new(FolderMetadata) + s.Name = Name + s.Id = Id + return s +} + +// FolderSharingInfo : Sharing info for a folder which is contained in a shared +// folder or is a shared folder mount point. +type FolderSharingInfo struct { + SharingInfo + // ParentSharedFolderId : Set if the folder is contained by a shared folder. + ParentSharedFolderId string `json:"parent_shared_folder_id,omitempty"` + // SharedFolderId : If this folder is a shared folder mount point, the ID of + // the shared folder mounted at this location. + SharedFolderId string `json:"shared_folder_id,omitempty"` + // TraverseOnly : Specifies that the folder can only be traversed and the + // user can only see a limited subset of the contents of this folder because + // they don't have read access to this folder. They do, however, have access + // to some sub folder. + TraverseOnly bool `json:"traverse_only"` + // NoAccess : Specifies that the folder cannot be accessed by the user. 
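+ // For illustration (not generated code), both flags signal contents that
+ // are not readable for this user when walking entries:
+ //
+ //  if si := folder.SharingInfo; si != nil && (si.TraverseOnly || si.NoAccess) {
+ //      // skip: contents of this folder cannot be read
+ //  }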
+ NoAccess bool `json:"no_access"` +} + +// NewFolderSharingInfo returns a new FolderSharingInfo instance +func NewFolderSharingInfo(ReadOnly bool) *FolderSharingInfo { + s := new(FolderSharingInfo) + s.ReadOnly = ReadOnly + s.TraverseOnly = false + s.NoAccess = false + return s +} + +// GetCopyReferenceArg : has no documentation (yet) +type GetCopyReferenceArg struct { + // Path : The path to the file or folder you want to get a copy reference + // to. + Path string `json:"path"` +} + +// NewGetCopyReferenceArg returns a new GetCopyReferenceArg instance +func NewGetCopyReferenceArg(Path string) *GetCopyReferenceArg { + s := new(GetCopyReferenceArg) + s.Path = Path + return s +} + +// GetCopyReferenceError : has no documentation (yet) +type GetCopyReferenceError struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path *LookupError `json:"path,omitempty"` +} + +// Valid tag values for GetCopyReferenceError +const ( + GetCopyReferenceErrorPath = "path" + GetCopyReferenceErrorOther = "other" +) + +// UnmarshalJSON deserializes into a GetCopyReferenceError instance +func (u *GetCopyReferenceError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path json.RawMessage `json:"path,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "path": + if err := json.Unmarshal(w.Path, &u.Path); err != nil { + return err + } + + } + return nil +} + +// GetCopyReferenceResult : has no documentation (yet) +type GetCopyReferenceResult struct { + // Metadata : Metadata of the file or folder. + Metadata IsMetadata `json:"metadata"` + // CopyReference : A copy reference to the file or folder. + CopyReference string `json:"copy_reference"` + // Expires : The expiration date of the copy reference. This value is + // currently set to be far enough in the future so that expiration is + // effectively not an issue. + Expires time.Time `json:"expires"` +} + +// NewGetCopyReferenceResult returns a new GetCopyReferenceResult instance +func NewGetCopyReferenceResult(Metadata IsMetadata, CopyReference string, Expires time.Time) *GetCopyReferenceResult { + s := new(GetCopyReferenceResult) + s.Metadata = Metadata + s.CopyReference = CopyReference + s.Expires = Expires + return s +} + +// GetTemporaryLinkArg : has no documentation (yet) +type GetTemporaryLinkArg struct { + // Path : The path to the file you want a temporary link to. 
+ Path string `json:"path"`
+}
+
+// NewGetTemporaryLinkArg returns a new GetTemporaryLinkArg instance
+func NewGetTemporaryLinkArg(Path string) *GetTemporaryLinkArg {
+ s := new(GetTemporaryLinkArg)
+ s.Path = Path
+ return s
+}
+
+// GetTemporaryLinkError : has no documentation (yet)
+type GetTemporaryLinkError struct {
+ dropbox.Tagged
+ // Path : has no documentation (yet)
+ Path *LookupError `json:"path,omitempty"`
+}
+
+// Valid tag values for GetTemporaryLinkError
+const (
+ GetTemporaryLinkErrorPath = "path"
+ GetTemporaryLinkErrorOther = "other"
+)
+
+// UnmarshalJSON deserializes into a GetTemporaryLinkError instance
+func (u *GetTemporaryLinkError) UnmarshalJSON(body []byte) error {
+ type wrap struct {
+ dropbox.Tagged
+ // Path : has no documentation (yet)
+ Path json.RawMessage `json:"path,omitempty"`
+ }
+ var w wrap
+ if err := json.Unmarshal(body, &w); err != nil {
+ return err
+ }
+ u.Tag = w.Tag
+ switch u.Tag {
+ case "path":
+ if err := json.Unmarshal(w.Path, &u.Path); err != nil {
+ return err
+ }
+
+ }
+ return nil
+}
+
+// GetTemporaryLinkResult : has no documentation (yet)
+type GetTemporaryLinkResult struct {
+ // Metadata : Metadata of the file.
+ Metadata *FileMetadata `json:"metadata"`
+ // Link : The temporary link which can be used to stream the contents of
+ // the file.
+ Link string `json:"link"`
+}
+
+// NewGetTemporaryLinkResult returns a new GetTemporaryLinkResult instance
+func NewGetTemporaryLinkResult(Metadata *FileMetadata, Link string) *GetTemporaryLinkResult {
+ s := new(GetTemporaryLinkResult)
+ s.Metadata = Metadata
+ s.Link = Link
+ return s
+}
+
+// GpsCoordinates : GPS coordinates for a photo or video.
+type GpsCoordinates struct {
+ // Latitude : Latitude of the GPS coordinates.
+ Latitude float64 `json:"latitude"`
+ // Longitude : Longitude of the GPS coordinates.
+ Longitude float64 `json:"longitude"`
+}
+
+// NewGpsCoordinates returns a new GpsCoordinates instance
+func NewGpsCoordinates(Latitude float64, Longitude float64) *GpsCoordinates {
+ s := new(GpsCoordinates)
+ s.Latitude = Latitude
+ s.Longitude = Longitude
+ return s
+}
+
+// ListFolderArg : has no documentation (yet)
+type ListFolderArg struct {
+ // Path : The path to the folder you want to see the contents of.
+ Path string `json:"path"`
+ // Recursive : If true, the list folder operation will be applied
+ // recursively to all subfolders and the response will contain contents of
+ // all subfolders.
+ Recursive bool `json:"recursive"`
+ // IncludeMediaInfo : If true, `FileMetadata.media_info` is set for photo
+ // and video.
+ IncludeMediaInfo bool `json:"include_media_info"`
+ // IncludeDeleted : If true, the results will include entries for files and
+ // folders that used to exist but were deleted.
+ IncludeDeleted bool `json:"include_deleted"`
+ // IncludeHasExplicitSharedMembers : If true, the results will include a
+ // flag for each file indicating whether or not that file has any explicit
+ // members.
+ IncludeHasExplicitSharedMembers bool `json:"include_has_explicit_shared_members"`
+}
+
+// NewListFolderArg returns a new ListFolderArg instance
+func NewListFolderArg(Path string) *ListFolderArg {
+ s := new(ListFolderArg)
+ s.Path = Path
+ s.Recursive = false
+ s.IncludeMediaInfo = false
+ s.IncludeDeleted = false
+ s.IncludeHasExplicitSharedMembers = false
+ return s
+}
+
+// ListFolderContinueArg : has no documentation (yet)
+type ListFolderContinueArg struct {
+ // Cursor : The cursor returned by your last call to `listFolder` or
+ // `listFolderContinue`.
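+ // A sketch of cursor paging (illustrative; it assumes the generated client
+ // from files.New exposes ListFolder / ListFolderContinue, mirroring the
+ // route names):
+ //
+ //  res, err := dbx.ListFolder(NewListFolderArg("/photos"))
+ //  for err == nil && res.HasMore {
+ //      res, err = dbx.ListFolderContinue(NewListFolderContinueArg(res.Cursor))
+ //  }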
+ Cursor string `json:"cursor"` +} + +// NewListFolderContinueArg returns a new ListFolderContinueArg instance +func NewListFolderContinueArg(Cursor string) *ListFolderContinueArg { + s := new(ListFolderContinueArg) + s.Cursor = Cursor + return s +} + +// ListFolderContinueError : has no documentation (yet) +type ListFolderContinueError struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path *LookupError `json:"path,omitempty"` +} + +// Valid tag values for ListFolderContinueError +const ( + ListFolderContinueErrorPath = "path" + ListFolderContinueErrorReset = "reset" + ListFolderContinueErrorOther = "other" +) + +// UnmarshalJSON deserializes into a ListFolderContinueError instance +func (u *ListFolderContinueError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path json.RawMessage `json:"path,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "path": + if err := json.Unmarshal(w.Path, &u.Path); err != nil { + return err + } + + } + return nil +} + +// ListFolderError : has no documentation (yet) +type ListFolderError struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path *LookupError `json:"path,omitempty"` +} + +// Valid tag values for ListFolderError +const ( + ListFolderErrorPath = "path" + ListFolderErrorOther = "other" +) + +// UnmarshalJSON deserializes into a ListFolderError instance +func (u *ListFolderError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path json.RawMessage `json:"path,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "path": + if err := json.Unmarshal(w.Path, &u.Path); err != nil { + return err + } + + } + return nil +} + +// ListFolderGetLatestCursorResult : has no documentation (yet) +type ListFolderGetLatestCursorResult struct { + // Cursor : Pass the cursor into `listFolderContinue` to see what's changed + // in the folder since your previous query. + Cursor string `json:"cursor"` +} + +// NewListFolderGetLatestCursorResult returns a new ListFolderGetLatestCursorResult instance +func NewListFolderGetLatestCursorResult(Cursor string) *ListFolderGetLatestCursorResult { + s := new(ListFolderGetLatestCursorResult) + s.Cursor = Cursor + return s +} + +// ListFolderLongpollArg : has no documentation (yet) +type ListFolderLongpollArg struct { + // Cursor : A cursor as returned by `listFolder` or `listFolderContinue`. + // Cursors retrieved by setting `ListFolderArg.include_media_info` to true + // are not supported. + Cursor string `json:"cursor"` + // Timeout : A timeout in seconds. The request will block for at most this + // length of time, plus up to 90 seconds of random jitter added to avoid the + // thundering herd problem. Care should be taken when using this parameter, + // as some network infrastructure does not support long timeouts. 
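+ // A sketch of one longpoll cycle (illustrative; `dbx` and `cursor` are
+ // assumed from a prior listFolder call, and the method name is assumed to
+ // mirror the listFolderLongpoll route):
+ //
+ //  lp, err := dbx.ListFolderLongpoll(NewListFolderLongpollArg(cursor))
+ //  if err == nil && lp.Backoff > 0 {
+ //      time.Sleep(time.Duration(lp.Backoff) * time.Second)
+ //  }
+ //  if err == nil && lp.Changes {
+ //      // fetch the changes via listFolderContinue
+ //  }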
+ Timeout uint64 `json:"timeout"` +} + +// NewListFolderLongpollArg returns a new ListFolderLongpollArg instance +func NewListFolderLongpollArg(Cursor string) *ListFolderLongpollArg { + s := new(ListFolderLongpollArg) + s.Cursor = Cursor + s.Timeout = 30 + return s +} + +// ListFolderLongpollError : has no documentation (yet) +type ListFolderLongpollError struct { + dropbox.Tagged +} + +// Valid tag values for ListFolderLongpollError +const ( + ListFolderLongpollErrorReset = "reset" + ListFolderLongpollErrorOther = "other" +) + +// ListFolderLongpollResult : has no documentation (yet) +type ListFolderLongpollResult struct { + // Changes : Indicates whether new changes are available. If true, call + // `listFolderContinue` to retrieve the changes. + Changes bool `json:"changes"` + // Backoff : If present, backoff for at least this many seconds before + // calling `listFolderLongpoll` again. + Backoff uint64 `json:"backoff,omitempty"` +} + +// NewListFolderLongpollResult returns a new ListFolderLongpollResult instance +func NewListFolderLongpollResult(Changes bool) *ListFolderLongpollResult { + s := new(ListFolderLongpollResult) + s.Changes = Changes + return s +} + +// ListFolderResult : has no documentation (yet) +type ListFolderResult struct { + // Entries : The files and (direct) subfolders in the folder. + Entries []IsMetadata `json:"entries"` + // Cursor : Pass the cursor into `listFolderContinue` to see what's changed + // in the folder since your previous query. + Cursor string `json:"cursor"` + // HasMore : If true, then there are more entries available. Pass the cursor + // to `listFolderContinue` to retrieve the rest. + HasMore bool `json:"has_more"` +} + +// NewListFolderResult returns a new ListFolderResult instance +func NewListFolderResult(Entries []IsMetadata, Cursor string, HasMore bool) *ListFolderResult { + s := new(ListFolderResult) + s.Entries = Entries + s.Cursor = Cursor + s.HasMore = HasMore + return s +} + +// ListRevisionsArg : has no documentation (yet) +type ListRevisionsArg struct { + // Path : The path to the file you want to see the revisions of. + Path string `json:"path"` + // Limit : The maximum number of revision entries returned. + Limit uint64 `json:"limit"` +} + +// NewListRevisionsArg returns a new ListRevisionsArg instance +func NewListRevisionsArg(Path string) *ListRevisionsArg { + s := new(ListRevisionsArg) + s.Path = Path + s.Limit = 10 + return s +} + +// ListRevisionsError : has no documentation (yet) +type ListRevisionsError struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path *LookupError `json:"path,omitempty"` +} + +// Valid tag values for ListRevisionsError +const ( + ListRevisionsErrorPath = "path" + ListRevisionsErrorOther = "other" +) + +// UnmarshalJSON deserializes into a ListRevisionsError instance +func (u *ListRevisionsError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path json.RawMessage `json:"path,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "path": + if err := json.Unmarshal(w.Path, &u.Path); err != nil { + return err + } + + } + return nil +} + +// ListRevisionsResult : has no documentation (yet) +type ListRevisionsResult struct { + // IsDeleted : If the file is deleted. + IsDeleted bool `json:"is_deleted"` + // Entries : The revisions for the file. Only non-delete revisions will show + // up here. 
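+ // Illustrative sketch: list a file's revisions, then restore one; each
+ // returned entry's Rev can be passed to NewRestoreArg(path, rev):
+ //
+ //  arg := NewListRevisionsArg("/contract.docx")
+ //  arg.Limit = 25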
+ Entries []*FileMetadata `json:"entries"`
+}
+
+// NewListRevisionsResult returns a new ListRevisionsResult instance
+func NewListRevisionsResult(IsDeleted bool, Entries []*FileMetadata) *ListRevisionsResult {
+ s := new(ListRevisionsResult)
+ s.IsDeleted = IsDeleted
+ s.Entries = Entries
+ return s
+}
+
+// LookUpPropertiesError : has no documentation (yet)
+type LookUpPropertiesError struct {
+ dropbox.Tagged
+}
+
+// Valid tag values for LookUpPropertiesError
+const (
+ LookUpPropertiesErrorPropertyGroupNotFound = "property_group_not_found"
+)
+
+// LookupError : has no documentation (yet)
+type LookupError struct {
+ dropbox.Tagged
+ // MalformedPath : has no documentation (yet)
+ MalformedPath string `json:"malformed_path,omitempty"`
+ // InvalidPathRoot : The path root parameter provided is invalid.
+ InvalidPathRoot *PathRootError `json:"invalid_path_root,omitempty"`
+}
+
+// Valid tag values for LookupError
+const (
+ LookupErrorMalformedPath = "malformed_path"
+ LookupErrorNotFound = "not_found"
+ LookupErrorNotFile = "not_file"
+ LookupErrorNotFolder = "not_folder"
+ LookupErrorRestrictedContent = "restricted_content"
+ LookupErrorInvalidPathRoot = "invalid_path_root"
+ LookupErrorOther = "other"
+)
+
+// UnmarshalJSON deserializes into a LookupError instance
+func (u *LookupError) UnmarshalJSON(body []byte) error {
+ type wrap struct {
+ dropbox.Tagged
+ // MalformedPath : has no documentation (yet)
+ MalformedPath json.RawMessage `json:"malformed_path,omitempty"`
+ // InvalidPathRoot : The path root parameter provided is invalid.
+ InvalidPathRoot json.RawMessage `json:"invalid_path_root,omitempty"`
+ }
+ var w wrap
+ if err := json.Unmarshal(body, &w); err != nil {
+ return err
+ }
+ u.Tag = w.Tag
+ switch u.Tag {
+ case "malformed_path":
+ // The string payload arrives under its own key rather than inline, so it
+ // must be decoded from the raw field, not from the whole body.
+ if err := json.Unmarshal(w.MalformedPath, &u.MalformedPath); err != nil {
+ return err
+ }
+
+ case "invalid_path_root":
+ if err := json.Unmarshal(body, &u.InvalidPathRoot); err != nil {
+ return err
+ }
+
+ }
+ return nil
+}
+
+// MediaInfo : has no documentation (yet)
+type MediaInfo struct {
+ dropbox.Tagged
+ // Metadata : The metadata for the photo/video.
+ Metadata IsMediaMetadata `json:"metadata,omitempty"`
+}
+
+// Valid tag values for MediaInfo
+const (
+ MediaInfoPending = "pending"
+ MediaInfoMetadata = "metadata"
+)
+
+// UnmarshalJSON deserializes into a MediaInfo instance
+func (u *MediaInfo) UnmarshalJSON(body []byte) error {
+ type wrap struct {
+ dropbox.Tagged
+ // Metadata : The metadata for the photo/video.
+ Metadata json.RawMessage `json:"metadata,omitempty"`
+ }
+ var w wrap
+ if err := json.Unmarshal(body, &w); err != nil {
+ return err
+ }
+ u.Tag = w.Tag
+ switch u.Tag {
+ case "metadata":
+ // MediaMetadata has enumerated subtypes, so the payload carries its own
+ // .tag; dispatch through mediaMetadataUnion to a concrete photo/video
+ // value instead of unmarshaling into the interface directly.
+ var m mediaMetadataUnion
+ if err := json.Unmarshal(w.Metadata, &m); err != nil {
+ return err
+ }
+ switch m.Tag {
+ case "photo":
+ u.Metadata = m.Photo
+ case "video":
+ u.Metadata = m.Video
+ }
+
+ }
+ return nil
+}
+
+// MediaMetadata : Metadata for a photo or video.
+type MediaMetadata struct {
+ // Dimensions : Dimensions of the photo/video.
+ Dimensions *Dimensions `json:"dimensions,omitempty"`
+ // Location : The GPS coordinates of the photo/video.
+ Location *GpsCoordinates `json:"location,omitempty"`
+ // TimeTaken : The timestamp when the photo/video was taken.
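+ // Illustrative sketch: reading media metadata off a FileMetadata (fm),
+ // once requested via include_media_info:
+ //
+ //  if fm.MediaInfo != nil && fm.MediaInfo.Tag == MediaInfoMetadata {
+ //      if photo, ok := fm.MediaInfo.Metadata.(*PhotoMetadata); ok {
+ //          _ = photo.TimeTaken
+ //      }
+ //  }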
+ TimeTaken time.Time `json:"time_taken,omitempty"` +} + +// NewMediaMetadata returns a new MediaMetadata instance +func NewMediaMetadata() *MediaMetadata { + s := new(MediaMetadata) + return s +} + +// IsMediaMetadata is the interface type for MediaMetadata and its subtypes +type IsMediaMetadata interface { + IsMediaMetadata() +} + +// IsMediaMetadata implements the IsMediaMetadata interface +func (u *MediaMetadata) IsMediaMetadata() {} + +type mediaMetadataUnion struct { + dropbox.Tagged + // Photo : has no documentation (yet) + Photo *PhotoMetadata `json:"photo,omitempty"` + // Video : has no documentation (yet) + Video *VideoMetadata `json:"video,omitempty"` +} + +// Valid tag values for MediaMetadata +const ( + MediaMetadataPhoto = "photo" + MediaMetadataVideo = "video" +) + +// UnmarshalJSON deserializes into a mediaMetadataUnion instance +func (u *mediaMetadataUnion) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Photo : has no documentation (yet) + Photo json.RawMessage `json:"photo,omitempty"` + // Video : has no documentation (yet) + Video json.RawMessage `json:"video,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "photo": + if err := json.Unmarshal(body, &u.Photo); err != nil { + return err + } + + case "video": + if err := json.Unmarshal(body, &u.Video); err != nil { + return err + } + + } + return nil +} + +// PathRootError : has no documentation (yet) +type PathRootError struct { + // PathRoot : The user's latest path root value. None if the user no longer + // has a path root. + PathRoot string `json:"path_root,omitempty"` +} + +// NewPathRootError returns a new PathRootError instance +func NewPathRootError() *PathRootError { + s := new(PathRootError) + return s +} + +// PhotoMetadata : Metadata for a photo. +type PhotoMetadata struct { + MediaMetadata +} + +// NewPhotoMetadata returns a new PhotoMetadata instance +func NewPhotoMetadata() *PhotoMetadata { + s := new(PhotoMetadata) + return s +} + +// PreviewArg : has no documentation (yet) +type PreviewArg struct { + // Path : The path of the file to preview. + Path string `json:"path"` + // Rev : Deprecated. Please specify revision in `path` instead. + Rev string `json:"rev,omitempty"` +} + +// NewPreviewArg returns a new PreviewArg instance +func NewPreviewArg(Path string) *PreviewArg { + s := new(PreviewArg) + s.Path = Path + return s +} + +// PreviewError : has no documentation (yet) +type PreviewError struct { + dropbox.Tagged + // Path : An error occurs when downloading metadata for the file. + Path *LookupError `json:"path,omitempty"` +} + +// Valid tag values for PreviewError +const ( + PreviewErrorPath = "path" + PreviewErrorInProgress = "in_progress" + PreviewErrorUnsupportedExtension = "unsupported_extension" + PreviewErrorUnsupportedContent = "unsupported_content" +) + +// UnmarshalJSON deserializes into a PreviewError instance +func (u *PreviewError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Path : An error occurs when downloading metadata for the file. 
+ Path json.RawMessage `json:"path,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "path": + if err := json.Unmarshal(w.Path, &u.Path); err != nil { + return err + } + + } + return nil +} + +// PropertyGroupUpdate : has no documentation (yet) +type PropertyGroupUpdate struct { + // TemplateId : A unique identifier for a property template. + TemplateId string `json:"template_id"` + // AddOrUpdateFields : List of property fields to update if the field + // already exists. If the field doesn't exist, add the field to the property + // group. + AddOrUpdateFields []*properties.PropertyField `json:"add_or_update_fields,omitempty"` + // RemoveFields : List of property field names to remove from property group + // if the field exists. + RemoveFields []string `json:"remove_fields,omitempty"` +} + +// NewPropertyGroupUpdate returns a new PropertyGroupUpdate instance +func NewPropertyGroupUpdate(TemplateId string) *PropertyGroupUpdate { + s := new(PropertyGroupUpdate) + s.TemplateId = TemplateId + return s +} + +// PropertyGroupWithPath : has no documentation (yet) +type PropertyGroupWithPath struct { + // Path : A unique identifier for the file. + Path string `json:"path"` + // PropertyGroups : Filled custom property templates associated with a file. + PropertyGroups []*properties.PropertyGroup `json:"property_groups"` +} + +// NewPropertyGroupWithPath returns a new PropertyGroupWithPath instance +func NewPropertyGroupWithPath(Path string, PropertyGroups []*properties.PropertyGroup) *PropertyGroupWithPath { + s := new(PropertyGroupWithPath) + s.Path = Path + s.PropertyGroups = PropertyGroups + return s +} + +// RelocationPath : has no documentation (yet) +type RelocationPath struct { + // FromPath : Path in the user's Dropbox to be copied or moved. + FromPath string `json:"from_path"` + // ToPath : Path in the user's Dropbox that is the destination. + ToPath string `json:"to_path"` +} + +// NewRelocationPath returns a new RelocationPath instance +func NewRelocationPath(FromPath string, ToPath string) *RelocationPath { + s := new(RelocationPath) + s.FromPath = FromPath + s.ToPath = ToPath + return s +} + +// RelocationArg : has no documentation (yet) +type RelocationArg struct { + RelocationPath + // AllowSharedFolder : If true, `copy` will copy contents in shared folder, + // otherwise `RelocationError.cant_copy_shared_folder` will be returned if + // `from_path` contains shared folder. This field is always true for `move`. + AllowSharedFolder bool `json:"allow_shared_folder"` + // Autorename : If there's a conflict, have the Dropbox server try to + // autorename the file to avoid the conflict. + Autorename bool `json:"autorename"` +} + +// NewRelocationArg returns a new RelocationArg instance +func NewRelocationArg(FromPath string, ToPath string) *RelocationArg { + s := new(RelocationArg) + s.FromPath = FromPath + s.ToPath = ToPath + s.AllowSharedFolder = false + s.Autorename = false + return s +} + +// RelocationBatchArg : has no documentation (yet) +type RelocationBatchArg struct { + // Entries : List of entries to be moved or copied. Each entry is + // `RelocationPath`. + Entries []*RelocationPath `json:"entries"` + // AllowSharedFolder : If true, `copyBatch` will copy contents in shared + // folder, otherwise `RelocationError.cant_copy_shared_folder` will be + // returned if `RelocationPath.from_path` contains shared folder. This + // field is always true for `moveBatch`. 
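+ // Illustrative sketch of a single move/copy argument; the batch form takes
+ // a list of the same RelocationPath pairs:
+ //
+ //  arg := NewRelocationArg("/drafts/a.txt", "/final/a.txt")
+ //  arg.Autorename = true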
+ AllowSharedFolder bool `json:"allow_shared_folder"` + // Autorename : If there's a conflict with any file, have the Dropbox server + // try to autorename that file to avoid the conflict. + Autorename bool `json:"autorename"` +} + +// NewRelocationBatchArg returns a new RelocationBatchArg instance +func NewRelocationBatchArg(Entries []*RelocationPath) *RelocationBatchArg { + s := new(RelocationBatchArg) + s.Entries = Entries + s.AllowSharedFolder = false + s.Autorename = false + return s +} + +// RelocationError : has no documentation (yet) +type RelocationError struct { + dropbox.Tagged + // FromLookup : has no documentation (yet) + FromLookup *LookupError `json:"from_lookup,omitempty"` + // FromWrite : has no documentation (yet) + FromWrite *WriteError `json:"from_write,omitempty"` + // To : has no documentation (yet) + To *WriteError `json:"to,omitempty"` +} + +// Valid tag values for RelocationError +const ( + RelocationErrorFromLookup = "from_lookup" + RelocationErrorFromWrite = "from_write" + RelocationErrorTo = "to" + RelocationErrorCantCopySharedFolder = "cant_copy_shared_folder" + RelocationErrorCantNestSharedFolder = "cant_nest_shared_folder" + RelocationErrorCantMoveFolderIntoItself = "cant_move_folder_into_itself" + RelocationErrorTooManyFiles = "too_many_files" + RelocationErrorOther = "other" +) + +// UnmarshalJSON deserializes into a RelocationError instance +func (u *RelocationError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // FromLookup : has no documentation (yet) + FromLookup json.RawMessage `json:"from_lookup,omitempty"` + // FromWrite : has no documentation (yet) + FromWrite json.RawMessage `json:"from_write,omitempty"` + // To : has no documentation (yet) + To json.RawMessage `json:"to,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "from_lookup": + if err := json.Unmarshal(w.FromLookup, &u.FromLookup); err != nil { + return err + } + + case "from_write": + if err := json.Unmarshal(w.FromWrite, &u.FromWrite); err != nil { + return err + } + + case "to": + if err := json.Unmarshal(w.To, &u.To); err != nil { + return err + } + + } + return nil +} + +// RelocationBatchError : has no documentation (yet) +type RelocationBatchError struct { + dropbox.Tagged +} + +// Valid tag values for RelocationBatchError +const ( + RelocationBatchErrorDuplicatedOrNestedPaths = "duplicated_or_nested_paths" + RelocationBatchErrorTooManyWriteOperations = "too_many_write_operations" +) + +// RelocationBatchJobStatus : has no documentation (yet) +type RelocationBatchJobStatus struct { + dropbox.Tagged + // Complete : The copy or move batch job has finished. + Complete *RelocationBatchResult `json:"complete,omitempty"` + // Failed : The copy or move batch job has failed with exception. + Failed *RelocationBatchError `json:"failed,omitempty"` +} + +// Valid tag values for RelocationBatchJobStatus +const ( + RelocationBatchJobStatusComplete = "complete" + RelocationBatchJobStatusFailed = "failed" +) + +// UnmarshalJSON deserializes into a RelocationBatchJobStatus instance +func (u *RelocationBatchJobStatus) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Complete : The copy or move batch job has finished. + Complete json.RawMessage `json:"complete,omitempty"` + // Failed : The copy or move batch job has failed with exception. 
+ Failed json.RawMessage `json:"failed,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "complete": + if err := json.Unmarshal(body, &u.Complete); err != nil { + return err + } + + case "failed": + if err := json.Unmarshal(w.Failed, &u.Failed); err != nil { + return err + } + + } + return nil +} + +// RelocationBatchResult : has no documentation (yet) +type RelocationBatchResult struct { + // Entries : has no documentation (yet) + Entries []*RelocationResult `json:"entries"` +} + +// NewRelocationBatchResult returns a new RelocationBatchResult instance +func NewRelocationBatchResult(Entries []*RelocationResult) *RelocationBatchResult { + s := new(RelocationBatchResult) + s.Entries = Entries + return s +} + +// RelocationResult : has no documentation (yet) +type RelocationResult struct { + // Metadata : has no documentation (yet) + Metadata IsMetadata `json:"metadata"` +} + +// NewRelocationResult returns a new RelocationResult instance +func NewRelocationResult(Metadata IsMetadata) *RelocationResult { + s := new(RelocationResult) + s.Metadata = Metadata + return s +} + +// RemovePropertiesArg : has no documentation (yet) +type RemovePropertiesArg struct { + // Path : A unique identifier for the file. + Path string `json:"path"` + // PropertyTemplateIds : A list of identifiers for a property template + // created by route properties/template/add. + PropertyTemplateIds []string `json:"property_template_ids"` +} + +// NewRemovePropertiesArg returns a new RemovePropertiesArg instance +func NewRemovePropertiesArg(Path string, PropertyTemplateIds []string) *RemovePropertiesArg { + s := new(RemovePropertiesArg) + s.Path = Path + s.PropertyTemplateIds = PropertyTemplateIds + return s +} + +// RemovePropertiesError : has no documentation (yet) +type RemovePropertiesError struct { + dropbox.Tagged + // PropertyGroupLookup : has no documentation (yet) + PropertyGroupLookup *LookUpPropertiesError `json:"property_group_lookup,omitempty"` +} + +// Valid tag values for RemovePropertiesError +const ( + RemovePropertiesErrorPropertyGroupLookup = "property_group_lookup" +) + +// UnmarshalJSON deserializes into a RemovePropertiesError instance +func (u *RemovePropertiesError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // PropertyGroupLookup : has no documentation (yet) + PropertyGroupLookup json.RawMessage `json:"property_group_lookup,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "property_group_lookup": + if err := json.Unmarshal(w.PropertyGroupLookup, &u.PropertyGroupLookup); err != nil { + return err + } + + } + return nil +} + +// RestoreArg : has no documentation (yet) +type RestoreArg struct { + // Path : The path to the file you want to restore. + Path string `json:"path"` + // Rev : The revision to restore for the file. + Rev string `json:"rev"` +} + +// NewRestoreArg returns a new RestoreArg instance +func NewRestoreArg(Path string, Rev string) *RestoreArg { + s := new(RestoreArg) + s.Path = Path + s.Rev = Rev + return s +} + +// RestoreError : has no documentation (yet) +type RestoreError struct { + dropbox.Tagged + // PathLookup : An error occurs when downloading metadata for the file. + PathLookup *LookupError `json:"path_lookup,omitempty"` + // PathWrite : An error occurs when trying to restore the file to that path. 
+ PathWrite *WriteError `json:"path_write,omitempty"` +} + +// Valid tag values for RestoreError +const ( + RestoreErrorPathLookup = "path_lookup" + RestoreErrorPathWrite = "path_write" + RestoreErrorInvalidRevision = "invalid_revision" + RestoreErrorOther = "other" +) + +// UnmarshalJSON deserializes into a RestoreError instance +func (u *RestoreError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // PathLookup : An error occurs when downloading metadata for the file. + PathLookup json.RawMessage `json:"path_lookup,omitempty"` + // PathWrite : An error occurs when trying to restore the file to that + // path. + PathWrite json.RawMessage `json:"path_write,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "path_lookup": + if err := json.Unmarshal(w.PathLookup, &u.PathLookup); err != nil { + return err + } + + case "path_write": + if err := json.Unmarshal(w.PathWrite, &u.PathWrite); err != nil { + return err + } + + } + return nil +} + +// SaveCopyReferenceArg : has no documentation (yet) +type SaveCopyReferenceArg struct { + // CopyReference : A copy reference returned by `copyReferenceGet`. + CopyReference string `json:"copy_reference"` + // Path : Path in the user's Dropbox that is the destination. + Path string `json:"path"` +} + +// NewSaveCopyReferenceArg returns a new SaveCopyReferenceArg instance +func NewSaveCopyReferenceArg(CopyReference string, Path string) *SaveCopyReferenceArg { + s := new(SaveCopyReferenceArg) + s.CopyReference = CopyReference + s.Path = Path + return s +} + +// SaveCopyReferenceError : has no documentation (yet) +type SaveCopyReferenceError struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path *WriteError `json:"path,omitempty"` +} + +// Valid tag values for SaveCopyReferenceError +const ( + SaveCopyReferenceErrorPath = "path" + SaveCopyReferenceErrorInvalidCopyReference = "invalid_copy_reference" + SaveCopyReferenceErrorNoPermission = "no_permission" + SaveCopyReferenceErrorNotFound = "not_found" + SaveCopyReferenceErrorTooManyFiles = "too_many_files" + SaveCopyReferenceErrorOther = "other" +) + +// UnmarshalJSON deserializes into a SaveCopyReferenceError instance +func (u *SaveCopyReferenceError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path json.RawMessage `json:"path,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "path": + if err := json.Unmarshal(w.Path, &u.Path); err != nil { + return err + } + + } + return nil +} + +// SaveCopyReferenceResult : has no documentation (yet) +type SaveCopyReferenceResult struct { + // Metadata : The metadata of the saved file or folder in the user's + // Dropbox. + Metadata IsMetadata `json:"metadata"` +} + +// NewSaveCopyReferenceResult returns a new SaveCopyReferenceResult instance +func NewSaveCopyReferenceResult(Metadata IsMetadata) *SaveCopyReferenceResult { + s := new(SaveCopyReferenceResult) + s.Metadata = Metadata + return s +} + +// SaveUrlArg : has no documentation (yet) +type SaveUrlArg struct { + // Path : The path in Dropbox where the URL will be saved to. + Path string `json:"path"` + // Url : The URL to be saved. 
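+ // Illustrative sketch (the save runs asynchronously; completion is
+ // reported through SaveUrlJobStatus below):
+ //
+ //  arg := NewSaveUrlArg("/archive/report.pdf", "https://example.com/report.pdf")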
+ Url string `json:"url"` +} + +// NewSaveUrlArg returns a new SaveUrlArg instance +func NewSaveUrlArg(Path string, Url string) *SaveUrlArg { + s := new(SaveUrlArg) + s.Path = Path + s.Url = Url + return s +} + +// SaveUrlError : has no documentation (yet) +type SaveUrlError struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path *WriteError `json:"path,omitempty"` +} + +// Valid tag values for SaveUrlError +const ( + SaveUrlErrorPath = "path" + SaveUrlErrorDownloadFailed = "download_failed" + SaveUrlErrorInvalidUrl = "invalid_url" + SaveUrlErrorNotFound = "not_found" + SaveUrlErrorOther = "other" +) + +// UnmarshalJSON deserializes into a SaveUrlError instance +func (u *SaveUrlError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path json.RawMessage `json:"path,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "path": + if err := json.Unmarshal(w.Path, &u.Path); err != nil { + return err + } + + } + return nil +} + +// SaveUrlJobStatus : has no documentation (yet) +type SaveUrlJobStatus struct { + dropbox.Tagged + // Complete : Metadata of the file where the URL is saved to. + Complete *FileMetadata `json:"complete,omitempty"` + // Failed : has no documentation (yet) + Failed *SaveUrlError `json:"failed,omitempty"` +} + +// Valid tag values for SaveUrlJobStatus +const ( + SaveUrlJobStatusComplete = "complete" + SaveUrlJobStatusFailed = "failed" +) + +// UnmarshalJSON deserializes into a SaveUrlJobStatus instance +func (u *SaveUrlJobStatus) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Complete : Metadata of the file where the URL is saved to. + Complete json.RawMessage `json:"complete,omitempty"` + // Failed : has no documentation (yet) + Failed json.RawMessage `json:"failed,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "complete": + if err := json.Unmarshal(body, &u.Complete); err != nil { + return err + } + + case "failed": + if err := json.Unmarshal(w.Failed, &u.Failed); err != nil { + return err + } + + } + return nil +} + +// SaveUrlResult : has no documentation (yet) +type SaveUrlResult struct { + dropbox.Tagged + // Complete : Metadata of the file where the URL is saved to. + Complete *FileMetadata `json:"complete,omitempty"` +} + +// Valid tag values for SaveUrlResult +const ( + SaveUrlResultComplete = "complete" +) + +// UnmarshalJSON deserializes into a SaveUrlResult instance +func (u *SaveUrlResult) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Complete : Metadata of the file where the URL is saved to. + Complete json.RawMessage `json:"complete,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "complete": + if err := json.Unmarshal(body, &u.Complete); err != nil { + return err + } + + } + return nil +} + +// SearchArg : has no documentation (yet) +type SearchArg struct { + // Path : The path in the user's Dropbox to search. Should probably be a + // folder. + Path string `json:"path"` + // Query : The string to search for. The search string is split on spaces + // into multiple tokens. For file name searching, the last token is used for + // prefix matching (i.e. "bat c" matches "bat cave" but not "batman car"). 
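+ // Illustrative sketch of a paged search; while `SearchResult.More` is
+ // true, set Start to the echoed `SearchResult.Start` and search again:
+ //
+ //  arg := NewSearchArg("/projects", "design doc")
+ //  arg.MaxResults = 50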
+ Query string `json:"query"` + // Start : The starting index within the search results (used for paging). + Start uint64 `json:"start"` + // MaxResults : The maximum number of search results to return. + MaxResults uint64 `json:"max_results"` + // Mode : The search mode (filename, filename_and_content, or + // deleted_filename). Note that searching file content is only available for + // Dropbox Business accounts. + Mode *SearchMode `json:"mode"` +} + +// NewSearchArg returns a new SearchArg instance +func NewSearchArg(Path string, Query string) *SearchArg { + s := new(SearchArg) + s.Path = Path + s.Query = Query + s.Start = 0 + s.MaxResults = 100 + s.Mode = &SearchMode{Tagged: dropbox.Tagged{"filename"}} + return s +} + +// SearchError : has no documentation (yet) +type SearchError struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path *LookupError `json:"path,omitempty"` +} + +// Valid tag values for SearchError +const ( + SearchErrorPath = "path" + SearchErrorOther = "other" +) + +// UnmarshalJSON deserializes into a SearchError instance +func (u *SearchError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path json.RawMessage `json:"path,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "path": + if err := json.Unmarshal(w.Path, &u.Path); err != nil { + return err + } + + } + return nil +} + +// SearchMatch : has no documentation (yet) +type SearchMatch struct { + // MatchType : The type of the match. + MatchType *SearchMatchType `json:"match_type"` + // Metadata : The metadata for the matched file or folder. + Metadata IsMetadata `json:"metadata"` +} + +// NewSearchMatch returns a new SearchMatch instance +func NewSearchMatch(MatchType *SearchMatchType, Metadata IsMetadata) *SearchMatch { + s := new(SearchMatch) + s.MatchType = MatchType + s.Metadata = Metadata + return s +} + +// SearchMatchType : Indicates what type of match was found for a given item. +type SearchMatchType struct { + dropbox.Tagged +} + +// Valid tag values for SearchMatchType +const ( + SearchMatchTypeFilename = "filename" + SearchMatchTypeContent = "content" + SearchMatchTypeBoth = "both" +) + +// SearchMode : has no documentation (yet) +type SearchMode struct { + dropbox.Tagged +} + +// Valid tag values for SearchMode +const ( + SearchModeFilename = "filename" + SearchModeFilenameAndContent = "filename_and_content" + SearchModeDeletedFilename = "deleted_filename" +) + +// SearchResult : has no documentation (yet) +type SearchResult struct { + // Matches : A list (possibly empty) of matches for the query. + Matches []*SearchMatch `json:"matches"` + // More : Used for paging. If true, indicates there is another page of + // results available that can be fetched by calling `search` again. + More bool `json:"more"` + // Start : Used for paging. Value to set the start argument to when calling + // `search` to fetch the next page of results. + Start uint64 `json:"start"` +} + +// NewSearchResult returns a new SearchResult instance +func NewSearchResult(Matches []*SearchMatch, More bool, Start uint64) *SearchResult { + s := new(SearchResult) + s.Matches = Matches + s.More = More + s.Start = Start + return s +} + +// ThumbnailArg : has no documentation (yet) +type ThumbnailArg struct { + // Path : The path to the image file you want to thumbnail. + Path string `json:"path"` + // Format : The format for the thumbnail image, jpeg (default) or png. 
For + // images that are photos, jpeg should be preferred, while png is better + // for screenshots and digital arts. + Format *ThumbnailFormat `json:"format"` + // Size : The size for the thumbnail image. + Size *ThumbnailSize `json:"size"` +} + +// NewThumbnailArg returns a new ThumbnailArg instance +func NewThumbnailArg(Path string) *ThumbnailArg { + s := new(ThumbnailArg) + s.Path = Path + s.Format = &ThumbnailFormat{Tagged: dropbox.Tagged{"jpeg"}} + s.Size = &ThumbnailSize{Tagged: dropbox.Tagged{"w64h64"}} + return s +} + +// ThumbnailError : has no documentation (yet) +type ThumbnailError struct { + dropbox.Tagged + // Path : An error occurs when downloading metadata for the image. + Path *LookupError `json:"path,omitempty"` +} + +// Valid tag values for ThumbnailError +const ( + ThumbnailErrorPath = "path" + ThumbnailErrorUnsupportedExtension = "unsupported_extension" + ThumbnailErrorUnsupportedImage = "unsupported_image" + ThumbnailErrorConversionError = "conversion_error" +) + +// UnmarshalJSON deserializes into a ThumbnailError instance +func (u *ThumbnailError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Path : An error occurs when downloading metadata for the image. + Path json.RawMessage `json:"path,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "path": + if err := json.Unmarshal(w.Path, &u.Path); err != nil { + return err + } + + } + return nil +} + +// ThumbnailFormat : has no documentation (yet) +type ThumbnailFormat struct { + dropbox.Tagged +} + +// Valid tag values for ThumbnailFormat +const ( + ThumbnailFormatJpeg = "jpeg" + ThumbnailFormatPng = "png" +) + +// ThumbnailSize : has no documentation (yet) +type ThumbnailSize struct { + dropbox.Tagged +} + +// Valid tag values for ThumbnailSize +const ( + ThumbnailSizeW32h32 = "w32h32" + ThumbnailSizeW64h64 = "w64h64" + ThumbnailSizeW128h128 = "w128h128" + ThumbnailSizeW640h480 = "w640h480" + ThumbnailSizeW1024h768 = "w1024h768" +) + +// UpdatePropertiesError : has no documentation (yet) +type UpdatePropertiesError struct { + dropbox.Tagged + // PropertyGroupLookup : has no documentation (yet) + PropertyGroupLookup *LookUpPropertiesError `json:"property_group_lookup,omitempty"` +} + +// Valid tag values for UpdatePropertiesError +const ( + UpdatePropertiesErrorPropertyGroupLookup = "property_group_lookup" +) + +// UnmarshalJSON deserializes into a UpdatePropertiesError instance +func (u *UpdatePropertiesError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // PropertyGroupLookup : has no documentation (yet) + PropertyGroupLookup json.RawMessage `json:"property_group_lookup,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "property_group_lookup": + if err := json.Unmarshal(w.PropertyGroupLookup, &u.PropertyGroupLookup); err != nil { + return err + } + + } + return nil +} + +// UpdatePropertyGroupArg : has no documentation (yet) +type UpdatePropertyGroupArg struct { + // Path : A unique identifier for the file. + Path string `json:"path"` + // UpdatePropertyGroups : Filled custom property templates associated with a + // file. 
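+ // Illustrative sketch (templateID is a hypothetical placeholder obtained
+ // from the properties/template/add route):
+ //
+ //  upd := NewPropertyGroupUpdate(templateID)
+ //  upd.RemoveFields = []string{"reviewed_by"}
+ //  arg := NewUpdatePropertyGroupArg("/specs/plan.md", []*PropertyGroupUpdate{upd})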
+ UpdatePropertyGroups []*PropertyGroupUpdate `json:"update_property_groups"` +} + +// NewUpdatePropertyGroupArg returns a new UpdatePropertyGroupArg instance +func NewUpdatePropertyGroupArg(Path string, UpdatePropertyGroups []*PropertyGroupUpdate) *UpdatePropertyGroupArg { + s := new(UpdatePropertyGroupArg) + s.Path = Path + s.UpdatePropertyGroups = UpdatePropertyGroups + return s +} + +// UploadError : has no documentation (yet) +type UploadError struct { + dropbox.Tagged + // Path : Unable to save the uploaded contents to a file. + Path *UploadWriteFailed `json:"path,omitempty"` +} + +// Valid tag values for UploadError +const ( + UploadErrorPath = "path" + UploadErrorOther = "other" +) + +// UnmarshalJSON deserializes into a UploadError instance +func (u *UploadError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Path : Unable to save the uploaded contents to a file. + Path json.RawMessage `json:"path,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "path": + if err := json.Unmarshal(body, &u.Path); err != nil { + return err + } + + } + return nil +} + +// UploadErrorWithProperties : has no documentation (yet) +type UploadErrorWithProperties struct { + dropbox.Tagged + // PropertiesError : has no documentation (yet) + PropertiesError *InvalidPropertyGroupError `json:"properties_error,omitempty"` +} + +// Valid tag values for UploadErrorWithProperties +const ( + UploadErrorWithPropertiesPropertiesError = "properties_error" +) + +// UnmarshalJSON deserializes into a UploadErrorWithProperties instance +func (u *UploadErrorWithProperties) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // PropertiesError : has no documentation (yet) + PropertiesError json.RawMessage `json:"properties_error,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "properties_error": + if err := json.Unmarshal(w.PropertiesError, &u.PropertiesError); err != nil { + return err + } + + } + return nil +} + +// UploadSessionAppendArg : has no documentation (yet) +type UploadSessionAppendArg struct { + // Cursor : Contains the upload session ID and the offset. + Cursor *UploadSessionCursor `json:"cursor"` + // Close : If true, the current session will be closed, at which point you + // won't be able to call `uploadSessionAppendV2` anymore with the current + // session. + Close bool `json:"close"` +} + +// NewUploadSessionAppendArg returns a new UploadSessionAppendArg instance +func NewUploadSessionAppendArg(Cursor *UploadSessionCursor) *UploadSessionAppendArg { + s := new(UploadSessionAppendArg) + s.Cursor = Cursor + s.Close = false + return s +} + +// UploadSessionCursor : has no documentation (yet) +type UploadSessionCursor struct { + // SessionId : The upload session ID (returned by `uploadSessionStart`). + SessionId string `json:"session_id"` + // Offset : The amount of data that has been uploaded so far. We use this to + // make sure upload data isn't lost or duplicated in the event of a network + // error. 
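+	// As an illustrative aside (an editorial sketch, not upstream
+	// documentation): a client that has already streamed two 4 MiB chunks
+	// would continue the session with
+	//
+	//	cursor := NewUploadSessionCursor(startRes.SessionId, 2*4*1024*1024)
+	//	appendArg := NewUploadSessionAppendArg(cursor)
+	//
+	// where startRes is assumed to be the result of a prior
+	// `uploadSessionStart` call.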
+ Offset uint64 `json:"offset"` +} + +// NewUploadSessionCursor returns a new UploadSessionCursor instance +func NewUploadSessionCursor(SessionId string, Offset uint64) *UploadSessionCursor { + s := new(UploadSessionCursor) + s.SessionId = SessionId + s.Offset = Offset + return s +} + +// UploadSessionFinishArg : has no documentation (yet) +type UploadSessionFinishArg struct { + // Cursor : Contains the upload session ID and the offset. + Cursor *UploadSessionCursor `json:"cursor"` + // Commit : Contains the path and other optional modifiers for the commit. + Commit *CommitInfo `json:"commit"` +} + +// NewUploadSessionFinishArg returns a new UploadSessionFinishArg instance +func NewUploadSessionFinishArg(Cursor *UploadSessionCursor, Commit *CommitInfo) *UploadSessionFinishArg { + s := new(UploadSessionFinishArg) + s.Cursor = Cursor + s.Commit = Commit + return s +} + +// UploadSessionFinishBatchArg : has no documentation (yet) +type UploadSessionFinishBatchArg struct { + // Entries : Commit information for each file in the batch. + Entries []*UploadSessionFinishArg `json:"entries"` +} + +// NewUploadSessionFinishBatchArg returns a new UploadSessionFinishBatchArg instance +func NewUploadSessionFinishBatchArg(Entries []*UploadSessionFinishArg) *UploadSessionFinishBatchArg { + s := new(UploadSessionFinishBatchArg) + s.Entries = Entries + return s +} + +// UploadSessionFinishBatchJobStatus : has no documentation (yet) +type UploadSessionFinishBatchJobStatus struct { + dropbox.Tagged + // Complete : The `uploadSessionFinishBatch` has finished. + Complete *UploadSessionFinishBatchResult `json:"complete,omitempty"` +} + +// Valid tag values for UploadSessionFinishBatchJobStatus +const ( + UploadSessionFinishBatchJobStatusComplete = "complete" +) + +// UnmarshalJSON deserializes into a UploadSessionFinishBatchJobStatus instance +func (u *UploadSessionFinishBatchJobStatus) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Complete : The `uploadSessionFinishBatch` has finished. + Complete json.RawMessage `json:"complete,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "complete": + if err := json.Unmarshal(body, &u.Complete); err != nil { + return err + } + + } + return nil +} + +// UploadSessionFinishBatchResult : has no documentation (yet) +type UploadSessionFinishBatchResult struct { + // Entries : Commit result for each file in the batch. 
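+	// An editorial sketch of consuming this field (all names as defined in
+	// this file): each entry is a tagged union, so callers typically switch
+	// on its Tag:
+	//
+	//	for _, e := range res.Entries {
+	//		switch e.Tag {
+	//		case UploadSessionFinishBatchResultEntrySuccess:
+	//			_ = e.Success // *FileMetadata
+	//		case UploadSessionFinishBatchResultEntryFailure:
+	//			_ = e.Failure // *UploadSessionFinishError
+	//		}
+	//	}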
+ Entries []*UploadSessionFinishBatchResultEntry `json:"entries"` +} + +// NewUploadSessionFinishBatchResult returns a new UploadSessionFinishBatchResult instance +func NewUploadSessionFinishBatchResult(Entries []*UploadSessionFinishBatchResultEntry) *UploadSessionFinishBatchResult { + s := new(UploadSessionFinishBatchResult) + s.Entries = Entries + return s +} + +// UploadSessionFinishBatchResultEntry : has no documentation (yet) +type UploadSessionFinishBatchResultEntry struct { + dropbox.Tagged + // Success : has no documentation (yet) + Success *FileMetadata `json:"success,omitempty"` + // Failure : has no documentation (yet) + Failure *UploadSessionFinishError `json:"failure,omitempty"` +} + +// Valid tag values for UploadSessionFinishBatchResultEntry +const ( + UploadSessionFinishBatchResultEntrySuccess = "success" + UploadSessionFinishBatchResultEntryFailure = "failure" +) + +// UnmarshalJSON deserializes into a UploadSessionFinishBatchResultEntry instance +func (u *UploadSessionFinishBatchResultEntry) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Success : has no documentation (yet) + Success json.RawMessage `json:"success,omitempty"` + // Failure : has no documentation (yet) + Failure json.RawMessage `json:"failure,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "success": + if err := json.Unmarshal(body, &u.Success); err != nil { + return err + } + + case "failure": + if err := json.Unmarshal(w.Failure, &u.Failure); err != nil { + return err + } + + } + return nil +} + +// UploadSessionFinishError : has no documentation (yet) +type UploadSessionFinishError struct { + dropbox.Tagged + // LookupFailed : The session arguments are incorrect; the value explains + // the reason. + LookupFailed *UploadSessionLookupError `json:"lookup_failed,omitempty"` + // Path : Unable to save the uploaded contents to a file. + Path *WriteError `json:"path,omitempty"` +} + +// Valid tag values for UploadSessionFinishError +const ( + UploadSessionFinishErrorLookupFailed = "lookup_failed" + UploadSessionFinishErrorPath = "path" + UploadSessionFinishErrorTooManySharedFolderTargets = "too_many_shared_folder_targets" + UploadSessionFinishErrorOther = "other" +) + +// UnmarshalJSON deserializes into a UploadSessionFinishError instance +func (u *UploadSessionFinishError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // LookupFailed : The session arguments are incorrect; the value + // explains the reason. + LookupFailed json.RawMessage `json:"lookup_failed,omitempty"` + // Path : Unable to save the uploaded contents to a file. + Path json.RawMessage `json:"path,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "lookup_failed": + if err := json.Unmarshal(w.LookupFailed, &u.LookupFailed); err != nil { + return err + } + + case "path": + if err := json.Unmarshal(w.Path, &u.Path); err != nil { + return err + } + + } + return nil +} + +// UploadSessionLookupError : has no documentation (yet) +type UploadSessionLookupError struct { + dropbox.Tagged + // IncorrectOffset : The specified offset was incorrect. See the value for + // the correct offset. This error may occur when a previous request was + // received and processed successfully but the client did not receive the + // response, e.g. due to a network error. 
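+	// A hedged recovery sketch (editorial, not upstream documentation): on
+	// this error a client can resynchronize by adopting the server's offset
+	// and re-sending from there:
+	//
+	//	if lookupErr.Tag == UploadSessionLookupErrorIncorrectOffset {
+	//		cursor.Offset = lookupErr.IncorrectOffset.CorrectOffset
+	//		// retry the append starting at cursor.Offset
+	//	}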
+ IncorrectOffset *UploadSessionOffsetError `json:"incorrect_offset,omitempty"` +} + +// Valid tag values for UploadSessionLookupError +const ( + UploadSessionLookupErrorNotFound = "not_found" + UploadSessionLookupErrorIncorrectOffset = "incorrect_offset" + UploadSessionLookupErrorClosed = "closed" + UploadSessionLookupErrorNotClosed = "not_closed" + UploadSessionLookupErrorOther = "other" +) + +// UnmarshalJSON deserializes into a UploadSessionLookupError instance +func (u *UploadSessionLookupError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // IncorrectOffset : The specified offset was incorrect. See the value + // for the correct offset. This error may occur when a previous request + // was received and processed successfully but the client did not + // receive the response, e.g. due to a network error. + IncorrectOffset json.RawMessage `json:"incorrect_offset,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "incorrect_offset": + if err := json.Unmarshal(body, &u.IncorrectOffset); err != nil { + return err + } + + } + return nil +} + +// UploadSessionOffsetError : has no documentation (yet) +type UploadSessionOffsetError struct { + // CorrectOffset : The offset up to which data has been collected. + CorrectOffset uint64 `json:"correct_offset"` +} + +// NewUploadSessionOffsetError returns a new UploadSessionOffsetError instance +func NewUploadSessionOffsetError(CorrectOffset uint64) *UploadSessionOffsetError { + s := new(UploadSessionOffsetError) + s.CorrectOffset = CorrectOffset + return s +} + +// UploadSessionStartArg : has no documentation (yet) +type UploadSessionStartArg struct { + // Close : If true, the current session will be closed, at which point you + // won't be able to call `uploadSessionAppendV2` anymore with the current + // session. + Close bool `json:"close"` +} + +// NewUploadSessionStartArg returns a new UploadSessionStartArg instance +func NewUploadSessionStartArg() *UploadSessionStartArg { + s := new(UploadSessionStartArg) + s.Close = false + return s +} + +// UploadSessionStartResult : has no documentation (yet) +type UploadSessionStartResult struct { + // SessionId : A unique identifier for the upload session. Pass this to + // `uploadSessionAppendV2` and `uploadSessionFinish`. + SessionId string `json:"session_id"` +} + +// NewUploadSessionStartResult returns a new UploadSessionStartResult instance +func NewUploadSessionStartResult(SessionId string) *UploadSessionStartResult { + s := new(UploadSessionStartResult) + s.SessionId = SessionId + return s +} + +// UploadWriteFailed : has no documentation (yet) +type UploadWriteFailed struct { + // Reason : The reason why the file couldn't be saved. + Reason *WriteError `json:"reason"` + // UploadSessionId : The upload session ID; this may be used to retry the + // commit. + UploadSessionId string `json:"upload_session_id"` +} + +// NewUploadWriteFailed returns a new UploadWriteFailed instance +func NewUploadWriteFailed(Reason *WriteError, UploadSessionId string) *UploadWriteFailed { + s := new(UploadWriteFailed) + s.Reason = Reason + s.UploadSessionId = UploadSessionId + return s +} + +// VideoMetadata : Metadata for a video. +type VideoMetadata struct { + MediaMetadata + // Duration : The duration of the video in milliseconds. 
+ Duration uint64 `json:"duration,omitempty"` +} + +// NewVideoMetadata returns a new VideoMetadata instance +func NewVideoMetadata() *VideoMetadata { + s := new(VideoMetadata) + return s +} + +// WriteConflictError : has no documentation (yet) +type WriteConflictError struct { + dropbox.Tagged +} + +// Valid tag values for WriteConflictError +const ( + WriteConflictErrorFile = "file" + WriteConflictErrorFolder = "folder" + WriteConflictErrorFileAncestor = "file_ancestor" + WriteConflictErrorOther = "other" +) + +// WriteError : has no documentation (yet) +type WriteError struct { + dropbox.Tagged + // MalformedPath : has no documentation (yet) + MalformedPath string `json:"malformed_path,omitempty"` + // Conflict : Couldn't write to the target path because there was something + // in the way. + Conflict *WriteConflictError `json:"conflict,omitempty"` +} + +// Valid tag values for WriteError +const ( + WriteErrorMalformedPath = "malformed_path" + WriteErrorConflict = "conflict" + WriteErrorNoWritePermission = "no_write_permission" + WriteErrorInsufficientSpace = "insufficient_space" + WriteErrorDisallowedName = "disallowed_name" + WriteErrorOther = "other" +) + +// UnmarshalJSON deserializes into a WriteError instance +func (u *WriteError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // MalformedPath : has no documentation (yet) + MalformedPath json.RawMessage `json:"malformed_path,omitempty"` + // Conflict : Couldn't write to the target path because there was + // something in the way. + Conflict json.RawMessage `json:"conflict,omitempty"` + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "malformed_path": + if err := json.Unmarshal(body, &u.MalformedPath); err != nil { + return err + } + + case "conflict": + if err := json.Unmarshal(w.Conflict, &u.Conflict); err != nil { + return err + } + + } + return nil +} + +// WriteMode : Your intent when writing a file to some path. This is used to +// determine what constitutes a conflict and what the autorename strategy is. In +// some situations, the conflict behavior is identical: (a) If the target path +// doesn't contain anything, the file is always written; no conflict. (b) If the +// target path contains a folder, it's always a conflict. (c) If the target path +// contains a file with identical contents, nothing gets written; no conflict. +// The conflict checking differs in the case where there's a file at the target +// path with contents different from the contents you're trying to write. +type WriteMode struct { + dropbox.Tagged + // Update : Overwrite if the given "rev" matches the existing file's "rev". + // The autorename strategy is to append the string "conflicted copy" to the + // file name. For example, "document.txt" might become "document (conflicted + // copy).txt" or "document (Panda's conflicted copy).txt". 
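+	// An editorial construction sketch (mirroring the tagged-union pattern
+	// used elsewhere in this file): a caller requesting a plain overwrite
+	// would build
+	//
+	//	mode := &WriteMode{Tagged: dropbox.Tagged{Tag: WriteModeOverwrite}}
+	//
+	// whereas "update" mode additionally carries the expected rev in this
+	// field.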
+ Update string `json:"update,omitempty"` +} + +// Valid tag values for WriteMode +const ( + WriteModeAdd = "add" + WriteModeOverwrite = "overwrite" + WriteModeUpdate = "update" +) + +// UnmarshalJSON deserializes into a WriteMode instance +func (u *WriteMode) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + } + var w wrap + if err := json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "update": + if err := json.Unmarshal(body, &u.Update); err != nil { + return err + } + + } + return nil +} diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/properties/types.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/properties/types.go new file mode 100644 index 0000000..926f5ca --- /dev/null +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/properties/types.go @@ -0,0 +1,211 @@ +// Copyright (c) Dropbox, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package properties : This namespace contains helper entities for property and +// property/template endpoints. +package properties + +import ( + "encoding/json" + + "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox" +) + +// GetPropertyTemplateArg : has no documentation (yet) +type GetPropertyTemplateArg struct { + // TemplateId : An identifier for property template added by route + // properties/template/add. + TemplateId string `json:"template_id"` +} + +// NewGetPropertyTemplateArg returns a new GetPropertyTemplateArg instance +func NewGetPropertyTemplateArg(TemplateId string) *GetPropertyTemplateArg { + s := new(GetPropertyTemplateArg) + s.TemplateId = TemplateId + return s +} + +// PropertyGroupTemplate : Describes property templates that can be filled and +// associated with a file. +type PropertyGroupTemplate struct { + // Name : A display name for the property template. Property template names + // can be up to 256 bytes. + Name string `json:"name"` + // Description : Description for new property template. Property template + // descriptions can be up to 1024 bytes. + Description string `json:"description"` + // Fields : This is a list of custom properties associated with a property + // template. There can be up to 64 properties in a single property template. 
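+	// A hedged usage sketch (editorial; the constructors are defined later
+	// in this file, and the template content is illustrative):
+	//
+	//	tmpl := NewPropertyGroupTemplate("Tags", "Per-file project tags",
+	//		[]*PropertyFieldTemplate{
+	//			NewPropertyFieldTemplate("project", "Project name",
+	//				&PropertyType{Tagged: dropbox.Tagged{Tag: PropertyTypeString}}),
+	//		})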
+	Fields []*PropertyFieldTemplate `json:"fields"`
+}
+
+// NewPropertyGroupTemplate returns a new PropertyGroupTemplate instance
+func NewPropertyGroupTemplate(Name string, Description string, Fields []*PropertyFieldTemplate) *PropertyGroupTemplate {
+	s := new(PropertyGroupTemplate)
+	s.Name = Name
+	s.Description = Description
+	s.Fields = Fields
+	return s
+}
+
+// GetPropertyTemplateResult : The property template for the specified template.
+type GetPropertyTemplateResult struct {
+	PropertyGroupTemplate
+}
+
+// NewGetPropertyTemplateResult returns a new GetPropertyTemplateResult instance
+func NewGetPropertyTemplateResult(Name string, Description string, Fields []*PropertyFieldTemplate) *GetPropertyTemplateResult {
+	s := new(GetPropertyTemplateResult)
+	s.Name = Name
+	s.Description = Description
+	s.Fields = Fields
+	return s
+}
+
+// ListPropertyTemplateIds : has no documentation (yet)
+type ListPropertyTemplateIds struct {
+	// TemplateIds : List of identifiers for templates added by route
+	// properties/template/add.
+	TemplateIds []string `json:"template_ids"`
+}
+
+// NewListPropertyTemplateIds returns a new ListPropertyTemplateIds instance
+func NewListPropertyTemplateIds(TemplateIds []string) *ListPropertyTemplateIds {
+	s := new(ListPropertyTemplateIds)
+	s.TemplateIds = TemplateIds
+	return s
+}
+
+// PropertyTemplateError : has no documentation (yet)
+type PropertyTemplateError struct {
+	dropbox.Tagged
+	// TemplateNotFound : Property template does not exist for given identifier.
+	TemplateNotFound string `json:"template_not_found,omitempty"`
+}
+
+// Valid tag values for PropertyTemplateError
+const (
+	PropertyTemplateErrorTemplateNotFound = "template_not_found"
+	PropertyTemplateErrorRestrictedContent = "restricted_content"
+	PropertyTemplateErrorOther = "other"
+)
+
+// UnmarshalJSON deserializes into a PropertyTemplateError instance
+func (u *PropertyTemplateError) UnmarshalJSON(body []byte) error {
+	type wrap struct {
+		dropbox.Tagged
+	}
+	var w wrap
+	if err := json.Unmarshal(body, &w); err != nil {
+		return err
+	}
+	u.Tag = w.Tag
+	switch u.Tag {
+	case "template_not_found":
+		if err := json.Unmarshal(body, &u.TemplateNotFound); err != nil {
+			return err
+		}
+
+	}
+	return nil
+}
+
+// ModifyPropertyTemplateError : has no documentation (yet)
+type ModifyPropertyTemplateError struct {
+	dropbox.Tagged
+}
+
+// Valid tag values for ModifyPropertyTemplateError
+const (
+	ModifyPropertyTemplateErrorConflictingPropertyNames = "conflicting_property_names"
+	ModifyPropertyTemplateErrorTooManyProperties = "too_many_properties"
+	ModifyPropertyTemplateErrorTooManyTemplates = "too_many_templates"
+	ModifyPropertyTemplateErrorTemplateAttributeTooLarge = "template_attribute_too_large"
+)
+
+// PropertyField : has no documentation (yet)
+type PropertyField struct {
+	// Name : This is the name or key of a custom property in a property
+	// template. File property names can be up to 256 bytes.
+	Name string `json:"name"`
+	// Value : Value of a custom property attached to a file. Values can be up
+	// to 1024 bytes.
+	Value string `json:"value"`
+}
+
+// NewPropertyField returns a new PropertyField instance
+func NewPropertyField(Name string, Value string) *PropertyField {
+	s := new(PropertyField)
+	s.Name = Name
+	s.Value = Value
+	return s
+}
+
+// PropertyFieldTemplate : Describes a single property field type that can be
+// part of a property template.
+type PropertyFieldTemplate struct {
+	// Name : This is the name or key of a custom property in a property
+	// template. File property names can be up to 256 bytes.
+	Name string `json:"name"`
+	// Description : This is the description for a custom property in a property
+	// template. File property descriptions can be up to 1024 bytes.
+	Description string `json:"description"`
+	// Type : This is the data type of the value of this property. This type
+	// will be enforced upon property creation and modifications.
+	Type *PropertyType `json:"type"`
+}
+
+// NewPropertyFieldTemplate returns a new PropertyFieldTemplate instance
+func NewPropertyFieldTemplate(Name string, Description string, Type *PropertyType) *PropertyFieldTemplate {
+	s := new(PropertyFieldTemplate)
+	s.Name = Name
+	s.Description = Description
+	s.Type = Type
+	return s
+}
+
+// PropertyGroup : Collection of custom properties in filled property templates.
+type PropertyGroup struct {
+	// TemplateId : A unique identifier for a property template type.
+	TemplateId string `json:"template_id"`
+	// Fields : This is a list of custom properties associated with a file.
+	// There can be up to 32 properties for a template.
+	Fields []*PropertyField `json:"fields"`
+}
+
+// NewPropertyGroup returns a new PropertyGroup instance
+func NewPropertyGroup(TemplateId string, Fields []*PropertyField) *PropertyGroup {
+	s := new(PropertyGroup)
+	s.TemplateId = TemplateId
+	s.Fields = Fields
+	return s
+}
+
+// PropertyType : Data type of the given property added. This endpoint is in
+// beta and only properties of type string are supported.
+type PropertyType struct {
+	dropbox.Tagged
+}
+
+// Valid tag values for PropertyType
+const (
+	PropertyTypeString = "string"
+	PropertyTypeOther = "other"
+)
diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sdk.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sdk.go
new file mode 100644
index 0000000..5813c03
--- /dev/null
+++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sdk.go
@@ -0,0 +1,118 @@
+// Copyright (c) Dropbox, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
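+
+// Editorial note: the Config and Context types below are the SDK's entry
+// point. A hedged initialization sketch from a consumer's perspective, using
+// only identifiers defined in this file (the token variable is hypothetical):
+//
+//	config := dropbox.Config{Token: token}
+//	ctx := dropbox.NewContext(config)
+//	url := ctx.GenerateURL("api", "files", "list_folder")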
+ +package dropbox + +import ( + "fmt" + "net/http" + + "golang.org/x/oauth2" +) + +const ( + apiVersion = 2 + defaultDomain = ".dropboxapi.com" + hostAPI = "api" + hostContent = "content" + hostNotify = "notify" + sdkVersion = "1.0.0-beta" + specVersion = "fee0a62" +) + +// Version returns the current SDK version and API Spec version +func Version() (string, string) { + return sdkVersion, specVersion +} + +// Config contains parameters for configuring the SDK. +type Config struct { + // OAuth2 access token + Token string + // Enable verbose logging in SDK + Verbose bool + // Used with APIs that support operations as another user + AsMemberID string + // No need to set -- for testing only + Domain string +} + +// Context is the base client context used to implement per-namespace clients. +type Context struct { + Client *http.Client + Config Config + hostMap map[string]string +} + +// GenerateURL returns the appropriate URL for given namespace/route. +func (c *Context) GenerateURL(host string, namespace string, route string) string { + fqHost := c.hostMap[host] + return fmt.Sprintf("https://%s/%d/%s/%s", fqHost, apiVersion, namespace, route) +} + +// NewContext returns a new Context with the given Config. +func NewContext(c Config) Context { + domain := c.Domain + if domain == "" { + domain = defaultDomain + } + + hostMap := map[string]string{ + hostAPI: hostAPI + domain, + hostContent: hostContent + domain, + hostNotify: hostNotify + domain, + } + var conf = &oauth2.Config{Endpoint: OAuthEndpoint(domain)} + tok := &oauth2.Token{AccessToken: c.Token} + return Context{conf.Client(oauth2.NoContext, tok), c, hostMap} +} + +// OAuthEndpoint constructs an `oauth2.Endpoint` for the given domain +func OAuthEndpoint(domain string) oauth2.Endpoint { + if domain == "" { + domain = defaultDomain + } + authURL := fmt.Sprintf("https://meta%s/1/oauth2/authorize", domain) + tokenURL := fmt.Sprintf("https://api%s/1/oauth2/token", domain) + if domain == defaultDomain { + authURL = "https://www.dropbox.com/1/oauth2/authorize" + } + return oauth2.Endpoint{AuthURL: authURL, TokenURL: tokenURL} +} + +// Tagged is used for tagged unions. +type Tagged struct { + Tag string `json:".tag"` +} + +// APIError is the base type for endpoint-specific errors. +type APIError struct { + ErrorSummary string `json:"error_summary"` +} + +func (e APIError) Error() string { + return e.ErrorSummary +} + +func init() { + // These are not registered in the oauth library by default + oauth2.RegisterBrokenAuthHeaderProvider("https://api.dropboxapi.com") + oauth2.RegisterBrokenAuthHeaderProvider("https://api-dbdev.dev.corp.dropbox.com") +} diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml new file mode 100644 index 0000000..0a7c136 --- /dev/null +++ b/vendor/github.com/spf13/pflag/.travis.yml @@ -0,0 +1,21 @@ +sudo: false + +language: go + +go: + - 1.5.4 + - 1.6.3 + - 1.7 + - tip + +matrix: + allow_failures: + - go: tip +install: + - go get github.com/golang/lint/golint + - export PATH=$GOPATH/bin:$PATH + - go install ./... + +script: + - verify/all.sh -v + - go test ./... diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE new file mode 100644 index 0000000..63ed1cf --- /dev/null +++ b/vendor/github.com/spf13/pflag/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Alex Ogier. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md new file mode 100644 index 0000000..08ad945 --- /dev/null +++ b/vendor/github.com/spf13/pflag/README.md @@ -0,0 +1,275 @@ +[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) + +## Description + +pflag is a drop-in replacement for Go's flag package, implementing +POSIX/GNU-style --flags. + +pflag is compatible with the [GNU extensions to the POSIX recommendations +for command-line options][1]. For a more precise description, see the +"Command-line flag syntax" section below. + +[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html + +pflag is available under the same style of BSD license as the Go language, +which can be found in the LICENSE file. + +## Installation + +pflag is available using the standard `go get` command. + +Install by running: + + go get github.com/spf13/pflag + +Run tests by running: + + go test github.com/spf13/pflag + +## Usage + +pflag is a drop-in replacement of Go's native flag package. If you import +pflag under the name "flag" then all code should continue to function +with no changes. + +``` go +import flag "github.com/spf13/pflag" +``` + +There is one exception to this: if you directly instantiate the Flag struct +there is one more field "Shorthand" that you will need to set. +Most code never instantiates this struct directly, and instead uses +functions such as String(), BoolVar(), and Var(), and is therefore +unaffected. + +Define flags using flag.String(), Bool(), Int(), etc. + +This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + +``` go +var ip *int = flag.Int("flagname", 1234, "help message for flagname") +``` + +If you like, you can bind the flag to a variable using the Var() functions. 
+
+``` go
+var flagvar int
+func init() {
+	flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+}
+```
+
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+
+``` go
+flag.Var(&flagVal, "name", "help message for flagname")
+```
+
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+
+``` go
+flag.Parse()
+```
+
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+
+``` go
+fmt.Println("ip has value ", *ip)
+fmt.Println("flagvar has value ", flagvar)
+```
+
+There are helper functions to get a flag's value later if you have a
+FlagSet but find it difficult to keep up with all of the flag pointers in
+your code.
+If you have a pflag.FlagSet with a flag called 'flagname' of type int you
+can use GetInt() to get the int value. But notice that 'flagname' must exist
+and it must be an int. GetString("flagname") will fail.
+
+``` go
+i, err := flagset.GetInt("flagname")
+```
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+var flagvar bool
+func init() {
+	flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+}
+flag.VarP(&flagVal, "varname", "v", "help message")
+```
+
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+The default set of command-line flags is controlled by
+top-level functions.  The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+
+## Setting no option default values for flags
+
+After you create a flag it is possible to set the pflag.NoOptDefVal for
+the given flag. Doing this changes the meaning of the flag slightly. If
+a flag has a NoOptDefVal and the flag is set on the command line without
+an option the flag will be set to the NoOptDefVal. For example given:
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+flag.Lookup("flagname").NoOptDefVal = "4321"
+```
+
+This would result in something like:
+
+| Parsed Arguments | Resulting Value |
+| ------------- | ------------- |
+| --flagname=1357 | ip=1357 |
+| --flagname | ip=4321 |
+| [nothing] | ip=1234 |
+
+## Command line flag syntax
+
+```
+--flag    // boolean flags, or flags with no option default values
+--flag x  // only on flags without a default value
+--flag=x
+```
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags.
All but the last shorthand letter must be boolean flags +or a flag with a default value + +``` +// boolean or flags where the 'no option default value' is set +-f +-f=true +-abc +but +-b true is INVALID + +// non-boolean and flags without a 'no option default value' +-n 1234 +-n=1234 +-n1234 + +// mixed +-abcs "hello" +-absd="hello" +-abcs1234 +``` + +Flag parsing stops after the terminator "--". Unlike the flag package, +flags can be interspersed with arguments anywhere on the command line +before this terminator. + +Integer flags accept 1234, 0664, 0x1234 and may be negative. +Boolean flags (in their long form) accept 1, 0, t, f, true, false, +TRUE, FALSE, True, False. +Duration flags accept any input valid for time.ParseDuration. + +## Mutating or "Normalizing" Flag names + +It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow. + +**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag + +``` go +func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { + from := []string{"-", "_"} + to := "." + for _, sep := range from { + name = strings.Replace(name, sep, to, -1) + } + return pflag.NormalizedName(name) +} + +myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc) +``` + +**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name + +``` go +func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { + switch name { + case "old-flag-name": + name = "new-flag-name" + break + } + return pflag.NormalizedName(name) +} + +myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) +``` + +## Deprecating a flag or its shorthand +It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used. + +**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead. +```go +// deprecate a flag by specifying its name and a usage message +flags.MarkDeprecated("badflag", "please use --good-flag instead") +``` +This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used. + +**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n". +```go +// deprecate a flag shorthand by specifying its flag name and a usage message +flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only") +``` +This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used. + +Note that usage message is essential here, and it should not be empty. + +## Hidden flags +It is possible to mark a flag as hidden, meaning it will still function as normal, however will not show up in usage/help text. + +**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available. +```go +// hide a flag by specifying its name +flags.MarkHidden("secretFlag") +``` + +## Supporting Go flags when using pflag +In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. 
This is usually necessary
+to support flags defined by third-party dependencies (e.g. `golang/glog`).
+
+**Example**: You want to add the Go flags to the `CommandLine` flagset.
+```go
+import (
+	goflag "flag"
+	flag "github.com/spf13/pflag"
+)
+
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+
+func main() {
+	flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+	flag.Parse()
+}
+```
+
+## More info
+
+You can see the full reference documentation of the pflag package
+[at godoc.org][3], or through go's standard documentation system by
+running `godoc -http=:6060` and browsing to
+[http://localhost:6060/pkg/github.com/spf13/pflag][2] after
+installation.
+
+[2]: http://localhost:6060/pkg/github.com/spf13/pflag
+[3]: http://godoc.org/github.com/spf13/pflag
diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go
new file mode 100644
index 0000000..c4c5c0b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool.go
@@ -0,0 +1,94 @@
+package pflag
+
+import "strconv"
+
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+	Value
+	IsBoolFlag() bool
+}
+
+// -- bool Value
+type boolValue bool
+
+func newBoolValue(val bool, p *bool) *boolValue {
+	*p = val
+	return (*boolValue)(p)
+}
+
+func (b *boolValue) Set(s string) error {
+	v, err := strconv.ParseBool(s)
+	*b = boolValue(v)
+	return err
+}
+
+func (b *boolValue) Type() string {
+	return "bool"
+}
+
+func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+func boolConv(sval string) (interface{}, error) {
+	return strconv.ParseBool(sval)
+}
+
+// GetBool returns the bool value of a flag with the given name
+func (f *FlagSet) GetBool(name string) (bool, error) {
+	val, err := f.getFlagType(name, "bool", boolConv)
+	if err != nil {
+		return false, err
+	}
+	return val.(bool), nil
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
+	f.BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+	flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
+	flag.NoOptDefVal = "true"
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func BoolVar(p *bool, name string, value bool, usage string) {
+	BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+	flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage)
+	flag.NoOptDefVal = "true"
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
+	return f.BoolP(name, "", value, usage)
+}
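
Editorial note (not part of the vendored diff): the `BoolVarP` implementations above set `NoOptDefVal` to `"true"`, which is what allows a boolean flag to appear on the command line without an `=value`. A minimal, self-contained sketch of that behavior; the flag set and flag names are illustrative:

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	verbose := fs.BoolP("verbose", "v", false, "enable verbose output")

	// A bare --verbose (no "=value") resolves to NoOptDefVal, i.e. "true".
	if err := fs.Parse([]string{"--verbose"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(*verbose) // true
}
```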
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
+	p := new(bool)
+	f.BoolVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func Bool(name string, value bool, usage string) *bool {
+	return BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func BoolP(name, shorthand string, value bool, usage string) *bool {
+	b := CommandLine.BoolP(name, shorthand, value, usage)
+	return b
+}
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
new file mode 100644
index 0000000..d22be41
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/count.go
@@ -0,0 +1,94 @@
+package pflag
+
+import "strconv"
+
+// -- count Value
+type countValue int
+
+func newCountValue(val int, p *int) *countValue {
+	*p = val
+	return (*countValue)(p)
+}
+
+func (i *countValue) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 64)
+	// -1 means that no specific value was passed, so increment
+	if v == -1 {
+		*i = countValue(*i + 1)
+	} else {
+		*i = countValue(v)
+	}
+	return err
+}
+
+func (i *countValue) Type() string {
+	return "count"
+}
+
+func (i *countValue) String() string { return strconv.Itoa(int(*i)) }
+
+func countConv(sval string) (interface{}, error) {
+	i, err := strconv.Atoi(sval)
+	if err != nil {
+		return nil, err
+	}
+	return i, nil
+}
+
+// GetCount returns the int value of a flag with the given name
+func (f *FlagSet) GetCount(name string) (int, error) {
+	val, err := f.getFlagType(name, "count", countConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int), nil
+}
+
+// CountVar defines a count flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line.
+func (f *FlagSet) CountVar(p *int, name string, usage string) {
+	f.CountVarP(p, name, "", usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
+	flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
+	flag.NoOptDefVal = "-1"
+}
+
+// CountVar is like FlagSet.CountVar, but the flag is placed on the CommandLine instead of a given flag set
+func CountVar(p *int, name string, usage string) {
+	CommandLine.CountVar(p, name, usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func CountVarP(p *int, name, shorthand string, usage string) {
+	CommandLine.CountVarP(p, name, shorthand, usage)
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line.
+func (f *FlagSet) Count(name string, usage string) *int {
+	p := new(int)
+	f.CountVarP(p, name, "", usage)
+	return p
+}
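
Editorial note (not part of the vendored diff): `countValue.Set` above treats the `NoOptDefVal` sentinel `"-1"` as "increment", so repeating the shorthand accumulates. A hedged sketch of that behavior with an illustrative flag:

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	verbosity := fs.CountP("verbose", "v", "verbosity level (repeatable)")

	// Each occurrence of -v passes the "-1" sentinel to Set, adding 1.
	if err := fs.Parse([]string{"-vvv"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(*verbosity) // 3
}
```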
+
+// CountP is like Count, but takes a shorthand for the flag name.
+func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
+	p := new(int)
+	f.CountVarP(p, name, shorthand, usage)
+	return p
+}
+
+// Count is like FlagSet.Count, but the flag is placed on the CommandLine instead of a given flag set
+func Count(name string, usage string) *int {
+	return CommandLine.CountP(name, "", usage)
+}
+
+// CountP is like Count, but takes a shorthand for the flag name.
+func CountP(name, shorthand string, usage string) *int {
+	return CommandLine.CountP(name, shorthand, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go
new file mode 100644
index 0000000..e9debef
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration.go
@@ -0,0 +1,86 @@
+package pflag
+
+import (
+	"time"
+)
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+	*p = val
+	return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+	v, err := time.ParseDuration(s)
+	*d = durationValue(v)
+	return err
+}
+
+func (d *durationValue) Type() string {
+	return "duration"
+}
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+func durationConv(sval string) (interface{}, error) {
+	return time.ParseDuration(sval)
+}
+
+// GetDuration returns the duration value of a flag with the given name
+func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
+	val, err := f.getFlagType(name, "duration", durationConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(time.Duration), nil
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+	f.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+	f.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+	CommandLine.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+	CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
+	p := new(time.Duration)
+	f.DurationVarP(p, name, "", value, usage)
+	return p
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+	p := new(time.Duration)
+	f.DurationVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func Duration(name string, value time.Duration, usage string) *time.Duration {
+	return CommandLine.DurationP(name, "", value, usage)
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+	return CommandLine.DurationP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
new file mode 100644
index 0000000..b0b0d46
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -0,0 +1,947 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the GNU extensions to the POSIX recommendations
+for command-line options. See
+http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+Usage:
+
+pflag is a drop-in replacement of Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+	import flag "github.com/spf13/pflag"
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+	var ip = flag.Int("flagname", 1234, "help message for flagname")
+If you like, you can bind the flag to a variable using the Var() functions.
+	var flagvar int
+	func init() {
+		flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+	}
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+	flag.Var(&flagVal, "name", "help message for flagname")
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+	flag.Parse()
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+	fmt.Println("ip has value ", *ip)
+	fmt.Println("flagvar has value ", flagvar)
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+	var ip = flag.IntP("flagname", "f", 1234, "help message")
+	var flagvar bool
+	func init() {
+		flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+	}
+	flag.VarP(&flagVal, "varname", "v", "help message")
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+Command line flag syntax:
+	--flag    // boolean flags only
+	--flag=x
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags.
+	// boolean flags
+	-f
+	-abc
+	// non-boolean flags
+	-n 1234
+	-Ifile
+	// mixed
+	-abcs "hello"
+	-abcn1234
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+The default set of command-line flags is controlled by
+top-level functions.  The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+*/
+package pflag
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"sort"
+	"strings"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("pflag: help requested")
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+const (
+	// ContinueOnError will return an err from Parse() if an error is found
+	ContinueOnError ErrorHandling = iota
+	// ExitOnError will call os.Exit(2) if an error is found when parsing
+	ExitOnError
+	// PanicOnError will panic() if an error is found when parsing flags
+	PanicOnError
+)
+
+// NormalizedName is a flag name that has been normalized according to rules
+// for the FlagSet (e.g. making '-' and '_' equivalent).
+type NormalizedName string
+
+// A FlagSet represents a set of defined flags.
+type FlagSet struct {
+	// Usage is the function called when an error occurs while parsing flags.
+	// The field is a function (not a method) that may be changed to point to
+	// a custom error handler.
+	Usage func()
+
+	name              string
+	parsed            bool
+	actual            map[NormalizedName]*Flag
+	formal            map[NormalizedName]*Flag
+	shorthands        map[byte]*Flag
+	args              []string // arguments after flags
+	argsLenAtDash     int      // len(args) when a '--' was located when parsing, or -1 if no --
+	exitOnError       bool     // does the program exit if there's an error?
+	errorHandling     ErrorHandling
+	output            io.Writer // nil means stderr; use out() accessor
+	interspersed      bool      // allow interspersed option/non-option args
+	normalizeNameFunc func(f *FlagSet, name string) NormalizedName
+}
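
Editorial note (not part of the vendored diff): the `Value` interface defined just below is the extension point for the custom flags the package doc above mentions. A hedged sketch of a hypothetical byte-size flag type (all names here are illustrative):

```go
package main

import (
	"fmt"
	"strconv"

	flag "github.com/spf13/pflag"
)

// sizeValue is a hypothetical custom flag type counted in bytes.
type sizeValue uint64

func (s *sizeValue) String() string { return strconv.FormatUint(uint64(*s), 10) }

// Set parses the command-line text into the stored value.
func (s *sizeValue) Set(v string) error {
	n, err := strconv.ParseUint(v, 10, 64)
	if err != nil {
		return err
	}
	*s = sizeValue(n)
	return nil
}

func (s *sizeValue) Type() string { return "size" }

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	var size sizeValue = 1024 // the default is the initial value of the variable
	fs.VarP(&size, "size", "s", "buffer size in bytes")

	if err := fs.Parse([]string{"--size=4096"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(size) // 4096
}
```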
+
+// A Flag represents the state of a flag.
+type Flag struct {
+	Name string // name as it appears on command line
+	Shorthand string // one-letter abbreviated flag
+	Usage string // help message
+	Value Value // value as set
+	DefValue string // default value (as text); for usage message
+	Changed bool // true if the user set the value; false if left to the default
+	NoOptDefVal string // default value (as text); if the flag is on the command line without any options
+	Deprecated string // if this flag is deprecated, this string is the message suggesting what to use instead
+	Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text
+	ShorthandDeprecated string // if the shorthand of this flag is deprecated, this string is the message suggesting what to use instead
+	Annotations map[string][]string // used by cobra.Command bash autocomplete code
+}
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Value interface {
+	String() string
+	Set(string) error
+	Type() string
+}
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
+	list := make(sort.StringSlice, len(flags))
+	i := 0
+	for k := range flags {
+		list[i] = string(k)
+		i++
+	}
+	list.Sort()
+	result := make([]*Flag, len(list))
+	for i, name := range list {
+		result[i] = flags[NormalizedName(name)]
+	}
+	return result
+}
+
+// SetNormalizeFunc allows you to add a function which can translate flag names.
+// Flags added to the FlagSet will be translated and then when anything tries to
+// look up the flag that will also be translated. So it would be possible to create
+// a flag named "getURL" and have it translated to "geturl". A user could then pass
+// "--getUrl" which may also be translated to "geturl" and everything will work.
+func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
+	f.normalizeNameFunc = n
+	for k, v := range f.formal {
+		delete(f.formal, k)
+		nname := f.normalizeFlagName(string(k))
+		f.formal[nname] = v
+		v.Name = string(nname)
+	}
+}
+
+// GetNormalizeFunc returns the previously set NormalizeFunc, or a function
+// which does no translation if none was set previously.
+func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName {
+	if f.normalizeNameFunc != nil {
+		return f.normalizeNameFunc
+	}
+	return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) }
+}
+
+func (f *FlagSet) normalizeFlagName(name string) NormalizedName {
+	n := f.GetNormalizeFunc()
+	return n(f, name)
+}
+
+func (f *FlagSet) out() io.Writer {
+	if f.output == nil {
+		return os.Stderr
+	}
+	return f.output
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (f *FlagSet) SetOutput(output io.Writer) {
+	f.output = output
+}
+
+// VisitAll visits the flags in lexicographical order, calling fn for each.
+// It visits all flags, even those not set.
+func (f *FlagSet) VisitAll(fn func(*Flag)) {
+	for _, flag := range sortFlags(f.formal) {
+		fn(flag)
+	}
+}
+
+// HasFlags returns a bool to indicate if the FlagSet has any flags defined.
+func (f *FlagSet) HasFlags() bool {
+	return len(f.formal) > 0
+}
+
+// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags
+// defined that are not hidden or deprecated.
+// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags
+// defined that are not hidden or deprecated.
+func (f *FlagSet) HasAvailableFlags() bool {
+	for _, flag := range f.formal {
+		if !flag.Hidden && len(flag.Deprecated) == 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// VisitAll visits the command-line flags in lexicographical order, calling
+// fn for each. It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+	CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order, calling fn for each.
+// It visits only those flags that have been set.
+func (f *FlagSet) Visit(fn func(*Flag)) {
+	for _, flag := range sortFlags(f.actual) {
+		fn(flag)
+	}
+}
+
+// Visit visits the command-line flags in lexicographical order, calling fn
+// for each. It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+	CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) Lookup(name string) *Flag {
+	return f.lookup(f.normalizeFlagName(name))
+}
+
+// lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) lookup(name NormalizedName) *Flag {
+	return f.formal[name]
+}
+
+// getFlagType looks up the named flag, checks that it has the expected type,
+// and returns its value converted via convFunc.
+func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
+	flag := f.Lookup(name)
+	if flag == nil {
+		err := fmt.Errorf("flag accessed but not defined: %s", name)
+		return nil, err
+	}
+
+	if flag.Value.Type() != ftype {
+		err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
+		return nil, err
+	}
+
+	sval := flag.Value.String()
+	result, err := convFunc(sval)
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
+// found during arg parsing. This allows your program to know which args were
+// before the -- and which came after.
+func (f *FlagSet) ArgsLenAtDash() int {
+	return f.argsLenAtDash
+}
+
+// MarkDeprecated indicates that a flag is deprecated in your program. It will
+// continue to function but will not show up in help or usage messages. Using
+// this flag will also print the given usageMessage.
+func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	if len(usageMessage) == 0 {
+		return fmt.Errorf("deprecated message for flag %q must be set", name)
+	}
+	flag.Deprecated = usageMessage
+	return nil
+}
+
+// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
+// program. It will continue to function but will not show up in help or usage
+// messages. Using this flag will also print the given usageMessage.
+func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	if len(usageMessage) == 0 {
+		return fmt.Errorf("deprecated message for flag %q must be set", name)
+	}
+	flag.ShorthandDeprecated = usageMessage
+	return nil
+}
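A sketch of how the deprecation helpers above (and MarkHidden, defined next) are typically used. The flag names are invented for the example, and Bool comes from the vendored bool.go not shown in this hunk:

	package main

	import flag "github.com/spf13/pflag"

	func main() {
		fs := flag.NewFlagSet("demo", flag.ContinueOnError)
		fs.Bool("old-feature", false, "superseded")
		fs.Bool("debug-dump", false, "internal use only")

		// --old-feature keeps working, vanishes from usage output, and
		// prints the message below whenever it is used.
		_ = fs.MarkDeprecated("old-feature", "use --new-feature instead")

		// --debug-dump keeps working but is omitted from usage output.
		_ = fs.MarkHidden("debug-dump")
	}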
+// MarkHidden sets a flag to 'hidden' in your program. It will continue to
+// function but will not show up in help or usage messages.
+func (f *FlagSet) MarkHidden(name string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	flag.Hidden = true
+	return nil
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+	return CommandLine.Lookup(name)
+}
+
+// Set sets the value of the named flag.
+func (f *FlagSet) Set(name, value string) error {
+	normalName := f.normalizeFlagName(name)
+	flag, ok := f.formal[normalName]
+	if !ok {
+		return fmt.Errorf("no such flag -%v", name)
+	}
+	err := flag.Value.Set(value)
+	if err != nil {
+		return err
+	}
+	if f.actual == nil {
+		f.actual = make(map[NormalizedName]*Flag)
+	}
+	f.actual[normalName] = flag
+	flag.Changed = true
+	if len(flag.Deprecated) > 0 {
+		fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+	}
+	return nil
+}
+
+// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
+// This is sometimes used by spf13/cobra programs which want to generate additional
+// bash completion information.
+func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
+	normalName := f.normalizeFlagName(name)
+	flag, ok := f.formal[normalName]
+	if !ok {
+		return fmt.Errorf("no such flag -%v", name)
+	}
+	if flag.Annotations == nil {
+		flag.Annotations = map[string][]string{}
+	}
+	flag.Annotations[key] = values
+	return nil
+}
+
+// Changed returns true if the flag was explicitly set during Parse() and false
+// otherwise.
+func (f *FlagSet) Changed(name string) bool {
+	flag := f.Lookup(name)
+	// If a flag doesn't exist, it wasn't changed.
+	if flag == nil {
+		return false
+	}
+	return flag.Changed
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+	return CommandLine.Set(name, value)
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
+func (f *FlagSet) PrintDefaults() {
+	usages := f.FlagUsages()
+	fmt.Fprintf(f.out(), "%s", usages)
+}
+
+// defaultIsZeroValue returns true if the default value for this flag represents
+// a zero value.
+func (f *Flag) defaultIsZeroValue() bool {
+	switch f.Value.(type) {
+	case boolFlag:
+		return f.DefValue == "false"
+	case *durationValue:
+		// Beginning in Go 1.7, duration zero values are "0s"
+		return f.DefValue == "0" || f.DefValue == "0s"
+	case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
+		return f.DefValue == "0"
+	case *stringValue:
+		return f.DefValue == ""
+	case *ipValue, *ipMaskValue, *ipNetValue:
+		return f.DefValue == ""
+	case *intSliceValue, *stringSliceValue, *stringArrayValue:
+		return f.DefValue == "[]"
+	default:
+		switch f.Value.String() {
+		case "false":
+			return true
+		case "<nil>":
+			return true
+		case "":
+			return true
+		case "0":
+			return true
+		}
+		return false
+	}
+}
+
+// UnquoteUsage extracts a back-quoted name from the usage
+// string for a flag and returns it and the un-quoted usage.
+// Given "a `name` to show" it returns ("name", "a name to show").
+// If there are no back quotes, the name is an educated guess of the
+// type of the flag's value, or the empty string if the flag is boolean.
+func UnquoteUsage(flag *Flag) (name string, usage string) {
+	// Look for a back-quoted name, but avoid the strings package.
+	usage = flag.Usage
+	for i := 0; i < len(usage); i++ {
+		if usage[i] == '`' {
+			for j := i + 1; j < len(usage); j++ {
+				if usage[j] == '`' {
+					name = usage[i+1 : j]
+					usage = usage[:i] + name + usage[j+1:]
+					return name, usage
+				}
+			}
+			break // Only one back quote; use type name.
+		}
+	}
+
+	name = flag.Value.Type()
+	switch name {
+	case "bool":
+		name = ""
+	case "float64":
+		name = "float"
+	case "int64":
+		name = "int"
+	case "uint64":
+		name = "uint"
+	}
+
+	return
+}
+
+// FlagUsages returns a string containing the usage information for all flags in
+// the FlagSet
+func (f *FlagSet) FlagUsages() string {
+	x := new(bytes.Buffer)
+
+	lines := make([]string, 0, len(f.formal))
+
+	maxlen := 0
+	f.VisitAll(func(flag *Flag) {
+		if len(flag.Deprecated) > 0 || flag.Hidden {
+			return
+		}
+
+		line := ""
+		if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 {
+			line = fmt.Sprintf("  -%s, --%s", flag.Shorthand, flag.Name)
+		} else {
+			line = fmt.Sprintf("      --%s", flag.Name)
+		}
+
+		varname, usage := UnquoteUsage(flag)
+		if len(varname) > 0 {
+			line += " " + varname
+		}
+		if len(flag.NoOptDefVal) > 0 {
+			switch flag.Value.Type() {
+			case "string":
+				line += fmt.Sprintf("[=%q]", flag.NoOptDefVal)
+			case "bool":
+				if flag.NoOptDefVal != "true" {
+					line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+				}
+			default:
+				line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+			}
+		}
+
+		// This special character will be replaced with spacing once the
+		// correct alignment is calculated
+		line += "\x00"
+		if len(line) > maxlen {
+			maxlen = len(line)
+		}
+
+		line += usage
+		if !flag.defaultIsZeroValue() {
+			if flag.Value.Type() == "string" {
+				line += fmt.Sprintf(" (default %q)", flag.DefValue)
+			} else {
+				line += fmt.Sprintf(" (default %s)", flag.DefValue)
+			}
+		}
+
+		lines = append(lines, line)
+	})
+
+	for _, line := range lines {
+		sidx := strings.Index(line, "\x00")
+		spacing := strings.Repeat(" ", maxlen-sidx)
+		fmt.Fprintln(x, line[:sidx], spacing, line[sidx+1:])
+	}
+
+	return x.String()
+}
+
+// PrintDefaults prints to standard error the default values of all defined command-line flags.
+func PrintDefaults() {
+	CommandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func defaultUsage(f *FlagSet) {
+	fmt.Fprintf(f.out(), "Usage of %s:\n", f.name)
+	f.PrintDefaults()
+}
+
+// NOTE: Usage is not just defaultUsage(CommandLine)
+// because it serves (via godoc flag Usage) as the example
+// for how to write your own usage function.
+
+// Usage prints to standard error a usage message documenting all defined command-line flags.
+// The function is a variable that may be changed to point to a custom function.
+// By default it prints a simple header and calls PrintDefaults; for details about the
+// format of the output and how to control it, see the documentation for PrintDefaults.
+var Usage = func() {
+	fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+	PrintDefaults()
+}
+
+// NFlag returns the number of flags that have been set.
+func (f *FlagSet) NFlag() int { return len(f.actual) }
+
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(CommandLine.actual) }
+
+// Arg returns the i'th argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func (f *FlagSet) Arg(i int) string {
+	if i < 0 || i >= len(f.args) {
+		return ""
+	}
+	return f.args[i]
+}
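The NoOptDefVal handling in FlagUsages above pairs with the parsing code later in this file: a flag with NoOptDefVal set may appear on the command line without a value. A short sketch using IntP from the vendored int.go added later in this diff:

	package main

	import (
		"fmt"

		flag "github.com/spf13/pflag"
	)

	func main() {
		fs := flag.NewFlagSet("demo", flag.ContinueOnError)
		jobs := fs.IntP("jobs", "j", 1, "parallel jobs")

		// With NoOptDefVal set, "--jobs" alone means "--jobs=4";
		// "--jobs=8" still works, and an absent flag keeps the default 1.
		fs.Lookup("jobs").NoOptDefVal = "4"

		_ = fs.Parse([]string{"--jobs"})
		fmt.Println(*jobs) // 4
	}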
+// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func Arg(i int) string {
+	return CommandLine.Arg(i)
+}
+
+// NArg is the number of arguments remaining after flags have been processed.
+func (f *FlagSet) NArg() int { return len(f.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(CommandLine.args) }
+
+// Args returns the non-flag arguments.
+func (f *FlagSet) Args() []string { return f.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args }
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (f *FlagSet) Var(value Value, name string, usage string) {
+	f.VarP(value, name, "", usage)
+}
+
+// VarPF is like VarP, but returns the flag created
+func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
+	// Remember the default value as a string; it won't change.
+	flag := &Flag{
+		Name:      name,
+		Shorthand: shorthand,
+		Usage:     usage,
+		Value:     value,
+		DefValue:  value.String(),
+	}
+	f.AddFlag(flag)
+	return flag
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
+	_ = f.VarPF(value, name, shorthand, usage)
+}
+
+// AddFlag will add the flag to the FlagSet
+func (f *FlagSet) AddFlag(flag *Flag) {
+	// Call normalizeFlagName function only once
+	normalizedFlagName := f.normalizeFlagName(flag.Name)
+
+	_, alreadythere := f.formal[normalizedFlagName]
+	if alreadythere {
+		msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
+		fmt.Fprintln(f.out(), msg)
+		panic(msg) // Happens only if flags are declared with identical names
+	}
+	if f.formal == nil {
+		f.formal = make(map[NormalizedName]*Flag)
+	}
+
+	flag.Name = string(normalizedFlagName)
+	f.formal[normalizedFlagName] = flag
+
+	if len(flag.Shorthand) == 0 {
+		return
+	}
+	if len(flag.Shorthand) > 1 {
+		fmt.Fprintf(f.out(), "%s shorthand is more than one ASCII character: %s\n", f.name, flag.Shorthand)
+		panic("shorthand is more than one character")
+	}
+	if f.shorthands == nil {
+		f.shorthands = make(map[byte]*Flag)
+	}
+	c := flag.Shorthand[0]
+	old, alreadythere := f.shorthands[c]
+	if alreadythere {
+		fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name)
+		panic("shorthand redefinition")
+	}
+	f.shorthands[c] = flag
+}
+
+// AddFlagSet adds one FlagSet to another. If a flag is already present in f,
+// the flag from newSet will be ignored.
+func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
+	if newSet == nil {
+		return
+	}
+	newSet.VisitAll(func(flag *Flag) {
+		if f.Lookup(flag.Name) == nil {
+			f.AddFlag(flag)
+		}
+	})
+}
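Var and VarP (including the package-level Var defined next) accept any user-defined Value. The comma-separated-string example mentioned in the doc comment looks roughly like this; csvValue is an invented name used only for illustration:

	package main

	import (
		"fmt"
		"strings"

		flag "github.com/spf13/pflag"
	)

	// csvValue implements the Value interface: Set decomposes "a,b,c"
	// into the underlying slice.
	type csvValue []string

	func (v *csvValue) String() string { return strings.Join(*v, ",") }
	func (v *csvValue) Type() string   { return "csv" }
	func (v *csvValue) Set(s string) error {
		*v = csvValue(strings.Split(s, ","))
		return nil
	}

	func main() {
		var hosts csvValue
		fs := flag.NewFlagSet("demo", flag.ContinueOnError)
		fs.VarP(&hosts, "hosts", "H", "comma-separated host list")
		_ = fs.Parse([]string{"--hosts=a,b,c"})
		fmt.Println(hosts) // [a b c]
	}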
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, name string, usage string) {
+	CommandLine.VarP(value, name, "", usage)
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func VarP(value Value, name, shorthand, usage string) {
+	CommandLine.VarP(value, name, shorthand, usage)
+}
+
+// failf prints to standard error a formatted error and usage message and
+// returns the error.
+func (f *FlagSet) failf(format string, a ...interface{}) error {
+	err := fmt.Errorf(format, a...)
+	fmt.Fprintln(f.out(), err)
+	f.usage()
+	return err
+}
+
+// usage calls the Usage method for the flag set, or the usage function if
+// the flag set is CommandLine.
+func (f *FlagSet) usage() {
+	if f == CommandLine {
+		Usage()
+	} else if f.Usage == nil {
+		defaultUsage(f)
+	} else {
+		f.Usage()
+	}
+}
+
+func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error {
+	if err := flag.Value.Set(value); err != nil {
+		return f.failf("invalid argument %q for %s: %v", value, origArg, err)
+	}
+	// mark as visited for Visit()
+	if f.actual == nil {
+		f.actual = make(map[NormalizedName]*Flag)
+	}
+	f.actual[f.normalizeFlagName(flag.Name)] = flag
+	flag.Changed = true
+	if len(flag.Deprecated) > 0 {
+		fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+	}
+	if len(flag.ShorthandDeprecated) > 0 && containsShorthand(origArg, flag.Shorthand) {
+		fmt.Fprintf(os.Stderr, "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
+	}
+	return nil
+}
+
+func containsShorthand(arg, shorthand string) bool {
+	// filter out long-form flags: --<flag_name>
+	if strings.HasPrefix(arg, "--") {
+		return false
+	}
+	arg = strings.SplitN(arg, "=", 2)[0]
+	return strings.Contains(arg, shorthand)
+}
+
+func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) {
+	a = args
+	name := s[2:]
+	if len(name) == 0 || name[0] == '-' || name[0] == '=' {
+		err = f.failf("bad flag syntax: %s", s)
+		return
+	}
+	split := strings.SplitN(name, "=", 2)
+	name = split[0]
+	flag, alreadythere := f.formal[f.normalizeFlagName(name)]
+	if !alreadythere {
+		if name == "help" { // special case for nice help message.
+			f.usage()
+			return a, ErrHelp
+		}
+		err = f.failf("unknown flag: --%s", name)
+		return
+	}
+	var value string
+	if len(split) == 2 {
+		// '--flag=arg'
+		value = split[1]
+	} else if len(flag.NoOptDefVal) > 0 {
+		// '--flag' (arg was optional)
+		value = flag.NoOptDefVal
+	} else if len(a) > 0 {
+		// '--flag arg'
+		value = a[0]
+		a = a[1:]
+	} else {
+		// '--flag' (arg was required)
+		err = f.failf("flag needs an argument: %s", s)
+		return
+	}
+	err = f.setFlag(flag, value, s)
+	return
+}
+
+func (f *FlagSet) parseSingleShortArg(shorthands string, args []string) (outShorts string, outArgs []string, err error) {
+	if strings.HasPrefix(shorthands, "test.") {
+		return
+	}
+	outArgs = args
+	outShorts = shorthands[1:]
+	c := shorthands[0]
+
+	flag, alreadythere := f.shorthands[c]
+	if !alreadythere {
+		if c == 'h' { // special case for nice help message.
+ f.usage() + err = ErrHelp + return + } + //TODO continue on error + err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) + return + } + var value string + if len(shorthands) > 2 && shorthands[1] == '=' { + value = shorthands[2:] + outShorts = "" + } else if len(flag.NoOptDefVal) > 0 { + value = flag.NoOptDefVal + } else if len(shorthands) > 1 { + value = shorthands[1:] + outShorts = "" + } else if len(args) > 0 { + value = args[0] + outArgs = args[1:] + } else { + err = f.failf("flag needs an argument: %q in -%s", c, shorthands) + return + } + err = f.setFlag(flag, value, shorthands) + return +} + +func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error) { + a = args + shorthands := s[1:] + + for len(shorthands) > 0 { + shorthands, a, err = f.parseSingleShortArg(shorthands, args) + if err != nil { + return + } + } + + return +} + +func (f *FlagSet) parseArgs(args []string) (err error) { + for len(args) > 0 { + s := args[0] + args = args[1:] + if len(s) == 0 || s[0] != '-' || len(s) == 1 { + if !f.interspersed { + f.args = append(f.args, s) + f.args = append(f.args, args...) + return nil + } + f.args = append(f.args, s) + continue + } + + if s[1] == '-' { + if len(s) == 2 { // "--" terminates the flags + f.argsLenAtDash = len(f.args) + f.args = append(f.args, args...) + break + } + args, err = f.parseLongArg(s, args) + } else { + args, err = f.parseShortArg(s, args) + } + if err != nil { + return + } + } + return +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. +// The return value will be ErrHelp if -help was set but not defined. +func (f *FlagSet) Parse(arguments []string) error { + f.parsed = true + f.args = make([]string, 0, len(arguments)) + err := f.parseArgs(arguments) + if err != nil { + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + return nil +} + +// Parsed reports whether f.Parse has been called. +func (f *FlagSet) Parsed() bool { + return f.parsed +} + +// Parse parses the command-line flags from os.Args[1:]. Must be called +// after all flags are defined and before flags are accessed by the program. +func Parse() { + // Ignore errors; CommandLine is set for ExitOnError. + CommandLine.Parse(os.Args[1:]) +} + +// SetInterspersed sets whether to support interspersed option/non-option arguments. +func SetInterspersed(interspersed bool) { + CommandLine.SetInterspersed(interspersed) +} + +// Parsed returns true if the command-line flags have been parsed. +func Parsed() bool { + return CommandLine.Parsed() +} + +// CommandLine is the default set of command-line flags, parsed from os.Args. +var CommandLine = NewFlagSet(os.Args[0], ExitOnError) + +// NewFlagSet returns a new, empty flag set with the specified name and +// error handling property. +func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { + f := &FlagSet{ + name: name, + errorHandling: errorHandling, + argsLenAtDash: -1, + interspersed: true, + } + return f +} + +// SetInterspersed sets whether to support interspersed option/non-option arguments. +func (f *FlagSet) SetInterspersed(interspersed bool) { + f.interspersed = interspersed +} + +// Init sets the name and error handling property for a flag set. +// By default, the zero FlagSet uses an empty name and the +// ContinueOnError error handling policy. 
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { + f.name = name + f.errorHandling = errorHandling + f.argsLenAtDash = -1 +} diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go new file mode 100644 index 0000000..a243f81 --- /dev/null +++ b/vendor/github.com/spf13/pflag/float32.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- float32 Value +type float32Value float32 + +func newFloat32Value(val float32, p *float32) *float32Value { + *p = val + return (*float32Value)(p) +} + +func (f *float32Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 32) + *f = float32Value(v) + return err +} + +func (f *float32Value) Type() string { + return "float32" +} + +func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) } + +func float32Conv(sval string) (interface{}, error) { + v, err := strconv.ParseFloat(sval, 32) + if err != nil { + return 0, err + } + return float32(v), nil +} + +// GetFloat32 return the float32 value of a flag with the given name +func (f *FlagSet) GetFloat32(name string) (float32, error) { + val, err := f.getFlagType(name, "float32", float32Conv) + if err != nil { + return 0, err + } + return val.(float32), nil +} + +// Float32Var defines a float32 flag with specified name, default value, and usage string. +// The argument p points to a float32 variable in which to store the value of the flag. +func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { + f.VarP(newFloat32Value(value, p), name, "", usage) +} + +// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { + f.VarP(newFloat32Value(value, p), name, shorthand, usage) +} + +// Float32Var defines a float32 flag with specified name, default value, and usage string. +// The argument p points to a float32 variable in which to store the value of the flag. +func Float32Var(p *float32, name string, value float32, usage string) { + CommandLine.VarP(newFloat32Value(value, p), name, "", usage) +} + +// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. +func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { + CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) +} + +// Float32 defines a float32 flag with specified name, default value, and usage string. +// The return value is the address of a float32 variable that stores the value of the flag. +func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { + p := new(float32) + f.Float32VarP(p, name, "", value, usage) + return p +} + +// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { + p := new(float32) + f.Float32VarP(p, name, shorthand, value, usage) + return p +} + +// Float32 defines a float32 flag with specified name, default value, and usage string. +// The return value is the address of a float32 variable that stores the value of the flag. +func Float32(name string, value float32, usage string) *float32 { + return CommandLine.Float32P(name, "", value, usage) +} + +// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. 
+func Float32P(name, shorthand string, value float32, usage string) *float32 { + return CommandLine.Float32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go new file mode 100644 index 0000000..04b5492 --- /dev/null +++ b/vendor/github.com/spf13/pflag/float64.go @@ -0,0 +1,84 @@ +package pflag + +import "strconv" + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) Type() string { + return "float64" +} + +func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) } + +func float64Conv(sval string) (interface{}, error) { + return strconv.ParseFloat(sval, 64) +} + +// GetFloat64 return the float64 value of a flag with the given name +func (f *FlagSet) GetFloat64(name string) (float64, error) { + val, err := f.getFlagType(name, "float64", float64Conv) + if err != nil { + return 0, err + } + return val.(float64), nil +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { + f.VarP(newFloat64Value(value, p), name, "", usage) +} + +// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { + f.VarP(newFloat64Value(value, p), name, shorthand, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func Float64Var(p *float64, name string, value float64, usage string) { + CommandLine.VarP(newFloat64Value(value, p), name, "", usage) +} + +// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. +func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { + CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { + p := new(float64) + f.Float64VarP(p, name, "", value, usage) + return p +} + +// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { + p := new(float64) + f.Float64VarP(p, name, shorthand, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(name string, value float64, usage string) *float64 { + return CommandLine.Float64P(name, "", value, usage) +} + +// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. 
+func Float64P(name, shorthand string, value float64, usage string) *float64 { + return CommandLine.Float64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go new file mode 100644 index 0000000..b056147 --- /dev/null +++ b/vendor/github.com/spf13/pflag/golangflag.go @@ -0,0 +1,104 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + goflag "flag" + "fmt" + "reflect" + "strings" +) + +var _ = fmt.Print + +// flagValueWrapper implements pflag.Value around a flag.Value. The main +// difference here is the addition of the Type method that returns a string +// name of the type. As this is generally unknown, we approximate that with +// reflection. +type flagValueWrapper struct { + inner goflag.Value + flagType string +} + +// We are just copying the boolFlag interface out of goflag as that is what +// they use to decide if a flag should get "true" when no arg is given. +type goBoolFlag interface { + goflag.Value + IsBoolFlag() bool +} + +func wrapFlagValue(v goflag.Value) Value { + // If the flag.Value happens to also be a pflag.Value, just use it directly. + if pv, ok := v.(Value); ok { + return pv + } + + pv := &flagValueWrapper{ + inner: v, + } + + t := reflect.TypeOf(v) + if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr { + t = t.Elem() + } + + pv.flagType = strings.TrimSuffix(t.Name(), "Value") + return pv +} + +func (v *flagValueWrapper) String() string { + return v.inner.String() +} + +func (v *flagValueWrapper) Set(s string) error { + return v.inner.Set(s) +} + +func (v *flagValueWrapper) Type() string { + return v.flagType +} + +// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag +// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessiblei +// with both `-v` and `--v` in flags. If the golang flag was more than a single +// character (ex: `verbose`) it will only be accessible via `--verbose` +func PFlagFromGoFlag(goflag *goflag.Flag) *Flag { + // Remember the default value as a string; it won't change. 
+ flag := &Flag{ + Name: goflag.Name, + Usage: goflag.Usage, + Value: wrapFlagValue(goflag.Value), + // Looks like golang flags don't set DefValue correctly :-( + //DefValue: goflag.DefValue, + DefValue: goflag.Value.String(), + } + // Ex: if the golang flag was -v, allow both -v and --v to work + if len(flag.Name) == 1 { + flag.Shorthand = flag.Name + } + if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() { + flag.NoOptDefVal = "true" + } + return flag +} + +// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet +func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) { + if f.Lookup(goflag.Name) != nil { + return + } + newflag := PFlagFromGoFlag(goflag) + f.AddFlag(newflag) +} + +// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet +func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { + if newSet == nil { + return + } + newSet.VisitAll(func(goflag *goflag.Flag) { + f.AddGoFlag(goflag) + }) +} diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go new file mode 100644 index 0000000..1474b89 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int.go @@ -0,0 +1,84 @@ +package pflag + +import "strconv" + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i *intValue) Type() string { + return "int" +} + +func (i *intValue) String() string { return strconv.Itoa(int(*i)) } + +func intConv(sval string) (interface{}, error) { + return strconv.Atoi(sval) +} + +// GetInt return the int value of a flag with the given name +func (f *FlagSet) GetInt(name string) (int, error) { + val, err := f.getFlagType(name, "int", intConv) + if err != nil { + return 0, err + } + return val.(int), nil +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { + f.VarP(newIntValue(value, p), name, "", usage) +} + +// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) { + f.VarP(newIntValue(value, p), name, shorthand, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, name string, value int, usage string) { + CommandLine.VarP(newIntValue(value, p), name, "", usage) +} + +// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. +func IntVarP(p *int, name, shorthand string, value int, usage string) { + CommandLine.VarP(newIntValue(value, p), name, shorthand, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func (f *FlagSet) Int(name string, value int, usage string) *int { + p := new(int) + f.IntVarP(p, name, "", value, usage) + return p +} + +// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int { + p := new(int) + f.IntVarP(p, name, shorthand, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func Int(name string, value int, usage string) *int { + return CommandLine.IntP(name, "", value, usage) +} + +// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. +func IntP(name, shorthand string, value int, usage string) *int { + return CommandLine.IntP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go new file mode 100644 index 0000000..9b95944 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int32.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- int32 Value +type int32Value int32 + +func newInt32Value(val int32, p *int32) *int32Value { + *p = val + return (*int32Value)(p) +} + +func (i *int32Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 32) + *i = int32Value(v) + return err +} + +func (i *int32Value) Type() string { + return "int32" +} + +func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int32Conv(sval string) (interface{}, error) { + v, err := strconv.ParseInt(sval, 0, 32) + if err != nil { + return 0, err + } + return int32(v), nil +} + +// GetInt32 return the int32 value of a flag with the given name +func (f *FlagSet) GetInt32(name string) (int32, error) { + val, err := f.getFlagType(name, "int32", int32Conv) + if err != nil { + return 0, err + } + return val.(int32), nil +} + +// Int32Var defines an int32 flag with specified name, default value, and usage string. +// The argument p points to an int32 variable in which to store the value of the flag. +func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { + f.VarP(newInt32Value(value, p), name, "", usage) +} + +// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { + f.VarP(newInt32Value(value, p), name, shorthand, usage) +} + +// Int32Var defines an int32 flag with specified name, default value, and usage string. +// The argument p points to an int32 variable in which to store the value of the flag. +func Int32Var(p *int32, name string, value int32, usage string) { + CommandLine.VarP(newInt32Value(value, p), name, "", usage) +} + +// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. +func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { + CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) +} + +// Int32 defines an int32 flag with specified name, default value, and usage string. +// The return value is the address of an int32 variable that stores the value of the flag. +func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { + p := new(int32) + f.Int32VarP(p, name, "", value, usage) + return p +} + +// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { + p := new(int32) + f.Int32VarP(p, name, shorthand, value, usage) + return p +} + +// Int32 defines an int32 flag with specified name, default value, and usage string. 
+// The return value is the address of an int32 variable that stores the value of the flag. +func Int32(name string, value int32, usage string) *int32 { + return CommandLine.Int32P(name, "", value, usage) +} + +// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. +func Int32P(name, shorthand string, value int32, usage string) *int32 { + return CommandLine.Int32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go new file mode 100644 index 0000000..0026d78 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int64.go @@ -0,0 +1,84 @@ +package pflag + +import "strconv" + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Type() string { + return "int64" +} + +func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int64Conv(sval string) (interface{}, error) { + return strconv.ParseInt(sval, 0, 64) +} + +// GetInt64 return the int64 value of a flag with the given name +func (f *FlagSet) GetInt64(name string) (int64, error) { + val, err := f.getFlagType(name, "int64", int64Conv) + if err != nil { + return 0, err + } + return val.(int64), nil +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { + f.VarP(newInt64Value(value, p), name, "", usage) +} + +// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { + f.VarP(newInt64Value(value, p), name, shorthand, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, name string, value int64, usage string) { + CommandLine.VarP(newInt64Value(value, p), name, "", usage) +} + +// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. +func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { + CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { + p := new(int64) + f.Int64VarP(p, name, "", value, usage) + return p +} + +// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { + p := new(int64) + f.Int64VarP(p, name, shorthand, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func Int64(name string, value int64, usage string) *int64 { + return CommandLine.Int64P(name, "", value, usage) +} + +// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. 
+func Int64P(name, shorthand string, value int64, usage string) *int64 { + return CommandLine.Int64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go new file mode 100644 index 0000000..4da9222 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int8.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- int8 Value +type int8Value int8 + +func newInt8Value(val int8, p *int8) *int8Value { + *p = val + return (*int8Value)(p) +} + +func (i *int8Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 8) + *i = int8Value(v) + return err +} + +func (i *int8Value) Type() string { + return "int8" +} + +func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int8Conv(sval string) (interface{}, error) { + v, err := strconv.ParseInt(sval, 0, 8) + if err != nil { + return 0, err + } + return int8(v), nil +} + +// GetInt8 return the int8 value of a flag with the given name +func (f *FlagSet) GetInt8(name string) (int8, error) { + val, err := f.getFlagType(name, "int8", int8Conv) + if err != nil { + return 0, err + } + return val.(int8), nil +} + +// Int8Var defines an int8 flag with specified name, default value, and usage string. +// The argument p points to an int8 variable in which to store the value of the flag. +func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { + f.VarP(newInt8Value(value, p), name, "", usage) +} + +// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { + f.VarP(newInt8Value(value, p), name, shorthand, usage) +} + +// Int8Var defines an int8 flag with specified name, default value, and usage string. +// The argument p points to an int8 variable in which to store the value of the flag. +func Int8Var(p *int8, name string, value int8, usage string) { + CommandLine.VarP(newInt8Value(value, p), name, "", usage) +} + +// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. +func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { + CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) +} + +// Int8 defines an int8 flag with specified name, default value, and usage string. +// The return value is the address of an int8 variable that stores the value of the flag. +func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { + p := new(int8) + f.Int8VarP(p, name, "", value, usage) + return p +} + +// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { + p := new(int8) + f.Int8VarP(p, name, shorthand, value, usage) + return p +} + +// Int8 defines an int8 flag with specified name, default value, and usage string. +// The return value is the address of an int8 variable that stores the value of the flag. +func Int8(name string, value int8, usage string) *int8 { + return CommandLine.Int8P(name, "", value, usage) +} + +// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. 
+func Int8P(name, shorthand string, value int8, usage string) *int8 { + return CommandLine.Int8P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go new file mode 100644 index 0000000..1e7c9ed --- /dev/null +++ b/vendor/github.com/spf13/pflag/int_slice.go @@ -0,0 +1,128 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- intSlice Value +type intSliceValue struct { + value *[]int + changed bool +} + +func newIntSliceValue(val []int, p *[]int) *intSliceValue { + isv := new(intSliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *intSliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *intSliceValue) Type() string { + return "intSlice" +} + +func (s *intSliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func intSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int{}, nil + } + ss := strings.Split(val, ",") + out := make([]int, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetIntSlice return the []int value of a flag with the given name +func (f *FlagSet) GetIntSlice(name string) ([]int, error) { + val, err := f.getFlagType(name, "intSlice", intSliceConv) + if err != nil { + return []int{}, err + } + return val.([]int), nil +} + +// IntSliceVar defines a intSlice flag with specified name, default value, and usage string. +// The argument p points to a []int variable in which to store the value of the flag. +func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) { + f.VarP(newIntSliceValue(value, p), name, "", usage) +} + +// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { + f.VarP(newIntSliceValue(value, p), name, shorthand, usage) +} + +// IntSliceVar defines a int[] flag with specified name, default value, and usage string. +// The argument p points to a int[] variable in which to store the value of the flag. +func IntSliceVar(p *[]int, name string, value []int, usage string) { + CommandLine.VarP(newIntSliceValue(value, p), name, "", usage) +} + +// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. +func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { + CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage) +} + +// IntSlice defines a []int flag with specified name, default value, and usage string. +// The return value is the address of a []int variable that stores the value of the flag. +func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int { + p := []int{} + f.IntSliceVarP(&p, name, "", value, usage) + return &p +} + +// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int { + p := []int{} + f.IntSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// IntSlice defines a []int flag with specified name, default value, and usage string. +// The return value is the address of a []int variable that stores the value of the flag. +func IntSlice(name string, value []int, usage string) *[]int { + return CommandLine.IntSliceP(name, "", value, usage) +} + +// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. +func IntSliceP(name, shorthand string, value []int, usage string) *[]int { + return CommandLine.IntSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go new file mode 100644 index 0000000..88a1743 --- /dev/null +++ b/vendor/github.com/spf13/pflag/ip.go @@ -0,0 +1,96 @@ +package pflag + +import ( + "fmt" + "net" + "strings" +) + +var _ = strings.TrimSpace + +// -- net.IP value +type ipValue net.IP + +func newIPValue(val net.IP, p *net.IP) *ipValue { + *p = val + return (*ipValue)(p) +} + +func (i *ipValue) String() string { return net.IP(*i).String() } +func (i *ipValue) Set(s string) error { + ip := net.ParseIP(strings.TrimSpace(s)) + if ip == nil { + return fmt.Errorf("failed to parse IP: %q", s) + } + *i = ipValue(ip) + return nil +} + +func (i *ipValue) Type() string { + return "ip" +} + +func ipConv(sval string) (interface{}, error) { + ip := net.ParseIP(sval) + if ip != nil { + return ip, nil + } + return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) +} + +// GetIP return the net.IP value of a flag with the given name +func (f *FlagSet) GetIP(name string) (net.IP, error) { + val, err := f.getFlagType(name, "ip", ipConv) + if err != nil { + return nil, err + } + return val.(net.IP), nil +} + +// IPVar defines an net.IP flag with specified name, default value, and usage string. +// The argument p points to an net.IP variable in which to store the value of the flag. +func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) { + f.VarP(newIPValue(value, p), name, "", usage) +} + +// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { + f.VarP(newIPValue(value, p), name, shorthand, usage) +} + +// IPVar defines an net.IP flag with specified name, default value, and usage string. +// The argument p points to an net.IP variable in which to store the value of the flag. +func IPVar(p *net.IP, name string, value net.IP, usage string) { + CommandLine.VarP(newIPValue(value, p), name, "", usage) +} + +// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. +func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { + CommandLine.VarP(newIPValue(value, p), name, shorthand, usage) +} + +// IP defines an net.IP flag with specified name, default value, and usage string. +// The return value is the address of an net.IP variable that stores the value of the flag. +func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP { + p := new(net.IP) + f.IPVarP(p, name, "", value, usage) + return p +} + +// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP { + p := new(net.IP) + f.IPVarP(p, name, shorthand, value, usage) + return p +} + +// IP defines an net.IP flag with specified name, default value, and usage string. +// The return value is the address of an net.IP variable that stores the value of the flag. +func IP(name string, value net.IP, usage string) *net.IP { + return CommandLine.IPP(name, "", value, usage) +} + +// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. +func IPP(name, shorthand string, value net.IP, usage string) *net.IP { + return CommandLine.IPP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go new file mode 100644 index 0000000..5bd44bd --- /dev/null +++ b/vendor/github.com/spf13/pflag/ipmask.go @@ -0,0 +1,122 @@ +package pflag + +import ( + "fmt" + "net" + "strconv" +) + +// -- net.IPMask value +type ipMaskValue net.IPMask + +func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue { + *p = val + return (*ipMaskValue)(p) +} + +func (i *ipMaskValue) String() string { return net.IPMask(*i).String() } +func (i *ipMaskValue) Set(s string) error { + ip := ParseIPv4Mask(s) + if ip == nil { + return fmt.Errorf("failed to parse IP mask: %q", s) + } + *i = ipMaskValue(ip) + return nil +} + +func (i *ipMaskValue) Type() string { + return "ipMask" +} + +// ParseIPv4Mask written in IP form (e.g. 255.255.255.0). +// This function should really belong to the net package. +func ParseIPv4Mask(s string) net.IPMask { + mask := net.ParseIP(s) + if mask == nil { + if len(s) != 8 { + return nil + } + // net.IPMask.String() actually outputs things like ffffff00 + // so write a horrible parser for that as well :-( + m := []int{} + for i := 0; i < 4; i++ { + b := "0x" + s[2*i:2*i+2] + d, err := strconv.ParseInt(b, 0, 0) + if err != nil { + return nil + } + m = append(m, int(d)) + } + s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3]) + mask = net.ParseIP(s) + if mask == nil { + return nil + } + } + return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15]) +} + +func parseIPv4Mask(sval string) (interface{}, error) { + mask := ParseIPv4Mask(sval) + if mask == nil { + return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval) + } + return mask, nil +} + +// GetIPv4Mask return the net.IPv4Mask value of a flag with the given name +func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) { + val, err := f.getFlagType(name, "ipMask", parseIPv4Mask) + if err != nil { + return nil, err + } + return val.(net.IPMask), nil +} + +// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. +// The argument p points to an net.IPMask variable in which to store the value of the flag. +func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { + f.VarP(newIPMaskValue(value, p), name, "", usage) +} + +// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { + f.VarP(newIPMaskValue(value, p), name, shorthand, usage) +} + +// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. +// The argument p points to an net.IPMask variable in which to store the value of the flag. 
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { + CommandLine.VarP(newIPMaskValue(value, p), name, "", usage) +} + +// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. +func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { + CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage) +} + +// IPMask defines an net.IPMask flag with specified name, default value, and usage string. +// The return value is the address of an net.IPMask variable that stores the value of the flag. +func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask { + p := new(net.IPMask) + f.IPMaskVarP(p, name, "", value, usage) + return p +} + +// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { + p := new(net.IPMask) + f.IPMaskVarP(p, name, shorthand, value, usage) + return p +} + +// IPMask defines an net.IPMask flag with specified name, default value, and usage string. +// The return value is the address of an net.IPMask variable that stores the value of the flag. +func IPMask(name string, value net.IPMask, usage string) *net.IPMask { + return CommandLine.IPMaskP(name, "", value, usage) +} + +// IPMaskP is like IP, but accepts a shorthand letter that can be used after a single dash. +func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { + return CommandLine.IPMaskP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go new file mode 100644 index 0000000..149b764 --- /dev/null +++ b/vendor/github.com/spf13/pflag/ipnet.go @@ -0,0 +1,100 @@ +package pflag + +import ( + "fmt" + "net" + "strings" +) + +// IPNet adapts net.IPNet for use as a flag. +type ipNetValue net.IPNet + +func (ipnet ipNetValue) String() string { + n := net.IPNet(ipnet) + return n.String() +} + +func (ipnet *ipNetValue) Set(value string) error { + _, n, err := net.ParseCIDR(strings.TrimSpace(value)) + if err != nil { + return err + } + *ipnet = ipNetValue(*n) + return nil +} + +func (*ipNetValue) Type() string { + return "ipNet" +} + +var _ = strings.TrimSpace + +func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue { + *p = val + return (*ipNetValue)(p) +} + +func ipNetConv(sval string) (interface{}, error) { + _, n, err := net.ParseCIDR(strings.TrimSpace(sval)) + if err == nil { + return *n, nil + } + return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval) +} + +// GetIPNet return the net.IPNet value of a flag with the given name +func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) { + val, err := f.getFlagType(name, "ipNet", ipNetConv) + if err != nil { + return net.IPNet{}, err + } + return val.(net.IPNet), nil +} + +// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. +// The argument p points to an net.IPNet variable in which to store the value of the flag. +func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { + f.VarP(newIPNetValue(value, p), name, "", usage) +} + +// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+	f.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+	CommandLine.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+	CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+	p := new(net.IPNet)
+	f.IPNetVarP(p, name, "", value, usage)
+	return p
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+	p := new(net.IPNet)
+	f.IPNetVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+	return CommandLine.IPNetP(name, "", value, usage)
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+	return CommandLine.IPNetP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go
new file mode 100644
index 0000000..04e0a26
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string.go
@@ -0,0 +1,80 @@
+package pflag
+
+// -- string Value
+type stringValue string
+
+func newStringValue(val string, p *string) *stringValue {
+	*p = val
+	return (*stringValue)(p)
+}
+
+func (s *stringValue) Set(val string) error {
+	*s = stringValue(val)
+	return nil
+}
+func (s *stringValue) Type() string {
+	return "string"
+}
+
+func (s *stringValue) String() string { return string(*s) }
+
+func stringConv(sval string) (interface{}, error) {
+	return sval, nil
+}
+
+// GetString returns the string value of a flag with the given name
+func (f *FlagSet) GetString(name string) (string, error) {
+	val, err := f.getFlagType(name, "string", stringConv)
+	if err != nil {
+		return "", err
+	}
+	return val.(string), nil
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
+	f.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) {
+	f.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func StringVar(p *string, name string, value string, usage string) {
+	CommandLine.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringVarP(p *string, name, shorthand string, value string, usage string) {
+	CommandLine.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func (f *FlagSet) String(name string, value string, usage string) *string {
+	p := new(string)
+	f.StringVarP(p, name, "", value, usage)
+	return p
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string {
+	p := new(string)
+	f.StringVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
func String(name string, value string, usage string) *string {
+	return CommandLine.StringP(name, "", value, usage)
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func StringP(name, shorthand string, value string, usage string) *string {
+	return CommandLine.StringP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go
new file mode 100644
index 0000000..f320f2e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_array.go
@@ -0,0 +1,110 @@
+package pflag
+
+import (
+	"fmt"
+	"strings"
+)
+
+var _ = fmt.Fprint
+
+// -- stringArray Value
+type stringArrayValue struct {
+	value   *[]string
+	changed bool
+}
+
+func newStringArrayValue(val []string, p *[]string) *stringArrayValue {
+	ssv := new(stringArrayValue)
+	ssv.value = p
+	*ssv.value = val
+	return ssv
+}
+
+func (s *stringArrayValue) Set(val string) error {
+	if !s.changed {
+		*s.value = []string{val}
+		s.changed = true
+	} else {
+		*s.value = append(*s.value, val)
+	}
+	return nil
+}
+
+func (s *stringArrayValue) Type() string {
+	return "stringArray"
+}
+
+func (s *stringArrayValue) String() string {
+	str, _ := writeAsCSV(*s.value)
+	return "[" + str + "]"
+}
+
+func stringArrayConv(sval string) (interface{}, error) {
+	sval = strings.Trim(sval, "[]")
+	// An empty string would cause an array with one (empty) string
+	if len(sval) == 0 {
+		return []string{}, nil
+	}
+	return readAsCSV(sval)
+}
+
+// GetStringArray returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringArray(name string) ([]string, error) {
+	val, err := f.getFlagType(name, "stringArray", stringArrayConv)
+	if err != nil {
+		return []string{}, err
+	}
+	return val.([]string), nil
+}
+
+// StringArrayVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the values of the multiple flags.
+// The value of each argument is not split on commas; each use of the flag appends one element.
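+//
+// A minimal usage sketch (the flag name "header" is illustrative, not part
+// of the package):
+//
+//	var headers []string
+//	fs := NewFlagSet("example", ContinueOnError)
+//	fs.StringArrayVar(&headers, "header", nil, "repeatable header value")
+//	fs.Parse([]string{"--header", "a,b", "--header", "c"})
+//	// headers == []string{"a,b", "c"}: repeated flags append, commas are kept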
+func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) {
+	f.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	f.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArrayVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// The value of each argument is not split on commas.
+func StringArrayVar(p *[]string, name string, value []string, usage string) {
+	CommandLine.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArray defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument is not split on commas.
+func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringArrayVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringArrayVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// StringArray defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument is not split on commas.
+func StringArray(name string, value []string, usage string) *[]string {
+	return CommandLine.StringArrayP(name, "", value, usage)
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+	return CommandLine.StringArrayP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go
new file mode 100644
index 0000000..51e3c5d
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_slice.go
@@ -0,0 +1,132 @@
+package pflag
+
+import (
+	"bytes"
+	"encoding/csv"
+	"fmt"
+	"strings"
+)
+
+var _ = fmt.Fprint
+
+// -- stringSlice Value
+type stringSliceValue struct {
+	value   *[]string
+	changed bool
+}
+
+func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
+	ssv := new(stringSliceValue)
+	ssv.value = p
+	*ssv.value = val
+	return ssv
+}
+
+func readAsCSV(val string) ([]string, error) {
+	if val == "" {
+		return []string{}, nil
+	}
+	stringReader := strings.NewReader(val)
+	csvReader := csv.NewReader(stringReader)
+	return csvReader.Read()
+}
+
+func writeAsCSV(vals []string) (string, error) {
+	b := &bytes.Buffer{}
+	w := csv.NewWriter(b)
+	err := w.Write(vals)
+	if err != nil {
+		return "", err
+	}
+	w.Flush()
+	return strings.TrimSuffix(b.String(), fmt.Sprintln()), nil
+}
+
+func (s *stringSliceValue) Set(val string) error {
+	v, err := readAsCSV(val)
+	if err != nil {
+		return err
+	}
+	if !s.changed {
+		*s.value = v
+	} else {
+		*s.value = append(*s.value, v...)
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *stringSliceValue) Type() string {
+	return "stringSlice"
+}
+
+func (s *stringSliceValue) String() string {
+	str, _ := writeAsCSV(*s.value)
+	return "[" + str + "]"
+}
+
+func stringSliceConv(sval string) (interface{}, error) {
+	sval = strings.Trim(sval, "[]")
+	// An empty string would cause a slice with one (empty) string
+	if len(sval) == 0 {
+		return []string{}, nil
+	}
+	return readAsCSV(sval)
+}
+
+// GetStringSlice returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringSlice(name string) ([]string, error) {
+	val, err := f.getFlagType(name, "stringSlice", stringSliceConv)
+	if err != nil {
+		return []string{}, err
+	}
+	return val.([]string), nil
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) {
+	f.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	f.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+func StringSliceVar(p *[]string, name string, value []string, usage string) {
+	CommandLine.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
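+//
+// A minimal usage sketch (the flag name "tags" is illustrative, not part of
+// the package):
+//
+//	fs := NewFlagSet("example", ContinueOnError)
+//	tags := fs.StringSlice("tags", nil, "comma-separated tags")
+//	fs.Parse([]string{"--tags", "a,b", "--tags", "c"})
+//	// *tags == []string{"a", "b", "c"}: values are CSV-split, unlike
+//	// StringArray, which would keep "a,b" as a single element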
+func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+func StringSlice(name string, value []string, usage string) *[]string {
+	return CommandLine.StringSliceP(name, "", value, usage)
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+	return CommandLine.StringSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go
new file mode 100644
index 0000000..dcbc2b7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint Value
+type uintValue uint
+
+func newUintValue(val uint, p *uint) *uintValue {
+	*p = val
+	return (*uintValue)(p)
+}
+
+func (i *uintValue) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 64)
+	*i = uintValue(v)
+	return err
+}
+
+func (i *uintValue) Type() string {
+	return "uint"
+}
+
+func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uintConv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 0)
+	if err != nil {
+		return 0, err
+	}
+	return uint(v), nil
+}
+
+// GetUint returns the uint value of a flag with the given name
+func (f *FlagSet) GetUint(name string) (uint, error) {
+	val, err := f.getFlagType(name, "uint", uintConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint), nil
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
+	f.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+	f.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func UintVar(p *uint, name string, value uint, usage string) {
+	CommandLine.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+	CommandLine.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
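+//
+// A minimal usage sketch (the flag name "workers" is illustrative, not part
+// of the package):
+//
+//	fs := NewFlagSet("example", ContinueOnError)
+//	workers := fs.Uint("workers", 4, "number of workers")
+//	fs.Parse([]string{"--workers", "8"})
+//	n, _ := fs.GetUint("workers") // n == 8, the same value as *workers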
+func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
+	p := new(uint)
+	f.UintVarP(p, name, "", value, usage)
+	return p
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
+	p := new(uint)
+	f.UintVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(name string, value uint, usage string) *uint {
+	return CommandLine.UintP(name, "", value, usage)
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func UintP(name, shorthand string, value uint, usage string) *uint {
+	return CommandLine.UintP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go
new file mode 100644
index 0000000..7e9914e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint16 value
+type uint16Value uint16
+
+func newUint16Value(val uint16, p *uint16) *uint16Value {
+	*p = val
+	return (*uint16Value)(p)
+}
+
+func (i *uint16Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 16)
+	*i = uint16Value(v)
+	return err
+}
+
+func (i *uint16Value) Type() string {
+	return "uint16"
+}
+
+func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint16Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 16)
+	if err != nil {
+		return 0, err
+	}
+	return uint16(v), nil
+}
+
+// GetUint16 returns the uint16 value of a flag with the given name
+func (f *FlagSet) GetUint16(name string) (uint16, error) {
+	val, err := f.getFlagType(name, "uint16", uint16Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint16), nil
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
+	f.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+	f.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func Uint16Var(p *uint16, name string, value uint16, usage string) {
+	CommandLine.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+	CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
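+//
+// Because values are parsed with strconv.ParseUint(s, 0, 16), decimal, hex
+// and octal notations are all accepted. A minimal sketch (the flag name
+// "port" is illustrative, not part of the package):
+//
+//	fs := NewFlagSet("example", ContinueOnError)
+//	port := fs.Uint16("port", 8080, "listen port")
+//	fs.Parse([]string{"--port", "0x1F90"}) // hex notation for 8080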
+func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
+	p := new(uint16)
+	f.Uint16VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+	p := new(uint16)
+	f.Uint16VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func Uint16(name string, value uint16, usage string) *uint16 {
+	return CommandLine.Uint16P(name, "", value, usage)
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+	return CommandLine.Uint16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go
new file mode 100644
index 0000000..d802453
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint32 value
+type uint32Value uint32
+
+func newUint32Value(val uint32, p *uint32) *uint32Value {
+	*p = val
+	return (*uint32Value)(p)
+}
+
+func (i *uint32Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 32)
+	*i = uint32Value(v)
+	return err
+}
+
+func (i *uint32Value) Type() string {
+	return "uint32"
+}
+
+func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint32Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(v), nil
+}
+
+// GetUint32 returns the uint32 value of a flag with the given name
+func (f *FlagSet) GetUint32(name string) (uint32, error) {
+	val, err := f.getFlagType(name, "uint32", uint32Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint32), nil
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
+	f.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+	f.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func Uint32Var(p *uint32, name string, value uint32, usage string) {
+	CommandLine.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+	CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
+	p := new(uint32)
+	f.Uint32VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+	p := new(uint32)
+	f.Uint32VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func Uint32(name string, value uint32, usage string) *uint32 {
+	return CommandLine.Uint32P(name, "", value, usage)
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+	return CommandLine.Uint32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go
new file mode 100644
index 0000000..f62240f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint64.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint64 Value
+type uint64Value uint64
+
+func newUint64Value(val uint64, p *uint64) *uint64Value {
+	*p = val
+	return (*uint64Value)(p)
+}
+
+func (i *uint64Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 64)
+	*i = uint64Value(v)
+	return err
+}
+
+func (i *uint64Value) Type() string {
+	return "uint64"
+}
+
+func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint64Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 64)
+	if err != nil {
+		return 0, err
+	}
+	return uint64(v), nil
+}
+
+// GetUint64 returns the uint64 value of a flag with the given name
+func (f *FlagSet) GetUint64(name string) (uint64, error) {
+	val, err := f.getFlagType(name, "uint64", uint64Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint64), nil
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
+	f.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+	f.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func Uint64Var(p *uint64, name string, value uint64, usage string) {
+	CommandLine.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+	CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
+	p := new(uint64)
+	f.Uint64VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+	p := new(uint64)
+	f.Uint64VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(name string, value uint64, usage string) *uint64 {
+	return CommandLine.Uint64P(name, "", value, usage)
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+	return CommandLine.Uint64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go
new file mode 100644
index 0000000..bb0e83c
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint8 Value
+type uint8Value uint8
+
+func newUint8Value(val uint8, p *uint8) *uint8Value {
+	*p = val
+	return (*uint8Value)(p)
+}
+
+func (i *uint8Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 8)
+	*i = uint8Value(v)
+	return err
+}
+
+func (i *uint8Value) Type() string {
+	return "uint8"
+}
+
+func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint8Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 8)
+	if err != nil {
+		return 0, err
+	}
+	return uint8(v), nil
+}
+
+// GetUint8 returns the uint8 value of a flag with the given name
+func (f *FlagSet) GetUint8(name string) (uint8, error) {
+	val, err := f.getFlagType(name, "uint8", uint8Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint8), nil
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) {
+	f.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+	f.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func Uint8Var(p *uint8, name string, value uint8, usage string) {
+	CommandLine.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+	CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 { + p := new(uint8) + f.Uint8VarP(p, name, "", value, usage) + return p +} + +// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 { + p := new(uint8) + f.Uint8VarP(p, name, shorthand, value, usage) + return p +} + +// Uint8 defines a uint8 flag with specified name, default value, and usage string. +// The return value is the address of a uint8 variable that stores the value of the flag. +func Uint8(name string, value uint8, usage string) *uint8 { + return CommandLine.Uint8P(name, "", value, usage) +} + +// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. +func Uint8P(name, shorthand string, value uint8, usage string) *uint8 { + return CommandLine.Uint8P(name, shorthand, value, usage) +} diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. 
This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
new file mode 100644
index 0000000..ea1a7cd
--- /dev/null
+++ b/vendor/golang.org/x/net/context/context.go
@@ -0,0 +1,156 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancelation signals, and other request-scoped values across API boundaries
+// and between processes.
+//
+// Incoming requests to a server should create a Context, and outgoing calls to
+// servers should accept a Context. The chain of function calls between them
+// must propagate the Context, optionally replacing it with a modified copy
+// created using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+// 	func DoSomething(ctx context.Context, arg Arg) error {
+// 		// ... use ctx ...
+// 	}
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context
+
+import "time"
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled. Deadline returns ok==false when no deadline is
+	// set. Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled. Done may return nil if this context can
+	// never be canceled. Successive calls to Done return the same value.
+	//
+	// WithCancel arranges for Done to be closed when cancel is called;
+	// WithDeadline arranges for Done to be closed when the deadline
+	// expires; WithTimeout arranges for Done to be closed when the timeout
+	// elapses.
+	//
+	// Done is provided for use in select statements:
+	//
+	//  // Stream generates values with DoSomething and sends them to out
+	//  // until DoSomething returns an error or ctx.Done is closed.
+	//  func Stream(ctx context.Context, out chan<- Value) error {
+	//  	for {
+	//  		v, err := DoSomething(ctx)
+	//  		if err != nil {
+	//  			return err
+	//  		}
+	//  		select {
+	//  		case <-ctx.Done():
+	//  			return ctx.Err()
+	//  		case out <- v:
+	//  		}
+	//  	}
+	//  }
+	//
+	// See http://blog.golang.org/pipelines for more examples of how to use
+	// a Done channel for cancelation.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed. Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed. No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key return the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	//
+	// A key identifies a specific value in a Context. Functions that wish
+	// to store values in Context typically allocate a key in a global
+	// variable then use that key as the argument to context.WithValue and
+	// Context.Value. A key can be any type that supports equality;
+	// packages should define keys as an unexported type to avoid
+	// collisions.
+	//
+	// Packages that define a Context key should provide type-safe accessors
+	// for the values stored using that key:
+	//
+	// 	// Package user defines a User type that's stored in Contexts.
+	// 	package user
+	//
+	// 	import "golang.org/x/net/context"
+	//
+	// 	// User is the type of value stored in the Contexts.
+	// 	type User struct {...}
+	//
+	// 	// key is an unexported type for keys defined in this package.
+	// 	// This prevents collisions with keys defined in other packages.
+	// 	type key int
+	//
+	// 	// userKey is the key for user.User values in Contexts. It is
+	// 	// unexported; clients use user.NewContext and user.FromContext
+	// 	// instead of using this key directly.
+	// 	var userKey key = 0
+	//
+	// 	// NewContext returns a new Context that carries value u.
+	// 	func NewContext(ctx context.Context, u *User) context.Context {
+	// 		return context.WithValue(ctx, userKey, u)
+	// 	}
+	//
+	// 	// FromContext returns the User value stored in ctx, if any.
+	// 	func FromContext(ctx context.Context) (*User, bool) {
+	// 		u, ok := ctx.Value(userKey).(*User)
+	// 		return u, ok
+	// 	}
+	Value(key interface{}) interface{}
+}
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+	return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter). TODO is recognized by static analysis tools that determine
+// whether Contexts are propagated correctly in a program.
+func TODO() Context {
+	return todo
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 0000000..f8cda19 --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 0000000..5a30aca --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. 
When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml new file mode 100644 index 0000000..a035125 --- /dev/null +++ b/vendor/golang.org/x/oauth2/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.3 + - 1.4 + +install: + - export GOPATH="$HOME/gopath" + - mkdir -p "$GOPATH/src/golang.org/x" + - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" + - go get -v -t -d golang.org/x/oauth2/... + +script: + - go test -v golang.org/x/oauth2/... diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/golang.org/x/oauth2/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md new file mode 100644 index 0000000..46aa2b1 --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. 
+
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE
new file mode 100644
index 0000000..d02f24f
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The oauth2 Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
new file mode 100644
index 0000000..0d51417
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/README.md
@@ -0,0 +1,64 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+
+The oauth2 package contains a client implementation of the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package.
+
+This means it's no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package. (You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+
+    import (
+        "golang.org/x/net/context"
+        "golang.org/x/oauth2"
+        "golang.org/x/oauth2/google"
+        newappengine "google.golang.org/appengine"
+        newurlfetch "google.golang.org/appengine/urlfetch"
+
+        "appengine"
+    )
+
+    func handler(w http.ResponseWriter, r *http.Request) {
+        var c appengine.Context = appengine.NewContext(r)
+        c.Infof("Logging a message with the old package")
+
+        var ctx context.Context = newappengine.NewContext(r)
+        client := &http.Client{
+            Transport: &oauth2.Transport{
+                Source: google.AppEngineTokenSource(ctx, "scope"),
+                Base:   &newurlfetch.Transport{Context: ctx},
+            },
+        }
+        client.Get("...")
+    }
+
diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go
new file mode 100644
index 0000000..8962c49
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/client_appengine.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+// App Engine hooks.
+
+package oauth2
+
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2/internal"
+	"google.golang.org/appengine/urlfetch"
+)
+
+func init() {
+	internal.RegisterContextClientFunc(contextClientAppEngine)
+}
+
+func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
+	return urlfetch.Client(ctx), nil
+}
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
new file mode 100644
index 0000000..fbe1028
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -0,0 +1,76 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+	"bufio"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+	block, _ := pem.Decode(key)
+	if block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("private key is invalid")
+	}
+	return parsed, nil
+}
+
+func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
+	result := map[string]map[string]string{
+		"": map[string]string{}, // root section
+	}
+	scanner := bufio.NewScanner(ini)
+	currentSection := ""
+	for scanner.Scan() {
+		line := strings.TrimSpace(scanner.Text())
+		if strings.HasPrefix(line, ";") {
+			// comment.
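+			// Only lines starting with ";" are treated as comments here;
+			// "#" is not recognized as a comment marker by this parser.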
+			continue
+		}
+		if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+			currentSection = strings.TrimSpace(line[1 : len(line)-1])
+			result[currentSection] = map[string]string{}
+			continue
+		}
+		parts := strings.SplitN(line, "=", 2)
+		if len(parts) == 2 && parts[0] != "" {
+			result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("error scanning ini: %v", err)
+	}
+	return result, nil
+}
+
+func CondVal(v string) []string {
+	if v == "" {
+		return nil
+	}
+	return []string{v}
+}
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
new file mode 100644
index 0000000..39caf6c
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -0,0 +1,221 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// This type is a mirror of oauth2.Token and exists to break
+// an otherwise-circular dependency. Other internal packages
+// should convert this Token into an oauth2.Token before use.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
+	RefreshToken string
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+	Expiry time.Time
+
+	// Raw optionally contains extra metadata from the server
+	// when updating a token.
+	Raw interface{}
+}
+
+// tokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
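+//
+// Providers differ here: most return expires_in as a number, at least
+// PayPal returns it as a string, and Facebook has used "expires" instead;
+// the expirationTime type and expiry() below absorb these variants.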
+type tokenJSON struct {
+	AccessToken  string         `json:"access_token"`
+	TokenType    string         `json:"token_type"`
+	RefreshToken string         `json:"refresh_token"`
+	ExpiresIn    expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
+	Expires      expirationTime `json:"expires"`    // broken Facebook spelling of expires_in
+}
+
+func (e *tokenJSON) expiry() (t time.Time) {
+	if v := e.ExpiresIn; v != 0 {
+		return time.Now().Add(time.Duration(v) * time.Second)
+	}
+	if v := e.Expires; v != 0 {
+		return time.Now().Add(time.Duration(v) * time.Second)
+	}
+	return
+}
+
+type expirationTime int32
+
+func (e *expirationTime) UnmarshalJSON(b []byte) error {
+	var n json.Number
+	err := json.Unmarshal(b, &n)
+	if err != nil {
+		return err
+	}
+	i, err := n.Int64()
+	if err != nil {
+		return err
+	}
+	*e = expirationTime(i)
+	return nil
+}
+
+var brokenAuthHeaderProviders = []string{
+	"https://accounts.google.com/",
+	"https://api.dropbox.com/",
+	"https://api.instagram.com/",
+	"https://api.netatmo.net/",
+	"https://api.odnoklassniki.ru/",
+	"https://api.pushbullet.com/",
+	"https://api.soundcloud.com/",
+	"https://api.twitch.tv/",
+	"https://app.box.com/",
+	"https://connect.stripe.com/",
+	"https://login.microsoftonline.com/",
+	"https://login.salesforce.com/",
+	"https://oauth.sandbox.trainingpeaks.com/",
+	"https://oauth.trainingpeaks.com/",
+	"https://oauth.vk.com/",
+	"https://slack.com/",
+	"https://test-sandbox.auth.corp.google.com",
+	"https://test.salesforce.com/",
+	"https://user.gini.net/",
+	"https://www.douban.com/",
+	"https://www.googleapis.com/",
+	"https://www.linkedin.com/",
+	"https://www.strava.com/oauth/",
+}
+
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {
+	brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL)
+}
+
+// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
+// implements the OAuth2 spec correctly.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+// In summary:
+// - Reddit only accepts client secret in the Authorization header
+// - Dropbox accepts it in either URL param or Auth header, but not both.
+// - Google only accepts URL param (not spec compliant?), not Auth header
+// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
+func providerAuthHeaderWorks(tokenURL string) bool {
+	for _, s := range brokenAuthHeaderProviders {
+		if strings.HasPrefix(tokenURL, s) {
+			// Some sites fail to implement the OAuth2 spec fully.
+			return false
+		}
+	}
+
+	// Assume the provider implements the spec properly
+	// otherwise. We can add more exceptions as they're
+	// discovered. We will _not_ be adding configurable hooks
+	// to this package to let users select server bugs.
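+	// Reaching here means no prefix matched; RetrieveToken will then send
+	// the client credentials via HTTP Basic auth (req.SetBasicAuth).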
+ return true +} + +func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) { + hc, err := ContextClient(ctx) + if err != nil { + return nil, err + } + v.Set("client_id", ClientID) + bustedAuth := !providerAuthHeaderWorks(TokenURL) + if bustedAuth && ClientSecret != "" { + v.Set("client_secret", ClientSecret) + } + req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if !bustedAuth { + req.SetBasicAuth(ClientID, ClientSecret) + } + r, err := hc.Do(req) + if err != nil { + return nil, err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + if e == "" { + // TODO(jbd): Facebook's OAuth2 implementation is broken and + // returns expires_in field in expires. Remove the fallback to expires, + // when Facebook fixes their implementation. + e = vals.Get("expires") + } + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + return nil, err + } + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. + if token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 0000000..f1f173e --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,69 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. It's immutable +// because nobody else can create a ContextKey, being unexported. +type ContextKey struct{} + +// ContextClientFunc is a func which tries to return an *http.Client +// given a Context value. If it returns an error, the search stops +// with that error. 
If it returns (nil, nil), the search continues +// down the list of registered funcs. +type ContextClientFunc func(context.Context) (*http.Client, error) + +var contextClientFuncs []ContextClientFunc + +func RegisterContextClientFunc(fn ContextClientFunc) { + contextClientFuncs = append(contextClientFuncs, fn) +} + +func ContextClient(ctx context.Context) (*http.Client, error) { + if ctx != nil { + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc, nil + } + } + for _, fn := range contextClientFuncs { + c, err := fn(ctx) + if err != nil { + return nil, err + } + if c != nil { + return c, nil + } + } + return http.DefaultClient, nil +} + +func ContextTransport(ctx context.Context) http.RoundTripper { + hc, err := ContextClient(ctx) + // This is a rare error case (somebody using nil on App Engine). + if err != nil { + return ErrorTransport{err} + } + return hc.Transport +} + +// ErrorTransport returns the specified error on RoundTrip. +// This RoundTripper should be used in rare error cases where +// error handling can be postponed to response handling time. +type ErrorTransport struct{ Err error } + +func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { + return nil, t.Err +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go new file mode 100644 index 0000000..9b7b977 --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,337 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests. +// It can additionally grant authorization with Bearer JWT. +package oauth2 + +import ( + "bytes" + "errors" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). +var NoContext = context.TODO() + +// RegisterBrokenAuthHeaderProvider registers an OAuth2 server +// identified by the tokenURL prefix as an OAuth2 implementation +// which doesn't support the HTTP Basic authentication +// scheme to authenticate with the authorization server. +// Once a server is registered, credentials (client_id and client_secret) +// will be passed as query parameters rather than being present +// in the Authorization header. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + internal.RegisterBrokenAuthHeaderProvider(tokenURL) +} + +// Config describes a typical 3-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // Endpoint contains the resource server's token endpoint + // URLs. These are constants specific to each server and are + // often available via site-specific packages, such as + // google.Endpoint or github.Endpoint. + Endpoint Endpoint + + // RedirectURL is the URL to redirect users going through + // the OAuth flow, after the resource owner's URLs. + RedirectURL string + + // Scope specifies optional requested permissions. + Scopes []string +} + +// A TokenSource is anything that can return a token. 
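+// Implementations in this package include the sources returned by
+// StaticTokenSource, ReuseTokenSource, and Config.TokenSource.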
+type TokenSource interface {
+	// Token returns a token or an error.
+	// Token must be safe for concurrent use by multiple goroutines.
+	// The returned Token must not be modified.
+	Token() (*Token, error)
+}
+
+// Endpoint contains the OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+	AuthURL  string
+	TokenURL string
+}
+
+var (
+	// AccessTypeOnline and AccessTypeOffline are options passed
+	// to the Options.AuthCodeURL method. They modify the
+	// "access_type" field that gets sent in the URL returned by
+	// AuthCodeURL.
+	//
+	// Online is the default if neither is specified. If your
+	// application needs to refresh access tokens when the user
+	// is not present at the browser, then use offline. This will
+	// result in your application obtaining a refresh token the
+	// first time your application exchanges an authorization
+	// code for a user.
+	AccessTypeOnline  AuthCodeOption = SetAuthURLParam("access_type", "online")
+	AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
+
+	// ApprovalForce forces the users to view the consent dialog
+	// and confirm the permissions request at the URL returned
+	// from AuthCodeURL, even if they've already done so.
+	ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
+)
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+	setValue(url.Values)
+}
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// to a provider's authorization endpoint.
+func SetAuthURLParam(key, value string) AuthCodeOption {
+	return setParam{key, value}
+}
+
+// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-zero string and validate that it matches the
+// state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
+	var buf bytes.Buffer
+	buf.WriteString(c.Endpoint.AuthURL)
+	v := url.Values{
+		"response_type": {"code"},
+		"client_id":     {c.ClientID},
+		"redirect_uri":  internal.CondVal(c.RedirectURL),
+		"scope":         internal.CondVal(strings.Join(c.Scopes, " ")),
+		"state":         internal.CondVal(state),
+	}
+	for _, opt := range opts {
+		opt.setValue(v)
+	}
+	if strings.Contains(c.Endpoint.AuthURL, "?") {
+		buf.WriteByte('&')
+	} else {
+		buf.WriteByte('?')
+	}
+	buf.WriteString(v.Encode())
+	return buf.String()
+}
+
+// PasswordCredentialsToken converts a resource owner username and password
+// pair into a token.
+//
+// Per the RFC, this grant type should only be used "when there is a high
+// degree of trust between the resource owner and the client (e.g., the client
+// is part of the device operating system or a highly privileged application),
+// and when other authorization grant types are not available."
+// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
+//
+// The HTTP client to use is derived from the context.
+// If nil, http.DefaultClient is used.
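+//
+// An illustrative sketch (endpoint URL and credentials are placeholders):
+//
+//	conf := &oauth2.Config{
+//		ClientID:     "client-id",
+//		ClientSecret: "client-secret",
+//		Endpoint:     oauth2.Endpoint{TokenURL: "https://provider.example/token"},
+//	}
+//	tok, err := conf.PasswordCredentialsToken(oauth2.NoContext, "username", "password")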
+func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"password"}, + "username": {username}, + "password": {password}, + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Exchange converts an authorization code into a token. +// +// It is used after a resource provider redirects the user back +// to the Redirect URI (the URL obtained from AuthCodeURL). +// +// The HTTP client to use is derived from the context. +// If a client is not provided via the context, http.DefaultClient is used. +// +// The code will be in the *http.Request.FormValue("code"). Before +// calling Exchange, be sure to validate FormValue("state"). +func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context, t *Token) *http.Client { + return NewClient(ctx, c.TokenSource(ctx, t)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, + } +} + +// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// HTTP requests to renew a token using a RefreshToken. +type tokenRefresher struct { + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string +} + +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. +// Within this package, it is used by reuseTokenSource which +// synchronizes calls to this method with its own mutex. +func (tf *tokenRefresher) Token() (*Token, error) { + if tf.refreshToken == "" { + return nil, errors.New("oauth2: token expired and refresh token is not set") + } + + tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tf.refreshToken}, + }) + + if err != nil { + return nil, err + } + if tf.refreshToken != tk.RefreshToken { + tf.refreshToken = tk.RefreshToken + } + return tk, err +} + +// reuseTokenSource is a TokenSource that holds a single token in memory +// and validates its expiry before each call to retrieve it with +// Token. If it's expired, it will be auto-refreshed using the +// new TokenSource. +type reuseTokenSource struct { + new TokenSource // called when t is expired. + + mu sync.Mutex // guards t + t *Token +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. 
+func (s *reuseTokenSource) Token() (*Token, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.t.Valid() {
+		return s.t, nil
+	}
+	t, err := s.new.Token()
+	if err != nil {
+		return nil, err
+	}
+	s.t = t
+	return t, nil
+}
+
+// StaticTokenSource returns a TokenSource that always returns the same token.
+// Because the provided token t is never refreshed, StaticTokenSource is only
+// useful for tokens that never expire.
+func StaticTokenSource(t *Token) TokenSource {
+	return staticTokenSource{t}
+}
+
+// staticTokenSource is a TokenSource that always returns the same Token.
+type staticTokenSource struct {
+	t *Token
+}
+
+func (s staticTokenSource) Token() (*Token, error) {
+	return s.t, nil
+}
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient internal.ContextKey
+
+// NewClient creates an *http.Client from a Context and TokenSource.
+// The returned client is not valid beyond the lifetime of the context.
+//
+// As a special case, if src is nil, a non-OAuth2 client is returned
+// using the provided context. This exists to support related OAuth2
+// packages.
+func NewClient(ctx context.Context, src TokenSource) *http.Client {
+	if src == nil {
+		c, err := internal.ContextClient(ctx)
+		if err != nil {
+			return &http.Client{Transport: internal.ErrorTransport{err}}
+		}
+		return c
+	}
+	return &http.Client{
+		Transport: &Transport{
+			Base:   internal.ContextTransport(ctx),
+			Source: ReuseTokenSource(nil, src),
+		},
+	}
+}
+
+// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// same token as long as it's valid, starting with t.
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+	// Don't wrap a reuseTokenSource in itself. That would work,
+	// but cause an unnecessary number of mutex operations.
+	// Just build the equivalent one.
+	if rt, ok := src.(*reuseTokenSource); ok {
+		if t == nil {
+			// Just use it directly.
+			return rt
+		}
+		src = rt.new
+	}
+	return &reuseTokenSource{
+		t:   t,
+		new: src,
+	}
+}
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
new file mode 100644
index 0000000..7a3167f
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -0,0 +1,158 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2/internal"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
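+// A Token is typically obtained from a Config method such as Exchange
+// or PasswordCredentialsToken, or from a TokenSource, rather than
+// constructed directly.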
+// +// Most users of this package should not access fields of Token +// directly. They're exported mostly for use by related packages +// implementing derivative OAuth2 flows. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string `json:"access_token"` + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string `json:"token_type,omitempty"` + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string `json:"refresh_token,omitempty"` + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time `json:"expiry,omitempty"` + + // raw optionally contains extra metadata from the server + // when updating a token. + raw interface{} +} + +// Type returns t.TokenType if non-empty, else "Bearer". +func (t *Token) Type() string { + if strings.EqualFold(t.TokenType, "bearer") { + return "Bearer" + } + if strings.EqualFold(t.TokenType, "mac") { + return "MAC" + } + if strings.EqualFold(t.TokenType, "basic") { + return "Basic" + } + if t.TokenType != "" { + return t.TokenType + } + return "Bearer" +} + +// SetAuthHeader sets the Authorization header to r using the access +// token in t. +// +// This method is unnecessary when using Transport or an HTTP Client +// returned by this package. +func (t *Token) SetAuthHeader(r *http.Request) { + r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) +} + +// WithExtra returns a new Token that's a clone of t, but using the +// provided raw extra map. This is only intended for use by packages +// implementing derivative OAuth2 flows. +func (t *Token) WithExtra(extra interface{}) *Token { + t2 := new(Token) + *t2 = *t + t2.raw = extra + return t2 +} + +// Extra returns an extra field. +// Extra fields are key-value pairs returned by the server as a +// part of the token retrieval response. +func (t *Token) Extra(key string) interface{} { + if raw, ok := t.raw.(map[string]interface{}); ok { + return raw[key] + } + + vals, ok := t.raw.(url.Values) + if !ok { + return nil + } + + v := vals.Get(key) + switch s := strings.TrimSpace(v); strings.Count(s, ".") { + case 0: // Contains no "."; try to parse as int + if i, err := strconv.ParseInt(s, 10, 64); err == nil { + return i + } + case 1: // Contains a single "."; try to parse as float + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + } + + return v +} + +// expired reports whether the token is expired. +// t must be non-nil. +func (t *Token) expired() bool { + if t.Expiry.IsZero() { + return false + } + return t.Expiry.Add(-expiryDelta).Before(time.Now()) +} + +// Valid reports whether t is non-nil, has an AccessToken, and is not expired. +func (t *Token) Valid() bool { + return t != nil && t.AccessToken != "" && !t.expired() +} + +// tokenFromInternal maps an *internal.Token struct into +// a *Token struct. +func tokenFromInternal(t *internal.Token) *Token { + if t == nil { + return nil + } + return &Token{ + AccessToken: t.AccessToken, + TokenType: t.TokenType, + RefreshToken: t.RefreshToken, + Expiry: t.Expiry, + raw: t.Raw, + } +} + +// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. 
+// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
+// with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+	tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
+	if err != nil {
+		return nil, err
+	}
+	return tokenFromInternal(tk), nil
+}
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
new file mode 100644
index 0000000..92ac7e2
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/transport.go
@@ -0,0 +1,132 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"errors"
+	"io"
+	"net/http"
+	"sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Source.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+	// Source supplies the token to add to outgoing requests'
+	// Authorization headers.
+	Source TokenSource
+
+	// Base is the base RoundTripper used to make HTTP requests.
+	// If nil, http.DefaultTransport is used.
+	Base http.RoundTripper
+
+	mu     sync.Mutex                      // guards modReq
+	modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token. If no token exists or token is expired,
+// tries to refresh/fetch a new token.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	if t.Source == nil {
+		return nil, errors.New("oauth2: Transport's Source is nil")
+	}
+	token, err := t.Source.Token()
+	if err != nil {
+		return nil, err
+	}
+
+	req2 := cloneRequest(req) // per RoundTripper contract
+	token.SetAuthHeader(req2)
+	t.setModReq(req, req2)
+	res, err := t.base().RoundTrip(req2)
+	if err != nil {
+		t.setModReq(req, nil)
+		return nil, err
+	}
+	res.Body = &onEOFReader{
+		rc: res.Body,
+		fn: func() { t.setModReq(req, nil) },
+	}
+	return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *Transport) CancelRequest(req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	if cr, ok := t.base().(canceler); ok {
+		t.mu.Lock()
+		modReq := t.modReq[req]
+		delete(t.modReq, req)
+		t.mu.Unlock()
+		cr.CancelRequest(modReq)
+	}
+}
+
+func (t *Transport) base() http.RoundTripper {
+	if t.Base != nil {
+		return t.Base
+	}
+	return http.DefaultTransport
+}
+
+func (t *Transport) setModReq(orig, mod *http.Request) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.modReq == nil {
+		t.modReq = make(map[*http.Request]*http.Request)
+	}
+	if mod == nil {
+		delete(t.modReq, orig)
+	} else {
+		t.modReq[orig] = mod
+	}
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header, len(r.Header))
+	for k, s := range r.Header {
+		r2.Header[k] = append([]string(nil), s...)
+ } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml new file mode 100644 index 0000000..004172a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 0000000..866d74a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 0000000..8da58fb --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 0000000..1884de6 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,131 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. 
It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 0000000..95ec014 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,742 @@ +package yaml + +import ( + "io" + "os" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. 
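+// yaml_parser_initialize allocates the read buffers; the input source and
+// encoding are set separately via the yaml_parser_set_* helpers below.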
+func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// File read handler. +func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_file.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_file_read_handler + parser.input_file = file +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + return true +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// File write handler. +func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_file.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_file_write_handler + emitter.output_file = file +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. 
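+// Indent values outside the range [2, 9] fall back to the default of 2.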
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } + return true +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } + return true +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } + return true +} + +// Create DOCUMENT-END. 
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } + return true +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } + return true +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 0000000..085cddc --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,683 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
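+//
+// The parser below pulls libyaml events one at a time via skip() and
+// assembles *node values for documents, mappings, sequences, scalars,
+// and aliases, recording anchors on the enclosing document node.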
+
+type parser struct {
+	parser yaml_parser_t
+	event  yaml_event_t
+	doc    *node
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+
+	yaml_parser_set_input_string(&p.parser, b)
+
+	p.skip()
+	if p.event.typ != yaml_STREAM_START_EVENT {
+		panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+	}
+	p.skip()
+	return &p
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+func (p *parser) skip() {
+	if p.event.typ != yaml_NO_EVENT {
+		if p.event.typ == yaml_STREAM_END_EVENT {
+			failf("attempted to go past the end of stream; corrupted value?")
+		}
+		yaml_event_delete(&p.event)
+	}
+	if !yaml_parser_parse(&p.parser, &p.event) {
+		p.fail()
+	}
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+	} else if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+	if anchor != nil {
+		p.doc.anchors[string(anchor)] = n
+	}
+}
+
+func (p *parser) parse() *node {
+	switch p.event.typ {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
+		return nil
+	default:
+		panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
+	}
+	panic("unreachable")
+}
+
+func (p *parser) node(kind int) *node {
+	return &node{
+		kind:   kind,
+		line:   p.event.start_mark.line,
+		column: p.event.start_mark.column,
+	}
+}
+
+func (p *parser) document() *node {
+	n := p.node(documentNode)
+	n.anchors = make(map[string]*node)
+	p.doc = n
+	p.skip()
+	n.children = append(n.children, p.parse())
+	if p.event.typ != yaml_DOCUMENT_END_EVENT {
+		panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
+	}
+	p.skip()
+	return n
+}
+
+func (p *parser) alias() *node {
+	n := p.node(aliasNode)
+	n.value = string(p.event.anchor)
+	p.skip()
+	return n
+}
+
+func (p *parser) scalar() *node {
+	n := p.node(scalarNode)
+	n.value = string(p.event.value)
+	n.tag = string(p.event.tag)
+	n.implicit = p.event.implicit
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	return n
+}
+
+func (p *parser) sequence() *node {
+	n := p.node(sequenceNode)
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	for p.event.typ != yaml_SEQUENCE_END_EVENT {
+		n.children = append(n.children, p.parse())
+	}
+	p.skip()
+	return n
+}
+
+func (p *parser) mapping() *node {
+	n := p.node(mappingNode)
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	for p.event.typ != yaml_MAPPING_END_EVENT {
+		n.children = append(n.children, p.parse(), p.parse())
+	}
+	p.skip()
+	return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
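+//
+// The decoder walks the node tree produced by the parser above and fills in
+// a target reflect.Value. Type mismatches are accumulated in terrors rather
+// than aborting immediately, so a single call can report several bad fields
+// at once. A rough sketch of the usual flow, via the package's exported
+// Unmarshal entry point:
+//
+//	var out struct{ A int }
+//	err := yaml.Unmarshal([]byte("a: 1"), &out)
+//	// parse -> documentNode{mappingNode} -> decoder.mappingStruct sets out.A = 1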
+ +type decoder struct { + doc *node + aliases map[string]bool + mapType reflect.Type + terrors []string +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() +) + +func newDecoder() *decoder { + d := &decoder{mapType: defaultMapType} + d.aliases = make(map[string]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + an, ok := d.doc.anchors[n.value] + if !ok { + failf("unknown anchor '%s' referenced", n.value) + } + if d.aliases[n.value] { + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n.value] = true + good = d.unmarshal(an, out) + delete(d.aliases, n.value) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, 
zeroValue)
+	}
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+	var tag string
+	var resolved interface{}
+	if n.tag == "" && !n.implicit {
+		tag = yaml_STR_TAG
+		resolved = n.value
+	} else {
+		tag, resolved = resolve(n.tag, n.value)
+		if tag == yaml_BINARY_TAG {
+			data, err := base64.StdEncoding.DecodeString(resolved.(string))
+			if err != nil {
+				failf("!!binary value contains invalid base64 data")
+			}
+			resolved = string(data)
+		}
+	}
+	if resolved == nil {
+		if out.Kind() == reflect.Map && !out.CanAddr() {
+			resetMap(out)
+		} else {
+			out.Set(reflect.Zero(out.Type()))
+		}
+		return true
+	}
+	if s, ok := resolved.(string); ok && out.CanAddr() {
+		if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
+			err := u.UnmarshalText([]byte(s))
+			if err != nil {
+				fail(err)
+			}
+			return true
+		}
+	}
+	switch out.Kind() {
+	case reflect.String:
+		if tag == yaml_BINARY_TAG {
+			out.SetString(resolved.(string))
+			good = true
+		} else if resolved != nil {
+			out.SetString(n.value)
+			good = true
+		}
+	case reflect.Interface:
+		if resolved == nil {
+			out.Set(reflect.Zero(out.Type()))
+		} else {
+			out.Set(reflect.ValueOf(resolved))
+		}
+		good = true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch resolved := resolved.(type) {
+		case int:
+			if !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case int64:
+			if !out.OverflowInt(resolved) {
+				out.SetInt(resolved)
+				good = true
+			}
+		case uint64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case float64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case string:
+			if out.Type() == durationType {
+				d, err := time.ParseDuration(resolved)
+				if err == nil {
+					out.SetInt(int64(d))
+					good = true
+				}
+			}
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch resolved := resolved.(type) {
+		case int:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case int64:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case uint64:
+			if !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case float64:
+			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		}
+	case reflect.Bool:
+		switch resolved := resolved.(type) {
+		case bool:
+			out.SetBool(resolved)
+			good = true
+		}
+	case reflect.Float32, reflect.Float64:
+		switch resolved := resolved.(type) {
+		case int:
+			out.SetFloat(float64(resolved))
+			good = true
+		case int64:
+			out.SetFloat(float64(resolved))
+			good = true
+		case uint64:
+			out.SetFloat(float64(resolved))
+			good = true
+		case float64:
+			out.SetFloat(resolved)
+			good = true
+		}
+	case reflect.Ptr:
+		if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
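+			// Allocate a fresh value of the pointed-to type, copy the
+			// resolved scalar into it, and point out at that allocation.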
+ elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + good = true + } + } + if !good { + d.terror(n, tag, out) + } + return good +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Interface: + // No type hints. Will have to use a generic sequence. + iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + out.Set(out.Slice(0, j)) + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) 
+ continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + inlineMap.SetMapIndex(name, value) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 0000000..2befd55 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. 
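+// This simply steps through the string with write, which flushes the buffer
+// whenever fewer than five spare bytes remain (room for the widest four-byte
+// UTF-8 sequence), so no extra bounds checking is needed here.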
+func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
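+//
+// The emitter mirrors libyaml's pushdown automaton: emitter.state holds the
+// current state, and emitter.states is the return stack that the nested
+// sequence and mapping states push onto before delegating to
+// yaml_emitter_emit_node.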
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
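+//
+// For DOCUMENT-START this writes any %YAML/%TAG directives and, unless the
+// document start can remain implicit, the "---" marker. For STREAM-END it
+// flushes the output and moves the emitter into its final state.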
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. 
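+		// An explicit DOCUMENT-END is serialized as "..." on its own line.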
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
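+//
+// simple reports whether the preceding key was written as a simple key. If
+// so, the ":" indicator follows it directly; otherwise ":" is written as a
+// standalone indicator, after an indent when the output is canonical or the
+// line has run past best_width.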
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
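+//
+// Records the node's context (root, sequence item, mapping key/value,
+// simple key) and then dispatches on the event type to the alias, scalar,
+// sequence-start or mapping-start handler below.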
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + } + return false +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
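+//
+// Like the sequence check above, this needs only one event of lookahead: a
+// MAPPING-START immediately followed by MAPPING-END.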
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+	length := 0
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_ALIAS_EVENT:
+		length += len(emitter.anchor_data.anchor)
+	case yaml_SCALAR_EVENT:
+		if emitter.scalar_data.multiline {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix) +
+			len(emitter.scalar_data.value)
+	case yaml_SEQUENCE_START_EVENT:
+		if !yaml_emitter_check_empty_sequence(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	case yaml_MAPPING_START_EVENT:
+		if !yaml_emitter_check_empty_mapping(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	default:
+		return false
+	}
+	return length <= 128
+}
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+	if no_tag && !event.implicit && !event.quoted_implicit {
+		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+	}
+
+	style := event.scalar_style()
+	if style == yaml_ANY_SCALAR_STYLE {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+	if emitter.canonical {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	if emitter.simple_key_context && emitter.scalar_data.multiline {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+
+	if style == yaml_PLAIN_SCALAR_STYLE {
+		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if no_tag && !event.implicit {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+		if !emitter.scalar_data.single_quoted_allowed {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+
+	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+		emitter.tag_data.handle = []byte{'!'}
+	}
+	emitter.scalar_data.style = style
+	return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+	if emitter.anchor_data.anchor == nil {
+		return true
+	}
+	c := []byte{'&'}
+	if emitter.anchor_data.alias {
+		c[0] = '*'
+	}
+	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+		return false
+	}
+	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
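+//
+// A tag that matched a %TAG directive is written as handle plus suffix
+// (e.g. "!!str"); any other tag is written in the verbatim "!<...>" form.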
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
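+//
+// Anchors and aliases share the same rule: the name must be non-empty and
+// every character must pass is_alpha; otherwise an emitter error is set.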
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceeded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceeded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceeded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceeded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
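+//
+// Pre-analyzes the event's anchor, tag and scalar value and caches the
+// results in emitter.anchor_data, tag_data and scalar_data for the write
+// path above.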
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 0000000..84f8499 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,306 @@ +package yaml + +import ( + "encoding" + "fmt" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool +} + +func newEncoder() (e *encoder) { + e = &encoder{} + e.must(yaml_emitter_initialize(&e.emitter)) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) + e.emit() + e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) + e.emit() + return e +} + +func (e *encoder) finish() { + e.must(yaml_document_end_event_initialize(&e.event, true)) + e.emit() + e.emitter.open_ended = false + e.must(yaml_stream_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { + e.must(false) + } +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() { + e.nilv() + return + } + iface := in.Interface() + if m, ok := iface.(Marshaler); ok { + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + } else if m, ok := iface.(encoding.TextMarshaler); ok { + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + } + switch in.Kind() { + case reflect.Interface: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + f() + e.must(yaml_mapping_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) slicev(tag 
string, in reflect.Value) {
+	implicit := tag == ""
+	style := yaml_BLOCK_SEQUENCE_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_SEQUENCE_STYLE
+	}
+	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	n := in.Len()
+	for i := 0; i < n; i++ {
+		e.marshal("", in.Index(i))
+	}
+	e.must(yaml_sequence_end_event_initialize(&e.event))
+	e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+	// Fast path.
+	if s == "" {
+		return false
+	}
+	c := s[0]
+	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+		return false
+	}
+	// Do the full match.
+	return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+	var style yaml_scalar_style_t
+	s := in.String()
+	rtag, rs := resolve("", s)
+	if rtag == yaml_BINARY_TAG {
+		if tag == "" || tag == yaml_STR_TAG {
+			tag = rtag
+			s = rs.(string)
+		} else if tag == yaml_BINARY_TAG {
+			failf("explicitly tagged !!binary data must be base64-encoded")
+		} else {
+			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+		}
+	}
+	if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	} else if strings.Contains(s, "\n") {
+		style = yaml_LITERAL_SCALAR_STYLE
+	} else {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+	e.emitScalar(s, "", tag, style)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+	var s string
+	if in.Bool() {
+		s = "true"
+	} else {
+		s = "false"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+	s := strconv.FormatInt(in.Int(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+	s := strconv.FormatUint(in.Uint(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+	// FIXME: Handle 64 bits here.
+	s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
+	switch s {
+	case "+Inf":
+		s = ".inf"
+	case "-Inf":
+		s = "-.inf"
+	case "NaN":
+		s = ".nan"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) nilv() {
+	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+	implicit := tag == ""
+	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+	e.emit()
+}
diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go
new file mode 100644
index 0000000..0a7037a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/parserc.go
@@ -0,0 +1,1096 @@
+package yaml
+
+import (
+	"bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document?
explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
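+//
+// (Editor's illustration, not upstream documentation: for a single-document
+// stream such as "a: 1", the dispatcher below steps roughly through
+// STREAM_START -> IMPLICIT_DOCUMENT_START -> BLOCK_NODE ->
+// BLOCK_MAPPING_FIRST_KEY -> BLOCK_MAPPING_VALUE -> BLOCK_MAPPING_KEY ->
+// DOCUMENT_END -> DOCUMENT_START -> END, emitting the corresponding
+// stream, document, mapping, and scalar events along the way.)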
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } + return false +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+//              ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
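+//
+// (Illustrative note, not from the upstream sources: empty scalars stand in
+// for omitted nodes, so a block mapping entry written as just "a:" yields a
+// zero-length plain scalar event for the missing value, positioned at the
+// end of the ':' indicator.)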
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+	*event = yaml_event_t{
+		typ:        yaml_SCALAR_EVENT,
+		start_mark: mark,
+		end_mark:   mark,
+		value:      nil, // Empty
+		implicit:   true,
+		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+	}
+	return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+	{[]byte("!"), []byte("!")},
+	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+	version_directive_ref **yaml_version_directive_t,
+	tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+	var version_directive *yaml_version_directive_t
+	var tag_directives []yaml_tag_directive_t
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+		if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+			if version_directive != nil {
+				yaml_parser_set_parser_error(parser,
+					"found duplicate %YAML directive", token.start_mark)
+				return false
+			}
+			if token.major != 1 || token.minor != 1 {
+				yaml_parser_set_parser_error(parser,
+					"found incompatible YAML document", token.start_mark)
+				return false
+			}
+			version_directive = &yaml_version_directive_t{
+				major: token.major,
+				minor: token.minor,
+			}
+		} else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+			value := yaml_tag_directive_t{
+				handle: token.value,
+				prefix: token.prefix,
+			}
+			if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+				return false
+			}
+			tag_directives = append(tag_directives, value)
+		}
+
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+	}
+
+	for i := range default_tag_directives {
+		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+			return false
+		}
+	}
+
+	if version_directive_ref != nil {
+		*version_directive_ref = version_directive
+	}
+	if tag_directives_ref != nil {
+		*tag_directives_ref = tag_directives
+	}
+	return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+	for i := range parser.tag_directives {
+		if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+		}
+	}
+
+	// [Go] I suspect the copy is unnecessary. This was likely done
+	// because there was no way to track ownership of the data.
+	value_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(value_copy.handle, value.handle)
+	copy(value_copy.prefix, value.prefix)
+	parser.tag_directives = append(parser.tag_directives, value_copy)
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 0000000..f450791
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,394 @@
+package yaml
+
+import (
+	"io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks.
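+//
+// (Illustrative note, not from the upstream sources: per
+// yaml_parser_determine_encoding below, an input beginning with the bytes of
+// bom_UTF8 (0xEF 0xBB 0xBF) is treated as UTF-8, and the BOM itself is
+// skipped before any character reaches the scanner.)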
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we had enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
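+	// (Editor's note: extending len up to cap lets the decode loop below
+	// write characters into the buffer in place; the slice is cut back to
+	// buffer_len before every return.)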
+ parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). 
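+				// (Editor's worked example of the formulas below: U+1F600 is
+				// stored as the surrogate pair 0xD83D 0xDE00, and
+				// 0x10000 + ((0xD83D & 0x3FF) << 10) + (0xDE00 & 0x3FF)
+				// recovers 0x1F600.)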
+ // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 
11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 0000000..93a8632 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,203 @@ +package yaml + +import ( + "encoding/base64" + "math" + "strconv" + "strings" + "unicode/utf8" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + return true + } + return false +} + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. 
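+		// (Illustrative examples from resolveMapList above: "yes" resolves
+		// to the bool true, "~" to a null, and ".inf" to positive infinity.)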
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return yaml_INT_TAG, int(intv)
+				} else {
+					return yaml_INT_TAG, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return yaml_INT_TAG, uintv
+			}
+			floatv, err := strconv.ParseFloat(plain, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return yaml_INT_TAG, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt(plain[3:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, -int(intv)
+					} else {
+						return yaml_INT_TAG, -intv
+					}
+				}
+			}
+			// XXX Handle timestamps here.
+
+		default:
+			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	if tag == yaml_BINARY_TAG {
+		return yaml_BINARY_TAG, in
+	}
+	if utf8.ValidString(in) {
+		return yaml_STR_TAG, in
+	}
+	return yaml_BINARY_TAG, encodeBase64(in)
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 0000000..2580800
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2710 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or,
+// LL(1) parser, as it is usually called).
+// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in detail. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// FLOW-MAPPING-START # '{' +// FLOW-MAPPING-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The corresponding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tags, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrates the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. 
A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produces the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the +// indentation increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denotes the indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities +// that make detection of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' respectively. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. 
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. 
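+// The context/problem strings and their marks recorded below are what the +// exported yaml API later formats into messages such as +// "yaml: line N: did not find expected ':'".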
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +// trace prints its arguments on entry and returns a function that prints them +// again; meant to be used as `defer trace("name")()` while debugging. +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' 
&& is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // A simple key is required only when it is the first token in the current + // line. Therefore it is always allowed. But we add a check anyway. + if required && !parser.simple_key_allowed { + panic("should not happen") + } + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. 
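+// A "required" key is one that starts at the current indentation column in +// the block context; if it has to be dropped before its ':' was found, the +// document cannot be valid, so the scanner fails here.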
+ if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + return true +} + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // Increase the flow level. + parser.flow_level++ + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + } + return true +} + +// Push the current indentation level to the stack and set the new level if +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less than or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. 
+ if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. 
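+// FLOW-ENTRY is emitted for every ',' separating entries inside '[...]' and +// '{...}'; the matching collection delimiters are handled by the two +// fetchers above.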
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not necessarily simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. 
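+// The token is inserted retroactively at the queue position remembered when +// the simple key was saved, which is why save_simple_key records +// token_number above.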
+ token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. 
+ var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. 
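+// The handle and prefix are stored verbatim here; expanding '!handle!suffix' +// tags against %TAG prefixes is the parser's job, not the scanner's.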
+ *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. 
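+// max_number_length above caps each component at two digits, so the largest +// version a directive can carry is %YAML 99.99.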
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
|| + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. 
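+// (is_alpha in this port accepts ASCII letters, digits, '_' and '-'.)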
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && !(s[0] == '!' && s[1] == 0) { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the tag is non-empty. + if len(s) == 0 { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. 
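+// A multi-byte UTF-8 character arrives as consecutive %XX escapes; w is set +// from the leading octet below and counts down the continuation octets.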
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
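+// At this point chomping is -1 for '-', +1 for '+' or 0 for the default clip +// behaviour, and increment holds any explicit indentation indicator.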
+ if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. 
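+		// While the indentation is still undetermined (*indent == 0),
+		// remember the deepest column reached in max_indent; it becomes
+		// the auto-detected indentation level after the loop.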
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		// Check for a tab character messing up the indentation.
+		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		// Have we found a non-empty line?
+		if !is_break(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		// [Go] Should really be returning breaks instead.
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	// Determine the indentation level if needed.
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+	return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
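+				// Single-character escapes append their expansion
+				// directly; '\x', '\u' and '\U' only set code_length
+				// here and are decoded from hex digits further below.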
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+				s = read(parser, s)
+			}
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Check if we are at the end of the scalar.
+		if single {
+			if parser.buffer[parser.buffer_pos] == '\'' {
+				break
+			}
+		} else {
+			if parser.buffer[parser.buffer_pos] == '"' {
+				break
+			}
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
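+				// The first break after the content goes into
+				// leading_break; any further breaks accumulate in
+				// trailing_breaks and decide below whether folding
+				// yields a single space or keeps the newlines.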
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Join the whitespaces or fold line breaks.
+		if leading_blanks {
+			// Do we need to fold line breaks?
+			if len(leading_break) > 0 && leading_break[0] == '\n' {
+				if len(trailing_breaks) == 0 {
+					s = append(s, ' ')
+				} else {
+					s = append(s, trailing_breaks...)
+				}
+			} else {
+				s = append(s, leading_break...)
+				s = append(s, trailing_breaks...)
+			}
+			trailing_breaks = trailing_breaks[:0]
+			leading_break = leading_break[:0]
+		} else {
+			s = append(s, whitespaces...)
+			whitespaces = whitespaces[:0]
+		}
+	}
+
+	// Eat the right quote.
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
+	}
+	if !single {
+		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	return true
+}
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	var leading_blanks bool
+	var indent = parser.indent + 1
+
+	start_mark := parser.mark
+	end_mark := parser.mark
+
+	// Consume the content of the plain scalar.
+	for {
+		// Check for a document indicator.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			break
+		}
+
+		// Check for a comment.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			break
+		}
+
+		// Consume non-blank characters.
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+			// Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
+			if parser.flow_level > 0 &&
+				parser.buffer[parser.buffer_pos] == ':' &&
+				!is_blankz(parser.buffer, parser.buffer_pos+1) {
+				yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+					start_mark, "found unexpected ':'")
+				return false
+			}
+
+			// Check for indicators that may end a plain scalar.
+			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+				(parser.flow_level > 0 &&
+					(parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
+						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+						parser.buffer[parser.buffer_pos] == '}')) {
+				break
+			}
+
+			// Check if we need to join whitespaces and breaks.
+			if leading_blanks || len(whitespaces) > 0 {
+				if leading_blanks {
+					// Do we need to fold line breaks?
+					if leading_break[0] == '\n' {
+						if len(trailing_breaks) == 0 {
+							s = append(s, ' ')
+						} else {
+							s = append(s, trailing_breaks...)
+						}
+					} else {
+						s = append(s, leading_break...)
+						s = append(s, trailing_breaks...)
+					}
+					trailing_breaks = trailing_breaks[:0]
+					leading_break = leading_break[:0]
+					leading_blanks = false
+				} else {
+					s = append(s, whitespaces...)
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for a tab character that abuses the indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level.
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
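+	// A plain scalar that ended right after a line break leaves the
+	// scanner at the start of a fresh line, where a simple key may
+	// occur again.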
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 0000000..5958822 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,104 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 0000000..190362f --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,89 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + // If the output encoding is UTF-8, we don't need to recode the buffer. 
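+	// (The working buffer is kept in UTF-8 internally, so it can be
+	// handed to the write handler as-is; only the UTF-16 encodings
+	// need the recoding pass below.)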
+ if emitter.encoding == yaml_UTF8_ENCODING { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true + } + + // Recode the buffer into the raw buffer. + var low, high int + if emitter.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + pos := 0 + for pos < emitter.buffer_pos { + // See the "reader.c" code for more details on UTF-8 encoding. Note + // that we assume that the buffer contains a valid UTF-8 sequence. + + // Read the next UTF-8 character. + octet := emitter.buffer[pos] + + var w int + var value rune + switch { + case octet&0x80 == 0x00: + w, value = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, value = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, value = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, value = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = emitter.buffer[pos+k] + value = (value << 6) + (rune(octet) & 0x3F) + } + pos += w + + // Write the character. + if value < 0x10000 { + var b [2]byte + b[high] = byte(value >> 8) + b[low] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) + } else { + // Write the character using a surrogate pair (check "reader.c"). + var b [4]byte + value -= 0x10000 + b[high] = byte(0xD8 + (value >> 18)) + b[low] = byte((value >> 10) & 0xFF) + b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) + b[low+2] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) + } + } + + // Write the raw buffer. + if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + emitter.raw_buffer = emitter.raw_buffer[:0] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go new file mode 100644 index 0000000..36d6b88 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -0,0 +1,346 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. 
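+//
+// For example, a (hypothetical) redacted type could substitute a
+// placeholder for its real value:
+//
+//     type redacted struct{}
+//
+//     func (redacted) MarshalYAML() (interface{}, error) {
+//         return "REDACTED", nil
+//     }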
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatch, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Does not apply to zero valued structs.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int "a,omitempty"
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshal("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+
+	// Inline holds the field index if the field is part of an inlined struct.
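+	// It is the path of field indexes from the outer struct down to the
+	// nested field, built up in getStructInfo below.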
+	Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+	fieldMapMutex.RLock()
+	sinfo, found := structMap[st]
+	fieldMapMutex.RUnlock()
+	if found {
+		return sinfo, nil
+	}
+
+	n := st.NumField()
+	fieldsMap := make(map[string]fieldInfo)
+	fieldsList := make([]fieldInfo, 0, n)
+	inlineMap := -1
+	for i := 0; i != n; i++ {
+		field := st.Field(i)
+		if field.PkgPath != "" && !field.Anonymous {
+			continue // Private field
+		}
+
+		info := fieldInfo{Num: i}
+
+		tag := field.Tag.Get("yaml")
+		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+			tag = string(field.Tag)
+		}
+		if tag == "-" {
+			continue
+		}
+
+		inline := false
+		fields := strings.Split(tag, ",")
+		if len(fields) > 1 {
+			for _, flag := range fields[1:] {
+				switch flag {
+				case "omitempty":
+					info.OmitEmpty = true
+				case "flow":
+					info.Flow = true
+				case "inline":
+					inline = true
+				default:
+					return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
+				}
+			}
+			tag = fields[0]
+		}
+
+		if inline {
+			switch field.Type.Kind() {
+			case reflect.Map:
+				if inlineMap >= 0 {
+					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+				}
+				if field.Type.Key() != reflect.TypeOf("") {
+					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+				}
+				inlineMap = info.Num
+			case reflect.Struct:
+				sinfo, err := getStructInfo(field.Type)
+				if err != nil {
+					return nil, err
+				}
+				for _, finfo := range sinfo.FieldsList {
+					if _, found := fieldsMap[finfo.Key]; found {
+						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+						return nil, errors.New(msg)
+					}
+					if finfo.Inline == nil {
+						finfo.Inline = []int{i, finfo.Num}
+					} else {
+						finfo.Inline = append([]int{i}, finfo.Inline...)
+ } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 0000000..d60a6b6 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,716 @@ +package yaml + +import ( + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. 
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT   // A STREAM-START event.
+	yaml_STREAM_END_EVENT     // A STREAM-END event.
+	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
+	yaml_ALIAS_EVENT          // An ALIAS event.
+	yaml_SCALAR_EVENT         // A SCALAR event.
+	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
+	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
+	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
+)
+
+// The event structure.
+type yaml_event_t struct {
+
+	// The event type.
+	typ yaml_event_type_t
+
+	// The start and end of the event.
+	start_mark, end_mark yaml_mark_t
+
+	// The document encoding (for yaml_STREAM_START_EVENT).
+	encoding yaml_encoding_t
+
+	// The version directive (for yaml_DOCUMENT_START_EVENT).
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+	tag_directives []yaml_tag_directive_t
+
+	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. 
+type yaml_document_t struct {
+
+	// The document nodes.
+	nodes []yaml_node_t
+
+	// The version directive.
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives.
+	tag_directives_data  []yaml_tag_directive_t
+	tag_directives_start int // The beginning of the tag directives list.
+	tag_directives_end   int // The end of the tag directives list.
+
+	start_implicit int // Is the document start indicator implicit?
+	end_implicit   int // Is the document end indicator implicit?
+
+	// The start/end of the document.
+	start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write not more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out]   data        A pointer to an application data specified by
+//                        yaml_parser_set_input().
+// [out]      buffer      The buffer to write the data from the source.
+// [in]       size        The size of the buffer.
+// [out]      size_read   The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible     bool        // Is a simple key possible?
+	required     bool        // Is a simple key required?
+	token_number int         // The number of the token.
+	mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+	anchor []byte      // The anchor.
+	index  int         // The node id.
+	mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The byte about which the problem occurred.
+	problem_offset int
+	problem_value  int
+	problem_mark   yaml_mark_t
+
+	// The error context.
+	context      string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_file io.Reader // File input data.
+	input      []byte    // String input data.
+	input_pos  int
+
+	eof bool // EOF flag
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The input encoding.
+ + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. 
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_file   io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // If the output is in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // If the last character was a whitespace?
+	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
+	open_ended bool // If an explicit document end is required?
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 0000000..8110ce3
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+ output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. 
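+// (The composed checks below expand is_break and is_z by hand, as the
+// commented-out forms show, to keep them cheap enough to be inlined.)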
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +}