
Switch to dep for vendoring

Signed-off-by: Knut Ahlers <knut@ahlers.me>
Knut Ahlers 2018-09-17 21:32:52 +02:00
parent 54c5b7b597
commit 360c8ec055
Signed by: luzifer
GPG key ID: DC2729FDD34BE99E
71 changed files with 11659 additions and 2653 deletions

28
Godeps/Godeps.json generated
@@ -1,28 +0,0 @@
{
"ImportPath": "github.com/Luzifer/expose",
"GoVersion": "go1.8",
"GodepVersion": "v79",
"Deps": [
{
"ImportPath": "github.com/inconshreveable/mousetrap",
"Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
},
{
"ImportPath": "github.com/mattn/go-runewidth",
"Comment": "v0.0.2-1-g14207d2",
"Rev": "14207d285c6c197daabb5c9793d63e7af9ab2d50"
},
{
"ImportPath": "github.com/olekukonko/tablewriter",
"Rev": "febf2d34b54a69ce7530036c7503b1c9fbfdf0bb"
},
{
"ImportPath": "github.com/spf13/cobra",
"Rev": "9c28e4bbd74e5c3ed7aacbc552b2cab7cfdfe744"
},
{
"ImportPath": "github.com/spf13/pflag",
"Rev": "c7e63cf4530bcd3ba943729cee0efeff2ebea63f"
}
]
}

5
Godeps/Readme generated
@@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

78
Gopkg.lock generated Normal file
@@ -0,0 +1,78 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:e071d88fce5f456a724ee19e9724ee37940ad0bfd378e11c943c2483032d9ba1"
name = "github.com/Luzifer/go_helpers"
packages = [
"accessLogger",
"http",
"str",
]
pruneopts = "NUT"
revision = "c3bea85c97943065c31d13b2193a9ef8c8488fb2"
version = "v2.8.0"
[[projects]]
digest = "1:406338ad39ab2e37b7f4452906442a3dbf0eb3379dd1f06aafb5c07e769a5fbb"
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
pruneopts = "NUT"
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
digest = "1:bff482b22ebed387378546ba6a7850fdef87fd47f8ee58a7c62124a8e889a56b"
name = "github.com/mattn/go-runewidth"
packages = ["."]
pruneopts = "NUT"
revision = "ce7b0b5c7b45a81508558cd1dba6bb1e4ddb51bb"
version = "v0.0.3"
[[projects]]
branch = "master"
digest = "1:e6baebf0bd20950e285254902a8f2241b4870528fb63fdc10460c67b1f31b1a3"
name = "github.com/olekukonko/tablewriter"
packages = ["."]
pruneopts = "NUT"
revision = "be2c049b30ccd4d3fd795d6bf7dce74e42eeedaa"
[[projects]]
digest = "1:343d44e06621142ab09ae0c76c1799104cdfddd3ffb445d78b1adf8dc3ffaf3d"
name = "github.com/spf13/cobra"
packages = ["."]
pruneopts = "NUT"
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
version = "v0.0.3"
[[projects]]
digest = "1:e3707aeaccd2adc89eba6c062fec72116fe1fc1ba71097da85b4d8ae1668a675"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "NUT"
revision = "9a97c102cda95a86cec2345a6f09f55a939babf5"
version = "v1.0.2"
[[projects]]
branch = "master"
digest = "1:82717a3ee6864b3537c0905fbb07ff9261a1368c7eee4d6281af3496ecf9de8f"
name = "golang.org/x/net"
packages = [
"context",
"webdav",
"webdav/internal/xml",
]
pruneopts = "NUT"
revision = "26e67e76b6c3f6ce91f7c52def5af501b4e0f3a2"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/Luzifer/go_helpers/http",
"github.com/olekukonko/tablewriter",
"github.com/spf13/cobra",
"golang.org/x/net/webdav",
]
solver-name = "gps-cdcl"
solver-version = 1

47
Gopkg.toml Normal file
@@ -0,0 +1,47 @@
# Gopkg.toml example
#
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true
[[constraint]]
name = "github.com/Luzifer/go_helpers"
version = "2.8.0"
[[constraint]]
branch = "master"
name = "github.com/olekukonko/tablewriter"
[[constraint]]
name = "github.com/spf13/cobra"
version = "0.0.3"
[[constraint]]
branch = "master"
name = "golang.org/x/net"
[prune]
non-go = true
go-tests = true
unused-packages = true

202
vendor/github.com/Luzifer/go_helpers/LICENSE generated vendored Normal file
@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016- Knut Ahlers <knut@ahlers.me>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -0,0 +1,37 @@
package accessLogger
import (
"fmt"
"net/http"
"strconv"
)
type AccessLogResponseWriter struct {
StatusCode int
Size int
http.ResponseWriter
}
func New(res http.ResponseWriter) *AccessLogResponseWriter {
return &AccessLogResponseWriter{
StatusCode: 200,
Size: 0,
ResponseWriter: res,
}
}
func (a *AccessLogResponseWriter) Write(out []byte) (int, error) {
s, err := a.ResponseWriter.Write(out)
a.Size += s
return s, err
}
func (a *AccessLogResponseWriter) WriteHeader(code int) {
a.StatusCode = code
a.ResponseWriter.WriteHeader(code)
}
func (a *AccessLogResponseWriter) HTTPResponseType() string {
return fmt.Sprintf("%cxx", strconv.FormatInt(int64(a.StatusCode), 10)[0])
}
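For orientation, a minimal usage sketch of this wrapper; the handler, port and log format below are illustrative and not part of the vendored code:

```go
package main

import (
	"log"
	"net/http"

	"github.com/Luzifer/go_helpers/accessLogger"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Wrap the original writer so status code and body size are captured.
		alw := accessLogger.New(w)
		alw.WriteHeader(http.StatusOK)
		alw.Write([]byte("hello"))
		log.Printf("wrote %d bytes, response class %s", alw.Size, alw.HTTPResponseType())
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```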

55
vendor/github.com/Luzifer/go_helpers/http/digest.go generated vendored Normal file
@@ -0,0 +1,55 @@
package http
import (
"crypto/md5"
"crypto/rand"
"encoding/hex"
"fmt"
"io"
"net/http"
"strings"
"github.com/Luzifer/go_helpers/str"
)
func GetDigestAuth(resp *http.Response, method, requestPath, user, password string) string {
params := map[string]string{}
for _, part := range strings.Split(resp.Header.Get("Www-Authenticate"), " ") {
if !strings.Contains(part, `="`) {
continue
}
spl := strings.Split(strings.Trim(part, " ,"), "=")
if !str.StringInSlice(spl[0], []string{"nonce", "realm", "qop"}) {
continue
}
params[spl[0]] = strings.Trim(spl[1], `"`)
}
b := make([]byte, 8)
io.ReadFull(rand.Reader, b)
params["cnonce"] = fmt.Sprintf("%x", b)
params["nc"] = "1"
params["uri"] = requestPath
params["username"] = user
params["response"] = getMD5([]string{
getMD5([]string{params["username"], params["realm"], password}),
params["nonce"],
params["nc"],
params["cnonce"],
params["qop"],
getMD5([]string{method, requestPath}),
})
authParts := []string{}
for k, v := range params {
authParts = append(authParts, fmt.Sprintf("%s=%q", k, v))
}
return "Digest " + strings.Join(authParts, ", ")
}
func getMD5(in []string) string {
h := md5.New()
h.Write([]byte(strings.Join(in, ":")))
return hex.EncodeToString(h.Sum(nil))
}
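A hedged sketch of how GetDigestAuth is meant to be called, assuming a server that answers with a Digest challenge; the URL, path and credentials are placeholders:

```go
package main

import (
	"fmt"
	"net/http"

	gohttp "github.com/Luzifer/go_helpers/http"
)

func main() {
	const url = "http://example.com/protected" // placeholder endpoint
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()

	if resp.StatusCode == http.StatusUnauthorized {
		req, _ := http.NewRequest(http.MethodGet, url, nil)
		// Derive the Digest Authorization header from the 401 challenge.
		req.Header.Set("Authorization",
			gohttp.GetDigestAuth(resp, http.MethodGet, "/protected", "user", "secret"))
		resp, err = http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()
		fmt.Println(resp.Status)
	}
}
```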

47
vendor/github.com/Luzifer/go_helpers/http/gzip.go generated vendored Normal file
@@ -0,0 +1,47 @@
package http
import (
"compress/gzip"
"io"
"net/http"
"strings"
)
type gzipResponseWriter struct {
io.Writer
http.ResponseWriter
}
func (w gzipResponseWriter) Write(b []byte) (int, error) {
return w.Writer.Write(b)
}
// GzipHandler wraps an http.Handler and gzips responses if the client supports it
func GzipHandler(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
handler.ServeHTTP(w, r)
return
}
w.Header().Set("Content-Encoding", "gzip")
gz := gzip.NewWriter(w)
defer gz.Close()
gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w}
handler.ServeHTTP(gzw, r)
})
}
// GzipFunc wraps an http.HandlerFunc and gzips responses if the client supports it
func GzipFunc(f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
f(w, r)
return
}
w.Header().Set("Content-Encoding", "gzip")
gz := gzip.NewWriter(w)
defer gz.Close()
gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w}
f(gzw, r)
}
}
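A small usage sketch for the gzip wrappers above (port and response body are placeholders): wrap any http.Handler and the response is compressed only when the client advertises gzip support.

```go
package main

import (
	"log"
	"net/http"

	gohttp "github.com/Luzifer/go_helpers/http"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("compressed when the client sends Accept-Encoding: gzip"))
	})
	// Clients without gzip support receive the response uncompressed.
	log.Fatal(http.ListenAndServe(":8080", gohttp.GzipHandler(mux)))
}
```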

@@ -0,0 +1,52 @@
package http
import (
"log"
"net/http"
"strings"
"time"
"github.com/Luzifer/go_helpers/accessLogger"
)
type HTTPLogHandler struct {
Handler http.Handler
TrustedIPHeaders []string
}
func NewHTTPLogHandler(h http.Handler) http.Handler {
return HTTPLogHandler{
Handler: h,
TrustedIPHeaders: []string{"X-Forwarded-For", "RemoteAddr", "X-Real-IP"},
}
}
func (l HTTPLogHandler) ServeHTTP(res http.ResponseWriter, r *http.Request) {
start := time.Now()
ares := accessLogger.New(res)
l.Handler.ServeHTTP(ares, r)
log.Printf("%s - \"%s %s\" %d %d \"%s\" \"%s\" %s",
l.findIP(r),
r.Method,
r.URL.Path,
ares.StatusCode,
ares.Size,
r.Header.Get("Referer"),
r.Header.Get("User-Agent"),
time.Since(start),
)
}
func (l HTTPLogHandler) findIP(r *http.Request) string {
remoteAddr := strings.SplitN(r.RemoteAddr, ":", 2)[0]
for _, hdr := range l.TrustedIPHeaders {
if value := r.Header.Get(hdr); value != "" {
return strings.SplitN(value, ",", 2)[0]
}
}
return remoteAddr
}
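Correspondingly, a minimal sketch wrapping a mux with the log handler (port and response are illustrative); each request is then logged with client IP, method, path, status, size and duration as implemented above:

```go
package main

import (
	"log"
	"net/http"

	gohttp "github.com/Luzifer/go_helpers/http"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	// Every request produces one access-log line via HTTPLogHandler.
	log.Fatal(http.ListenAndServe(":8080", gohttp.NewHTTPLogHandler(mux)))
}
```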

21
vendor/github.com/Luzifer/go_helpers/str/slice.go generated vendored Normal file
@@ -0,0 +1,21 @@
package str
// AppendIfMissing adds a string to a slice when it's not present yet
func AppendIfMissing(slice []string, s string) []string {
for _, e := range slice {
if e == s {
return slice
}
}
return append(slice, s)
}
// StringInSlice checks for the existence of a string in the slice
func StringInSlice(a string, list []string) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}
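A tiny illustration of the two helpers; the slice contents are made up:

```go
package main

import (
	"fmt"

	"github.com/Luzifer/go_helpers/str"
)

func main() {
	hosts := []string{"alpha", "beta"}
	hosts = str.AppendIfMissing(hosts, "alpha") // already present, slice unchanged
	hosts = str.AppendIfMissing(hosts, "gamma") // appended
	fmt.Println(hosts)                            // [alpha beta gamma]
	fmt.Println(str.StringInSlice("beta", hosts)) // true
}
```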

@@ -1,23 +0,0 @@
# mousetrap
mousetrap is a tiny library that answers a single question.
On a Windows machine, was the process invoked by someone double clicking on
the executable file while browsing in explorer?
### Motivation
Windows developers unfamiliar with command line tools will often "double-click"
the executable for a tool. Because most CLI tools print the help and then exit
when invoked without arguments, this is often very frustrating for those users.
mousetrap provides a way to detect these invocations so that you can provide
more helpful behavior and instructions on how to run the CLI tool. To see what
this looks like, both from an organizational and a technical perspective, see
https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
### The interface
The library exposes a single interface:
func StartedByExplorer() (bool)
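For reference, a sketch of how the single function named above is commonly used; the message text and delay are illustrative and not taken from the mousetrap sources:

```go
package main

import (
	"fmt"
	"time"

	"github.com/inconshreveable/mousetrap"
)

func main() {
	if mousetrap.StartedByExplorer() {
		// Launched by double-click on Windows: explain usage instead of
		// printing help and vanishing immediately.
		fmt.Println("This is a command line tool; please run it from a terminal.")
		time.Sleep(5 * time.Second)
		return
	}
	fmt.Println("started from a shell")
}
```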

@@ -1,8 +0,0 @@
language: go
go:
- tip
before_install:
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
script:
- $HOME/gopath/bin/goveralls -repotoken lAKAWPzcGsD3A8yBX3BGGtRUdJ6CaGERL

@@ -1,27 +0,0 @@
go-runewidth
============
[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth)
[![Coverage Status](https://coveralls.io/repos/mattn/go-runewidth/badge.png?branch=HEAD)](https://coveralls.io/r/mattn/go-runewidth?branch=HEAD)
[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth)
[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth)
Provides functions to get fixed width of the character or string.
Usage
-----
```go
runewidth.StringWidth("つのだ☆HIRO") == 12
```
Author
------
Yasuhiro Matsumoto
License
-------
under the MIT License: http://mattn.mit-license.org/2013

@@ -1,13 +1,24 @@
package runewidth
import "os"
var (
// EastAsianWidth will be set true if the current locale is CJK
EastAsianWidth = IsEastAsian()
EastAsianWidth bool
// DefaultCondition is a condition in current locale
DefaultCondition = &Condition{EastAsianWidth}
)
func init() {
env := os.Getenv("RUNEWIDTH_EASTASIAN")
if env == "" {
EastAsianWidth = IsEastAsian()
} else {
EastAsianWidth = env == "1"
}
}
type interval struct {
first rune
last rune
@@ -55,6 +66,7 @@ var private = table{
var nonprint = table{
{0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD},
{0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F},
{0x2028, 0x2029},
{0x202A, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF},
{0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF},
}

@@ -1,8 +0,0 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- tip

@@ -1,204 +0,0 @@
ASCII Table Writer
=========
[![Build Status](https://travis-ci.org/olekukonko/tablewriter.png?branch=master)](https://travis-ci.org/olekukonko/tablewriter) [![Total views](https://sourcegraph.com/api/repos/github.com/olekukonko/tablewriter/counters/views.png)](https://sourcegraph.com/github.com/olekukonko/tablewriter)
Generate ASCII tables on the fly ... Installation is as simple as
go get github.com/olekukonko/tablewriter
#### Features
- Automatic Padding
- Support Multiple Lines
- Supports Alignment
- Support Custom Separators
- Automatic Alignment of numbers & percentage
- Write directly to http , file etc via `io.Writer`
- Read directly from CSV file
- Optional row line via `SetRowLine`
- Normalise table header
- Make CSV Headers optional
- Enable or disable table border
- Set custom footer support
- Optional identical cells merging
#### Example 1 - Basic
```go
data := [][]string{
[]string{"A", "The Good", "500"},
[]string{"B", "The Very very Bad Man", "288"},
[]string{"C", "The Ugly", "120"},
[]string{"D", "The Gopher", "800"},
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Name", "Sign", "Rating"})
for _, v := range data {
table.Append(v)
}
table.Render() // Send output
```
##### Output 1
```
+------+-----------------------+--------+
| NAME | SIGN | RATING |
+------+-----------------------+--------+
| A | The Good | 500 |
| B | The Very very Bad Man | 288 |
| C | The Ugly | 120 |
| D | The Gopher | 800 |
+------+-----------------------+--------+
```
#### Example 2 - Without Border / Footer / Bulk Append
```go
data := [][]string{
[]string{"1/1/2014", "Domain name", "2233", "$10.98"},
[]string{"1/1/2014", "January Hosting", "2233", "$54.95"},
[]string{"1/4/2014", "February Hosting", "2233", "$51.00"},
[]string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"},
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer
table.SetBorder(false) // Set Border to false
table.AppendBulk(data) // Add Bulk Data
table.Render()
```
##### Output 2
```
DATE | DESCRIPTION | CV2 | AMOUNT
+----------+--------------------------+-------+---------+
1/1/2014 | Domain name | 2233 | $10.98
1/1/2014 | January Hosting | 2233 | $54.95
1/4/2014 | February Hosting | 2233 | $51.00
1/4/2014 | February Extra Bandwidth | 2233 | $30.00
+----------+--------------------------+-------+---------+
TOTAL | $146 93
+-------+---------+
```
#### Example 3 - CSV
```go
table, _ := tablewriter.NewCSV(os.Stdout, "test_info.csv", true)
table.SetAlignment(tablewriter.ALIGN_LEFT) // Set Alignment
table.Render()
```
##### Output 3
```
+----------+--------------+------+-----+---------+----------------+
| FIELD | TYPE | NULL | KEY | DEFAULT | EXTRA |
+----------+--------------+------+-----+---------+----------------+
| user_id | smallint(5) | NO | PRI | NULL | auto_increment |
| username | varchar(10) | NO | | NULL | |
| password | varchar(100) | NO | | NULL | |
+----------+--------------+------+-----+---------+----------------+
```
#### Example 4 - Custom Separator
```go
table, _ := tablewriter.NewCSV(os.Stdout, "test.csv", true)
table.SetRowLine(true) // Enable row line
// Change table lines
table.SetCenterSeparator("*")
table.SetColumnSeparator("‡")
table.SetRowSeparator("-")
table.SetAlignment(tablewriter.ALIGN_LEFT)
table.Render()
```
##### Output 4
```
*------------*-----------*---------*
╪ FIRST NAME ╪ LAST NAME ╪ SSN ╪
*------------*-----------*---------*
╪ John ╪ Barry ╪ 123456 ╪
*------------*-----------*---------*
╪ Kathy ╪ Smith ╪ 687987 ╪
*------------*-----------*---------*
╪ Bob ╪ McCornick ╪ 3979870 ╪
*------------*-----------*---------*
```
##### Example 5 - Markdown Format
```go
data := [][]string{
[]string{"1/1/2014", "Domain name", "2233", "$10.98"},
[]string{"1/1/2014", "January Hosting", "2233", "$54.95"},
[]string{"1/4/2014", "February Hosting", "2233", "$51.00"},
[]string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"},
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
table.SetCenterSeparator("|")
table.AppendBulk(data) // Add Bulk Data
table.Render()
```
##### Output 5
```
| DATE | DESCRIPTION | CV2 | AMOUNT |
|----------|--------------------------|------|--------|
| 1/1/2014 | Domain name | 2233 | $10.98 |
| 1/1/2014 | January Hosting | 2233 | $54.95 |
| 1/4/2014 | February Hosting | 2233 | $51.00 |
| 1/4/2014 | February Extra Bandwidth | 2233 | $30.00 |
```
#### Example 6 - Identical cells merging
```go
data := [][]string{
[]string{"1/1/2014", "Domain name", "1234", "$10.98"},
[]string{"1/1/2014", "January Hosting", "2345", "$54.95"},
[]string{"1/4/2014", "February Hosting", "3456", "$51.00"},
[]string{"1/4/2014", "February Extra Bandwidth", "4567", "$30.00"},
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
table.SetFooter([]string{"", "", "Total", "$146.93"})
table.SetAutoMergeCells(true)
table.SetRowLine(true)
table.AppendBulk(data)
table.Render()
```
##### Output 6
```
+----------+--------------------------+-------+---------+
| DATE | DESCRIPTION | CV2 | AMOUNT |
+----------+--------------------------+-------+---------+
| 1/1/2014 | Domain name | 1234 | $10.98 |
+ +--------------------------+-------+---------+
| | January Hosting | 2345 | $54.95 |
+----------+--------------------------+-------+---------+
| 1/4/2014 | February Hosting | 3456 | $51.00 |
+ +--------------------------+-------+---------+
| | February Extra Bandwidth | 4567 | $30.00 |
+----------+--------------------------+-------+---------+
| TOTAL | $146 93 |
+----------+--------------------------+-------+---------+
```
#### TODO
- ~~Import Directly from CSV~~ - `done`
- ~~Support for `SetFooter`~~ - `done`
- ~~Support for `SetBorder`~~ - `done`
- ~~Support table with uneven rows~~ - `done`
- Support custom alignment
- General Improvement & Optimisation
- `NewHTML` Parse table from HTML

@@ -53,10 +53,13 @@ type Table struct {
lines [][][]string
cs map[int]int
rs map[int]int
headers []string
footers []string
headers [][]string
footers [][]string
caption bool
captionText string
autoFmt bool
autoWrap bool
reflowText bool
mW int
pCenter string
pRow string
@@ -72,6 +75,10 @@ type Table struct {
hdrLine bool
borders Border
colSize int
headerParams []string
columnsParams []string
footerParams []string
columnsAlign []int
}
// Start New Table
@@ -83,10 +90,13 @@ func NewWriter(writer io.Writer) *Table {
lines: [][][]string{},
cs: make(map[int]int),
rs: make(map[int]int),
headers: []string{},
footers: []string{},
headers: [][]string{},
footers: [][]string{},
caption: false,
captionText: "Table caption.",
autoFmt: true,
autoWrap: true,
reflowText: true,
mW: MAX_ROW_WIDTH,
pCenter: CENTER,
pRow: ROW,
@@ -100,12 +110,16 @@ func NewWriter(writer io.Writer) *Table {
rowLine: false,
hdrLine: true,
borders: Border{Left: true, Right: true, Bottom: true, Top: true},
colSize: -1}
colSize: -1,
headerParams: []string{},
columnsParams: []string{},
footerParams: []string{},
columnsAlign: []int{}}
return t
}
// Render table output
func (t Table) Render() {
func (t *Table) Render() {
if t.borders.Top {
t.printLine(true)
}
@@ -115,20 +129,27 @@ func (t Table) Render() {
} else {
t.printRows()
}
if !t.rowLine && t.borders.Bottom {
t.printLine(true)
}
t.printFooter()
if t.caption {
t.printCaption()
}
}
const (
headerRowIdx = -1
footerRowIdx = -2
)
// Set table header
func (t *Table) SetHeader(keys []string) {
t.colSize = len(keys)
for i, v := range keys {
t.parseDimension(v, i, -1)
t.headers = append(t.headers, v)
lines := t.parseDimension(v, i, headerRowIdx)
t.headers = append(t.headers, lines)
}
}
@@ -136,8 +157,16 @@ func (t *Table) SetHeader(keys []string) {
func (t *Table) SetFooter(keys []string) {
//t.colSize = len(keys)
for i, v := range keys {
t.parseDimension(v, i, -1)
t.footers = append(t.footers, v)
lines := t.parseDimension(v, i, footerRowIdx)
t.footers = append(t.footers, lines)
}
}
// Set table Caption
func (t *Table) SetCaption(caption bool, captionText ...string) {
t.caption = caption
if len(captionText) == 1 {
t.captionText = captionText[0]
}
}
@@ -151,11 +180,21 @@ func (t *Table) SetAutoWrapText(auto bool) {
t.autoWrap = auto
}
// Turn automatic reflowing of multiline text when rewrapping. Default is on (true).
func (t *Table) SetReflowDuringAutoWrap(auto bool) {
t.reflowText = auto
}
// Set the Default column width
func (t *Table) SetColWidth(width int) {
t.mW = width
}
// Set the minimal width for a column
func (t *Table) SetColMinWidth(column int, width int) {
t.cs[column] = width
}
// Set the Column Separator
func (t *Table) SetColumnSeparator(sep string) {
t.pColumn = sep
@@ -186,6 +225,22 @@ func (t *Table) SetAlignment(align int) {
t.align = align
}
func (t *Table) SetColumnAlignment(keys []int) {
for _, v := range keys {
switch v {
case ALIGN_CENTER:
break
case ALIGN_LEFT:
break
case ALIGN_RIGHT:
break
default:
v = ALIGN_DEFAULT
}
t.columnsAlign = append(t.columnsAlign, v)
}
}
// Set New Line
func (t *Table) SetNewLine(nl string) {
t.newLine = nl
@@ -249,8 +304,23 @@ func (t *Table) AppendBulk(rows [][]string) {
}
}
// NumLines to get the number of lines
func (t *Table) NumLines() int {
return len(t.lines)
}
// Clear rows
func (t *Table) ClearRows() {
t.lines = [][][]string{}
}
// Clear footer
func (t *Table) ClearFooter() {
t.footers = [][]string{}
}
// Print line based on row width
func (t Table) printLine(nl bool) {
func (t *Table) printLine(nl bool) {
fmt.Fprint(t.out, t.pCenter)
for i := 0; i < len(t.cs); i++ {
v := t.cs[i]
@@ -266,7 +336,7 @@ func (t Table) printLine(nl bool) {
}
// Print line based on row width with our without cell separator
func (t Table) printLineOptionalCellSeparators(nl bool, displayCellSeparator []bool) {
func (t *Table) printLineOptionalCellSeparators(nl bool, displayCellSeparator []bool) {
fmt.Fprint(t.out, t.pCenter)
for i := 0; i < len(t.cs); i++ {
v := t.cs[i]
@@ -303,43 +373,64 @@ func pad(align int) func(string, string, int) string {
}
// Print heading information
func (t Table) printHeading() {
func (t *Table) printHeading() {
// Check if headers is available
if len(t.headers) < 1 {
return
}
// Check if border is set
// Replace with space if not set
fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE))
// Identify last column
end := len(t.cs) - 1
// Get pad function
padFunc := pad(t.hAlign)
// Print Heading column
for i := 0; i <= end; i++ {
v := t.cs[i]
h := t.headers[i]
// Checking for ANSI escape sequences for header
is_esc_seq := false
if len(t.headerParams) > 0 {
is_esc_seq = true
}
// Maximum height.
max := t.rs[headerRowIdx]
// Print Heading
for x := 0; x < max; x++ {
// Check if border is set
// Replace with space if not set
fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE))
for y := 0; y <= end; y++ {
v := t.cs[y]
h := ""
if y < len(t.headers) && x < len(t.headers[y]) {
h = t.headers[y][x]
}
if t.autoFmt {
h = Title(h)
}
pad := ConditionString((i == end && !t.borders.Left), SPACE, t.pColumn)
pad := ConditionString((y == end && !t.borders.Left), SPACE, t.pColumn)
if is_esc_seq {
fmt.Fprintf(t.out, " %s %s",
format(padFunc(h, SPACE, v),
t.headerParams[y]), pad)
} else {
fmt.Fprintf(t.out, " %s %s",
padFunc(h, SPACE, v),
pad)
}
}
// Next line
fmt.Fprint(t.out, t.newLine)
}
if t.hdrLine {
t.printLine(true)
}
}
// Print heading information
func (t Table) printFooter() {
func (t *Table) printFooter() {
// Check if headers is available
if len(t.footers) < 1 {
return
@@ -349,9 +440,6 @@ func (t Table) printFooter() {
if !t.borders.Bottom {
t.printLine(true)
}
// Check if border is set
// Replace with space if not set
fmt.Fprint(t.out, ConditionString(t.borders.Bottom, t.pColumn, SPACE))
// Identify last column
end := len(t.cs) - 1
@@ -359,25 +447,56 @@
// Get pad function
padFunc := pad(t.fAlign)
// Print Heading column
for i := 0; i <= end; i++ {
v := t.cs[i]
f := t.footers[i]
// Checking for ANSI escape sequences for header
is_esc_seq := false
if len(t.footerParams) > 0 {
is_esc_seq = true
}
// Maximum height.
max := t.rs[footerRowIdx]
// Print Footer
erasePad := make([]bool, len(t.footers))
for x := 0; x < max; x++ {
// Check if border is set
// Replace with space if not set
fmt.Fprint(t.out, ConditionString(t.borders.Bottom, t.pColumn, SPACE))
for y := 0; y <= end; y++ {
v := t.cs[y]
f := ""
if y < len(t.footers) && x < len(t.footers[y]) {
f = t.footers[y][x]
}
if t.autoFmt {
f = Title(f)
}
pad := ConditionString((i == end && !t.borders.Top), SPACE, t.pColumn)
pad := ConditionString((y == end && !t.borders.Top), SPACE, t.pColumn)
if len(t.footers[i]) == 0 {
if erasePad[y] || (x == 0 && len(f) == 0) {
pad = SPACE
erasePad[y] = true
}
if is_esc_seq {
fmt.Fprintf(t.out, " %s %s",
format(padFunc(f, SPACE, v),
t.footerParams[y]), pad)
} else {
fmt.Fprintf(t.out, " %s %s",
padFunc(f, SPACE, v),
pad)
}
//fmt.Fprintf(t.out, " %s %s",
// padFunc(f, SPACE, v),
// pad)
}
// Next line
fmt.Fprint(t.out, t.newLine)
//t.printLine(true)
}
hasPrinted := false
@@ -385,7 +504,7 @@ func (t Table) printFooter() {
v := t.cs[i]
pad := t.pRow
center := t.pCenter
length := len(t.footers[i])
length := len(t.footers[i][0])
if length > 0 {
hasPrinted = true
@@ -413,7 +532,7 @@ func (t Table) printFooter() {
// Change Center start position
if center == SPACE {
if i < end && len(t.footers[i+1]) != 0 {
if i < end && len(t.footers[i+1][0]) != 0 {
center = t.pCenter
}
}
@@ -428,22 +547,53 @@
}
fmt.Fprint(t.out, t.newLine)
}
// Print caption text
func (t Table) printCaption() {
width := t.getTableWidth()
paragraph, _ := WrapString(t.captionText, width)
for linecount := 0; linecount < len(paragraph); linecount++ {
fmt.Fprintln(t.out, paragraph[linecount])
}
}
// Calculate the total number of characters in a row
func (t Table) getTableWidth() int {
var chars int
for _, v := range t.cs {
chars += v
}
// Add chars, spaces, seperators to calculate the total width of the table.
// ncols := t.colSize
// spaces := ncols * 2
// seps := ncols + 1
return (chars + (3 * t.colSize) + 2)
}
func (t Table) printRows() {
for i, lines := range t.lines {
t.printRow(lines, i)
}
}
func (t *Table) fillAlignment(num int) {
if len(t.columnsAlign) < num {
t.columnsAlign = make([]int, num)
for i := range t.columnsAlign {
t.columnsAlign[i] = t.align
}
}
}
// Print Row Information
// Adjust column alignment based on type
func (t Table) printRow(columns [][]string, colKey int) {
func (t *Table) printRow(columns [][]string, rowIdx int) {
// Get Maximum Height
max := t.rs[colKey]
max := t.rs[rowIdx]
total := len(columns)
// TODO Fix uneven col size
@@ -455,9 +605,15 @@ func (t Table) printRow(columns [][]string, colKey int) {
//}
// Pad Each Height
// pads := []int{}
pads := []int{}
// Checking for ANSI escape sequences for columns
is_esc_seq := false
if len(t.columnsParams) > 0 {
is_esc_seq = true
}
t.fillAlignment(total)
for i, line := range columns {
length := len(line)
pad := max - length
@@ -476,9 +632,14 @@ func (t Table) printRow(columns [][]string, colKey int) {
fmt.Fprintf(t.out, SPACE)
str := columns[y][x]
// Embedding escape sequence with column value
if is_esc_seq {
str = format(str, t.columnsParams[y])
}
// This would print alignment
// Default alignment would use multiple configuration
switch t.align {
switch t.columnsAlign[y] {
case ALIGN_CENTER: //
fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y]))
case ALIGN_RIGHT:
@@ -514,7 +675,7 @@ func (t Table) printRow(columns [][]string, colKey int) {
}
// Print the rows of the table and merge the cells that are identical
func (t Table) printRowsMergeCells() {
func (t *Table) printRowsMergeCells() {
var previousLine []string
var displayCellBorder []bool
var tmpWriter bytes.Buffer
@@ -537,9 +698,9 @@ func (t Table) printRowsMergeCells() {
// Print Row Information to a writer and merge identical cells.
// Adjust column alignment based on type
func (t Table) printRowMergeCells(writer io.Writer, columns [][]string, colKey int, previousLine []string) ([]string, []bool) {
func (t *Table) printRowMergeCells(writer io.Writer, columns [][]string, rowIdx int, previousLine []string) ([]string, []bool) {
// Get Maximum Height
max := t.rs[colKey]
max := t.rs[rowIdx]
total := len(columns)
// Pad Each Height
@@ -555,6 +716,7 @@ func (t Table) printRowMergeCells(writer io.Writer, columns [][]string, colKey i
}
var displayCellBorder []bool
t.fillAlignment(total)
for x := 0; x < max; x++ {
for y := 0; y < total; y++ {
@@ -580,7 +742,7 @@ func (t Table) printRowMergeCells(writer io.Writer, columns [][]string, colKey i
// This would print alignment
// Default alignment would use multiple configuration
switch t.align {
switch t.columnsAlign[y] {
case ALIGN_CENTER: //
fmt.Fprintf(writer, "%s", Pad(str, SPACE, t.cs[y]))
case ALIGN_RIGHT:
@@ -614,43 +776,58 @@
func (t *Table) parseDimension(str string, colKey, rowKey int) []string {
var (
raw []string
max int
maxWidth int
)
w := DisplayWidth(str)
// Calculate Width
// Check if with is grater than maximum width
if w > t.mW {
w = t.mW
}
// Check if width exists
v, ok := t.cs[colKey]
if !ok || v < w || v == 0 {
t.cs[colKey] = w
}
if rowKey == -1 {
return raw
}
// Calculate Height
if t.autoWrap {
raw, _ = WrapString(str, t.cs[colKey])
} else {
raw = getLines(str)
}
maxWidth = 0
for _, line := range raw {
if w := DisplayWidth(line); w > max {
max = w
if w := DisplayWidth(line); w > maxWidth {
maxWidth = w
}
}
// Make sure the with is the same length as maximum word
// Important for cases where the width is smaller than maxu word
if max > t.cs[colKey] {
t.cs[colKey] = max
// If wrapping, ensure that all paragraphs in the cell fit in the
// specified width.
if t.autoWrap {
// If there's a maximum allowed width for wrapping, use that.
if maxWidth > t.mW {
maxWidth = t.mW
}
// In the process of doing so, we need to recompute maxWidth. This
// is because perhaps a word in the cell is longer than the
// allowed maximum width in t.mW.
newMaxWidth := maxWidth
newRaw := make([]string, 0, len(raw))
if t.reflowText {
// Make a single paragraph of everything.
raw = []string{strings.Join(raw, " ")}
}
for i, para := range raw {
paraLines, _ := WrapString(para, maxWidth)
for _, line := range paraLines {
if w := DisplayWidth(line); w > newMaxWidth {
newMaxWidth = w
}
}
if i > 0 {
newRaw = append(newRaw, " ")
}
newRaw = append(newRaw, paraLines...)
}
raw = newRaw
maxWidth = newMaxWidth
}
// Store the new known maximum width.
v, ok := t.cs[colKey]
if !ok || v < maxWidth || v == 0 {
t.cs[colKey] = maxWidth
}
// Remember the number of lines for the row printer.
h := len(raw)
v, ok = t.rs[rowKey]
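To make the diff above easier to follow, here is a hedged sketch exercising a few of the setters it introduces (SetColMinWidth, SetColumnAlignment, SetCaption, NumLines); the table data is illustrative:

```go
package main

import (
	"fmt"
	"os"

	"github.com/olekukonko/tablewriter"
)

func main() {
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Name", "Amount"})
	table.SetColMinWidth(0, 12)                       // new: minimum width for the first column
	table.SetColumnAlignment([]int{                   // new: per-column alignment
		tablewriter.ALIGN_LEFT, tablewriter.ALIGN_RIGHT})
	table.SetCaption(true, "Monthly hosting costs.")  // new: caption printed with the table
	table.Append([]string{"Domain name", "$10.98"})
	table.Append([]string{"January Hosting", "$54.95"})
	fmt.Println("rows:", table.NumLines()) // new: number of appended rows
	table.Render()
}
```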

@@ -0,0 +1,134 @@
package tablewriter
import (
"fmt"
"strconv"
"strings"
)
const ESC = "\033"
const SEP = ";"
const (
BgBlackColor int = iota + 40
BgRedColor
BgGreenColor
BgYellowColor
BgBlueColor
BgMagentaColor
BgCyanColor
BgWhiteColor
)
const (
FgBlackColor int = iota + 30
FgRedColor
FgGreenColor
FgYellowColor
FgBlueColor
FgMagentaColor
FgCyanColor
FgWhiteColor
)
const (
BgHiBlackColor int = iota + 100
BgHiRedColor
BgHiGreenColor
BgHiYellowColor
BgHiBlueColor
BgHiMagentaColor
BgHiCyanColor
BgHiWhiteColor
)
const (
FgHiBlackColor int = iota + 90
FgHiRedColor
FgHiGreenColor
FgHiYellowColor
FgHiBlueColor
FgHiMagentaColor
FgHiCyanColor
FgHiWhiteColor
)
const (
Normal = 0
Bold = 1
UnderlineSingle = 4
Italic
)
type Colors []int
func startFormat(seq string) string {
return fmt.Sprintf("%s[%sm", ESC, seq)
}
func stopFormat() string {
return fmt.Sprintf("%s[%dm", ESC, Normal)
}
// Making the SGR (Select Graphic Rendition) sequence.
func makeSequence(codes []int) string {
codesInString := []string{}
for _, code := range codes {
codesInString = append(codesInString, strconv.Itoa(code))
}
return strings.Join(codesInString, SEP)
}
// Adding ANSI escape sequences before and after string
func format(s string, codes interface{}) string {
var seq string
switch v := codes.(type) {
case string:
seq = v
case []int:
seq = makeSequence(v)
default:
return s
}
if len(seq) == 0 {
return s
}
return startFormat(seq) + s + stopFormat()
}
// Adding header colors (ANSI codes)
func (t *Table) SetHeaderColor(colors ...Colors) {
if t.colSize != len(colors) {
panic("Number of header colors must be equal to number of headers.")
}
for i := 0; i < len(colors); i++ {
t.headerParams = append(t.headerParams, makeSequence(colors[i]))
}
}
// Adding column colors (ANSI codes)
func (t *Table) SetColumnColor(colors ...Colors) {
if t.colSize != len(colors) {
panic("Number of column colors must be equal to number of headers.")
}
for i := 0; i < len(colors); i++ {
t.columnsParams = append(t.columnsParams, makeSequence(colors[i]))
}
}
// Adding column colors (ANSI codes)
func (t *Table) SetFooterColor(colors ...Colors) {
if len(t.footers) != len(colors) {
panic("Number of footer colors must be equal to number of footer.")
}
for i := 0; i < len(colors); i++ {
t.footerParams = append(t.footerParams, makeSequence(colors[i]))
}
}
func Color(colors ...int) []int {
return colors
}
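A short, hedged example of the color API added in this file; the header and cell values are illustrative. Note that the number of Colors arguments must match the column count set via SetHeader, otherwise the setters panic:

```go
package main

import (
	"os"

	"github.com/olekukonko/tablewriter"
)

func main() {
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Service", "Status"})
	// One Colors value per column; a mismatch with the header count panics.
	table.SetHeaderColor(
		tablewriter.Colors{tablewriter.Bold, tablewriter.FgGreenColor},
		tablewriter.Colors{tablewriter.Bold, tablewriter.FgGreenColor},
	)
	table.SetColumnColor(
		tablewriter.Colors{tablewriter.FgHiWhiteColor},
		tablewriter.Colors{tablewriter.FgYellowColor},
	)
	table.Append([]string{"api", "up"})
	table.Append([]string{"worker", "down"})
	table.Render()
}
```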

@@ -1,4 +0,0 @@
first_name,last_name,ssn
John,Barry,123456
Kathy,Smith,687987
Bob,McCornick,3979870

@@ -1,4 +0,0 @@
Field,Type,Null,Key,Default,Extra
user_id,smallint(5),NO,PRI,NULL,auto_increment
username,varchar(10),NO,,NULL,
password,varchar(100),NO,,NULL,

@@ -30,12 +30,33 @@ func ConditionString(cond bool, valid, inValid string) string {
return inValid
}
func isNumOrSpace(r rune) bool {
return ('0' <= r && r <= '9') || r == ' '
}
// Format Table Header
// Replace _ , . and spaces
func Title(name string) string {
name = strings.Replace(name, "_", " ", -1)
name = strings.Replace(name, ".", " ", -1)
origLen := len(name)
rs := []rune(name)
for i, r := range rs {
switch r {
case '_':
rs[i] = ' '
case '.':
// ignore floating number 0.0
if (i != 0 && !isNumOrSpace(rs[i-1])) || (i != len(rs)-1 && !isNumOrSpace(rs[i+1])) {
rs[i] = ' '
}
}
}
name = string(rs)
name = strings.TrimSpace(name)
if len(name) == 0 && origLen > 0 {
// Keep at least one character. This is important to preserve
// empty lines in multi-line headers/footers.
name = " "
}
return strings.ToUpper(name)
}
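Since Title is exported, the new dot handling can be checked directly; the inputs below are made-up examples:

```go
package main

import (
	"fmt"

	"github.com/olekukonko/tablewriter"
)

func main() {
	// Underscores and word-separating dots become spaces ...
	fmt.Println(tablewriter.Title("user_id"))    // USER ID
	fmt.Println(tablewriter.Title("size.bytes")) // SIZE BYTES
	// ... while dots between digits are kept, preserving numbers like 0.5.
	fmt.Println(tablewriter.Title("quota_0.5")) // QUOTA 0.5
}
```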

@@ -10,7 +10,8 @@ package tablewriter
import (
"math"
"strings"
"unicode/utf8"
"github.com/mattn/go-runewidth"
)
var (
@@ -27,7 +28,7 @@ func WrapString(s string, lim int) ([]string, int) {
var lines []string
max := 0
for _, v := range words {
max = len(v)
max = runewidth.StringWidth(v)
if max > lim {
lim = max
}
@@ -55,9 +56,9 @@ func WrapWords(words []string, spc, lim, pen int) [][]string {
length := make([][]int, n)
for i := 0; i < n; i++ {
length[i] = make([]int, n)
length[i][i] = utf8.RuneCountInString(words[i])
length[i][i] = runewidth.StringWidth(words[i])
for j := i + 1; j < n; j++ {
length[i][j] = length[i][j-1] + spc + utf8.RuneCountInString(words[j])
length[i][j] = length[i][j-1] + spc + runewidth.StringWidth(words[j])
}
}
nbrk := make([]int, n)
@@ -94,10 +95,5 @@ func WrapWords(words []string, spc, lim, pen int) [][]string {
// getLines decomposes a multiline string into a slice of strings.
func getLines(s string) []string {
var lines []string
for _, line := range strings.Split(s, nl) {
lines = append(lines, line)
}
return lines
return strings.Split(s, nl)
}
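The switch from utf8.RuneCountInString to runewidth.StringWidth matters for wide characters: a rune count understates how many terminal cells CJK text occupies. A small comparison sketch; the sample string is adapted from the go-runewidth README:

```go
package main

import (
	"fmt"
	"unicode/utf8"

	runewidth "github.com/mattn/go-runewidth"
)

func main() {
	s := "つのだHIRO"
	// Code-point count: every rune counts as one.
	fmt.Println(utf8.RuneCountInString(s)) // 7
	// Display width: each hiragana rune occupies two terminal cells.
	fmt.Println(runewidth.StringWidth(s)) // 10
}
```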

@@ -1,36 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore
# swap
[._]*.s[a-w][a-z]
[._]s[a-w][a-z]
# session
Session.vim
# temporary
.netrwhist
*~
# auto-generated tag files
tags
*.exe
cobra.test

@@ -1,3 +0,0 @@
Steve Francia <steve.francia@gmail.com>
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
Fabiano Franz <ffranz@redhat.com> <contact@fabianofranz.com>

@@ -1,24 +0,0 @@
language: go
matrix:
include:
- go: 1.4.3
env: NOVET=true # No bundled vet.
- go: 1.5.4
- go: 1.6.3
- go: 1.7
- go: tip
allow_failures:
- go: tip
before_install:
- mkdir -p bin
- curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck
- chmod +x bin/shellcheck
script:
- PATH=$PATH:$PWD/bin go test -v ./...
- go build
- diff -u <(echo -n) <(gofmt -d -s .)
- if [ -z $NOVET ]; then
diff -u <(echo -n) <(go tool vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint');
fi

@@ -1,898 +0,0 @@
![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png)
Cobra is both a library for creating powerful modern CLI applications and a program to generate applications and command files.
Many of the most widely used Go projects are built using Cobra including:
* [Kubernetes](http://kubernetes.io/)
* [Hugo](http://gohugo.io)
* [rkt](https://github.com/coreos/rkt)
* [etcd](https://github.com/coreos/etcd)
* [Docker (distribution)](https://github.com/docker/distribution)
* [OpenShift](https://www.openshift.com/)
* [Delve](https://github.com/derekparker/delve)
* [GopherJS](http://www.gopherjs.org/)
* [CockroachDB](http://www.cockroachlabs.com/)
* [Bleve](http://www.blevesearch.com/)
* [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
* [Parse (CLI)](https://parse.com/)
* [GiantSwarm's swarm](https://github.com/giantswarm/cli)
* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra)
[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra)
[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra)
![cobra](https://cloud.githubusercontent.com/assets/173412/10911369/84832a8e-8212-11e5-9f82-cc96660a4794.gif)
# Overview
Cobra is a library providing a simple interface to create powerful modern CLI
interfaces similar to git & go tools.
Cobra is also an application that will generate your application scaffolding to rapidly
develop a Cobra-based application.
Cobra provides:
* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
* Fully POSIX-compliant flags (including short & long versions)
* Nested subcommands
* Global, local and cascading flags
* Easy generation of applications & commands with `cobra create appname` & `cobra add cmdname`
* Intelligent suggestions (`app srver`... did you mean `app server`?)
* Automatic help generation for commands and flags
* Automatic detailed help for `app help [command]`
* Automatic help flag recognition of `-h`, `--help`, etc.
* Automatically generated bash autocomplete for your application
* Automatically generated man pages for your application
* Command aliases so you can change things without breaking them
* The flexibility to define your own help, usage, etc.
* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps
Cobra has an exceptionally clean interface and simple design without needless
constructors or initialization methods.
Applications built with Cobra commands are designed to be as user-friendly as
possible. Flags can be placed before or after the command (as long as a
confusing space isn't provided). Both short and long flags can be used. A
command need not even be fully typed. Help is automatically generated and
available for the application or for a specific command using either the help
command or the `--help` flag.
# Concepts
Cobra is built on a structure of commands, arguments & flags.
**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions.
The best applications will read like sentences when used. Users will know how
to use the application because they will natively understand how to use it.
The pattern to follow is
`APPNAME VERB NOUN --ADJECTIVE.`
or
`APPNAME COMMAND ARG --FLAG`
A few good real world examples may better illustrate this point.
In the following example, 'server' is a command, and 'port' is a flag:
> hugo server --port=1313
In this command we are telling Git to clone the url bare.
> git clone URL --bare
## Commands
Command is the central point of the application. Each interaction that
the application supports will be contained in a Command. A command can
have children commands and optionally run an action.
In the example above, 'server' is the command.
A Command has the following structure:
```go
type Command struct {
Use string // The one-line usage message.
Short string // The short description shown in the 'help' output.
Long string // The long message shown in the 'help <this-command>' output.
Run func(cmd *Command, args []string) // Run runs the command.
}
```
## Flags
A Flag is a way to modify the behavior of a command. Cobra supports
fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
A Cobra command can define flags that persist through to children commands
and flags that are only available to that command.
In the example above, 'port' is the flag.
Flag functionality is provided by the [pflag
library](https://github.com/ogier/pflag), a fork of the flag standard library
which maintains the same interface while adding POSIX compliance.
## Usage
Cobra works by creating a set of commands and then organizing them into a tree.
The tree defines the structure of the application.
Once each command is defined with its corresponding flags, then the
tree is assigned to the commander which is finally executed.
# Installing
Using Cobra is easy. First, use `go get` to install the latest version
of the library. This command will install the `cobra` generator executable
along with the library:
> go get -v github.com/spf13/cobra/cobra
Next, include Cobra in your application:
```go
import "github.com/spf13/cobra"
```
# Getting Started
While you are welcome to provide your own organization, typically a Cobra based
application will follow the following organizational structure.
```
▾ appName/
▾ cmd/
add.go
your.go
commands.go
here.go
main.go
```
In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra.
```go
package main
import "{pathToYourApp}/cmd"
func main() {
if err := cmd.RootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(-1)
}
}
```
## Using the Cobra Generator
Cobra provides its own program that will create your application and add any
commands you want. It's the easiest way to incorporate Cobra into your application.
In order to use the cobra command, compile it using the following command:
> go install github.com/spf13/cobra/cobra
This will create the cobra executable under your go path bin directory!
### cobra init
The `cobra init [yourApp]` command will create your initial application code
for you. It is a very powerful application that will populate your program with
the right structure so you can immediately enjoy all the benefits of Cobra. It
will also automatically apply the license you specify to your application.
Cobra init is pretty smart. You can provide it a full path, or simply a path
similar to what is expected in the import.
```
cobra init github.com/spf13/newAppName
```
### cobra add
Once an application is initialized Cobra can create additional commands for you.
Let's say you created an app and you wanted the following commands for it:
* app serve
* app config
* app config create
In your project directory (where your main.go file is) you would run the following:
```
cobra add serve
cobra add config
cobra add create -p 'configCmd'
```
Once you have run these three commands you would have an app structure that would look like:
```
▾ app/
▾ cmd/
serve.go
config.go
create.go
main.go
```
at this point you can run `go run main.go` and it would run your app. `go run
main.go serve`, `go run main.go config`, `go run main.go config create` along
with `go run main.go help serve`, etc would all work.
Obviously you haven't added your own code to these yet; the commands are ready
for you to give them their tasks. Have fun.
### Configuring the cobra generator
The cobra generator will be easier to use if you provide a simple configuration
file which will help you eliminate providing a bunch of repeated information in
flags over and over.
An example ~/.cobra.yaml file:
```yaml
author: Steve Francia <spf@spf13.com>
license: MIT
```
You can specify no license by setting `license` to `none` or you can specify
a custom license:
```yaml
license:
header: This file is part of {{ .appName }}.
text: |
{{ .copyright }}
This is my license. There are many like it, but this one is mine.
My license is my best friend. It is my life. I must master it as I must
master my life.
```
## Manually implementing Cobra
To manually implement cobra you need to create a bare main.go file and a RootCmd file.
You will optionally provide additional commands as you see fit.
### Create the root command
The root command represents your binary itself.
#### Manually create rootCmd
Cobra doesn't require any special constructors. Simply create your commands.
Ideally you place this in app/cmd/root.go:
```go
var RootCmd = &cobra.Command{
Use: "hugo",
Short: "Hugo is a very fast static site generator",
Long: `A Fast and Flexible Static Site Generator built with
love by spf13 and friends in Go.
Complete documentation is available at http://hugo.spf13.com`,
Run: func(cmd *cobra.Command, args []string) {
// Do Stuff Here
},
}
```
You will additionally define flags and handle configuration in your init() function.
for example cmd/root.go:
```go
func init() {
cobra.OnInitialize(initConfig)
RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
RootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/")
RootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution")
RootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)")
RootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration")
viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author"))
viper.BindPFlag("projectbase", RootCmd.PersistentFlags().Lookup("projectbase"))
viper.BindPFlag("useViper", RootCmd.PersistentFlags().Lookup("viper"))
viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
viper.SetDefault("license", "apache")
}
```
### Create your main.go
With the root command defined, you need to have your main function execute it.
Execute should be run on the root for clarity, though it can be called on any command.
In a Cobra app, the main.go file is typically very bare. It serves one purpose: to initialize Cobra.
```go
package main

import (
	"fmt"
	"os"

	"{pathToYourApp}/cmd"
)

func main() {
	if err := cmd.RootCmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(-1)
	}
}
```
### Create additional commands
Additional commands can be defined and typically are each given their own file
inside of the cmd/ directory.
If you wanted to create a version command you would create cmd/version.go and
populate it with the following:
```go
package cmd
import (
	"fmt"

	"github.com/spf13/cobra"
)
func init() {
RootCmd.AddCommand(versionCmd)
}
var versionCmd = &cobra.Command{
Use: "version",
Short: "Print the version number of Hugo",
Long: `All software has versions. This is Hugo's`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
},
}
```
### Attach command to its parent
Notice that in the above example we attach the command to its parent; in this case the parent is the rootCmd. Here we attach it to the root, but commands can be attached at any level.
```go
RootCmd.AddCommand(versionCmd)
```
### Remove a command from its parent
Removing a command is not a common action in simple programs, but it allows 3rd
parties to customize an existing command tree.
In this example, we remove the existing `VersionCmd` command of an existing
root command, and we replace it with our own version:
```go
mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd)
mainlib.RootCmd.AddCommand(versionCmd)
```
## Working with Flags
Flags provide modifiers to control how the action command operates.
### Assign flags to a command
Since flags are defined and used in different locations, we need to define a variable outside, with the correct scope, to which the flag can be assigned.
```go
var Verbose bool
var Source string
```
There are two different approaches to assigning a flag.
### Persistent Flags
A flag can be 'persistent' meaning that this flag will be available to the
command it's assigned to as well as every command under that command. For
global flags, assign a flag as a persistent flag on the root.
```go
RootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
```
### Local Flags
A flag can also be assigned locally which will only apply to that specific command.
```go
RootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
```
## Example
In the example below, we have defined three commands. Two are at the top level
and one (cmdTimes) is a child of one of the top commands. In this case the root
is not executable, meaning that a subcommand is required. This is accomplished
by not providing a 'Run' for the 'rootCmd'.
We have only defined one flag for a single command.
More documentation about flags is available at https://github.com/spf13/pflag
```go
package main
import (
"fmt"
"strings"
"github.com/spf13/cobra"
)
func main() {
var echoTimes int
var cmdPrint = &cobra.Command{
Use: "print [string to print]",
Short: "Print anything to the screen",
Long: `print is for printing anything back to the screen.
For many years people have printed back to the screen.
`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("Print: " + strings.Join(args, " "))
},
}
var cmdEcho = &cobra.Command{
Use: "echo [string to echo]",
Short: "Echo anything to the screen",
Long: `echo is for echoing anything back.
Echo works a lot like print, except it has a child command.
`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("Print: " + strings.Join(args, " "))
},
}
var cmdTimes = &cobra.Command{
Use: "times [# times] [string to echo]",
Short: "Echo anything to the screen more times",
Long: `echo things multiple times back to the user by providing
a count and a string.`,
Run: func(cmd *cobra.Command, args []string) {
for i := 0; i < echoTimes; i++ {
fmt.Println("Echo: " + strings.Join(args, " "))
}
},
}
cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
var rootCmd = &cobra.Command{Use: "app"}
rootCmd.AddCommand(cmdPrint, cmdEcho)
cmdEcho.AddCommand(cmdTimes)
rootCmd.Execute()
}
```
For a more complete example of a larger application, please check out [Hugo](http://gohugo.io/).
## The Help Command
Cobra automatically adds a help command to your application when you have subcommands.
This will be called when a user runs 'app help'. Additionally, help will also
support all other commands as input. Say, for instance, you have a command called
'create' without any additional configuration; Cobra will work when 'app help
create' is called. Every command will automatically have the '--help' flag added.
### Example
The following output is automatically generated by Cobra. Nothing beyond the command and flag definitions is needed.
    > hugo help

    hugo is the main command, used to build your Hugo site.

    Hugo is a Fast and Flexible Static Site Generator
    built with love by spf13 and friends in Go.

    Complete documentation is available at http://gohugo.io/.

    Usage:
      hugo [flags]
      hugo [command]

    Available Commands:
      server          Hugo runs its own webserver to render the files
      version         Print the version number of Hugo
      config          Print the site configuration
      check           Check content in the source directory
      benchmark       Benchmark hugo by building a site a number of times.
      convert         Convert your content to different formats
      new             Create new content for your site
      list            Listing out various types of content
      undraft         Undraft changes the content's draft status from 'True' to 'False'
      genautocomplete Generate shell autocompletion script for Hugo
      gendoc          Generate Markdown documentation for the Hugo CLI.
      genman          Generate man page for Hugo
      import          Import your site from others.

    Flags:
      -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/
      -D, --buildDrafts[=false]: include content marked as draft
      -F, --buildFuture[=false]: include content with publishdate in the future
          --cacheDir="": filesystem path to cache directory. Defaults: $TMPDIR/hugo_cache/
          --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL
          --config="": config file (default is path/config.yaml|json|toml)
      -d, --destination="": filesystem path to write files to
          --disableRSS[=false]: Do not build RSS files
          --disableSitemap[=false]: Do not build Sitemap file
          --editor="": edit new content with this editor, if provided
          --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it
          --log[=false]: Enable Logging
          --logFile="": Log File path (if set, logging enabled automatically)
          --noTimes[=false]: Don't sync modification time of files
          --pluralizeListTitles[=true]: Pluralize titles in lists using inflect
          --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu")
      -s, --source="": filesystem path to read files relative from
          --stepAnalysis[=false]: display memory and timing of different steps of the program
      -t, --theme="": theme to use (located in /themes/THEMENAME/)
          --uglyURLs[=false]: if true, use /filename.html instead of /filename/
      -v, --verbose[=false]: verbose output
          --verboseLog[=false]: verbose logging
      -w, --watch[=false]: watch filesystem for changes and recreate as needed

    Use "hugo [command] --help" for more information about a command.
Help is just a command like any other. There is no special logic or behavior
around it. In fact, you can provide your own if you want.
### Defining your own help
You can provide your own Help command or your own template for the default command to use.
The default help command is
```go
func (c *Command) initHelp() {
if c.helpCommand == nil {
c.helpCommand = &Command{
Use: "help [command]",
Short: "Help about any command",
Long: `Help provides help for any command in the application.
Simply type ` + c.Name() + ` help [path to command] for full details.`,
Run: c.HelpFunc(),
}
}
c.AddCommand(c.helpCommand)
}
```
You can provide your own command, function or template through the following methods:
```go
command.SetHelpCommand(cmd *Command)
command.SetHelpFunc(f func(*Command, []string))
command.SetHelpTemplate(s string)
```
The latter two will also apply to any child commands.
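For example, a minimal sketch (assuming the `RootCmd` defined earlier; the template body is illustrative, not the built-in default) that swaps in a simpler help template:
```go
RootCmd.SetHelpTemplate(`{{.Long}}

{{.UsageString}}`)
```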
## Usage
When the user provides an invalid flag or invalid command, Cobra responds by
showing the user the 'usage'.
### Example
You may recognize this from the help above. That's because the default help
embeds the usage as part of its output.
    Usage:
      hugo [flags]
      hugo [command]

    Available Commands:
      server          Hugo runs its own webserver to render the files
      version         Print the version number of Hugo
      config          Print the site configuration
      check           Check content in the source directory
      benchmark       Benchmark hugo by building a site a number of times.
      convert         Convert your content to different formats
      new             Create new content for your site
      list            Listing out various types of content
      undraft         Undraft changes the content's draft status from 'True' to 'False'
      genautocomplete Generate shell autocompletion script for Hugo
      gendoc          Generate Markdown documentation for the Hugo CLI.
      genman          Generate man page for Hugo
      import          Import your site from others.

    Flags:
      -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/
      -D, --buildDrafts[=false]: include content marked as draft
      -F, --buildFuture[=false]: include content with publishdate in the future
          --cacheDir="": filesystem path to cache directory. Defaults: $TMPDIR/hugo_cache/
          --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL
          --config="": config file (default is path/config.yaml|json|toml)
      -d, --destination="": filesystem path to write files to
          --disableRSS[=false]: Do not build RSS files
          --disableSitemap[=false]: Do not build Sitemap file
          --editor="": edit new content with this editor, if provided
          --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it
          --log[=false]: Enable Logging
          --logFile="": Log File path (if set, logging enabled automatically)
          --noTimes[=false]: Don't sync modification time of files
          --pluralizeListTitles[=true]: Pluralize titles in lists using inflect
          --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu")
      -s, --source="": filesystem path to read files relative from
          --stepAnalysis[=false]: display memory and timing of different steps of the program
      -t, --theme="": theme to use (located in /themes/THEMENAME/)
          --uglyURLs[=false]: if true, use /filename.html instead of /filename/
      -v, --verbose[=false]: verbose output
          --verboseLog[=false]: verbose logging
      -w, --watch[=false]: watch filesystem for changes and recreate as needed
### Defining your own usage
You can provide your own usage function or template for Cobra to use.
The default usage function is:
```go
return func(c *Command) error {
err := tmpl(c.Out(), c.UsageTemplate(), c)
return err
}
```
Like help, the function and template are overridable through public methods:
```go
command.SetUsageFunc(f func(*Command) error)
command.SetUsageTemplate(s string)
```
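As a hedged sketch (assuming `fmt` and `os` are imported alongside the `RootCmd` defined earlier), the usage function can be replaced with something terser:
```go
RootCmd.SetUsageFunc(func(c *cobra.Command) error {
	// Keep usage terse and point the user at the built-in help instead.
	_, err := fmt.Fprintf(os.Stderr, "Run %q with --help for usage.\n", c.CommandPath())
	return err
})
```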
## PreRun or PostRun Hooks
It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
- `PersistentPreRun`
- `PreRun`
- `Run`
- `PostRun`
- `PersistentPostRun`
An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
```go
package main
import (
"fmt"
"github.com/spf13/cobra"
)
func main() {
var rootCmd = &cobra.Command{
Use: "root [sub]",
Short: "My root command",
PersistentPreRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
},
PreRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
},
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside rootCmd Run with args: %v\n", args)
},
PostRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
},
PersistentPostRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
},
}
var subCmd = &cobra.Command{
Use: "sub [no options!]",
Short: "My subcommand",
PreRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
},
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside subCmd Run with args: %v\n", args)
},
PostRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
},
PersistentPostRun: func(cmd *cobra.Command, args []string) {
fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
},
}
rootCmd.AddCommand(subCmd)
rootCmd.SetArgs([]string{""})
_ = rootCmd.Execute()
fmt.Print("\n")
rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
_ = rootCmd.Execute()
}
```
## Alternative Error Handling
Cobra also has functions where the return signature is an error. This allows for errors to bubble up to the top,
providing a way to handle the errors in one location. The current list of functions that return an error is:
* PersistentPreRunE
* PreRunE
* RunE
* PostRunE
* PersistentPostRunE
If you would like to silence the default `error` and `usage` output in favor of your own, you can set `SilenceUsage` and `SilenceErrors` to `true` on the command. A child command respects these settings if they are set on the parent command.
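For instance, a minimal sketch on the root command:
```go
rootCmd.SilenceUsage = true  // don't print usage text on every error
rootCmd.SilenceErrors = true // handle/print the error yourself after Execute()
```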
**Example Usage using RunE:**
```go
package main
import (
"errors"
"log"
"github.com/spf13/cobra"
)
func main() {
var rootCmd = &cobra.Command{
Use: "hugo",
Short: "Hugo is a very fast static site generator",
Long: `A Fast and Flexible Static Site Generator built with
love by spf13 and friends in Go.
Complete documentation is available at http://hugo.spf13.com`,
RunE: func(cmd *cobra.Command, args []string) error {
// Do Stuff Here
return errors.New("some random error")
},
}
if err := rootCmd.Execute(); err != nil {
log.Fatal(err)
}
}
```
## Suggestions when "unknown command" happens
Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example:
```
$ hugo srever
Error: unknown command "srever" for "hugo"
Did you mean this?
server
Run 'hugo --help' for usage.
```
Suggestions are generated automatically, based on every registered subcommand, using an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
If you need to disable suggestions or tweak the string distance in your command, use:
```go
command.DisableSuggestions = true
```
or
```go
command.SuggestionsMinimumDistance = 1
```
You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance but that make sense in your set of commands, or for strings you don't want to register as aliases. Example:
```
$ kubectl remove
Error: unknown command "remove" for "kubectl"
Did you mean this?
delete
Run 'kubectl help' for usage.
```
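A minimal sketch of how such a command might declare its suggestion triggers via the `SuggestFor` field (the command and strings here are illustrative):
```go
var deleteCmd = &cobra.Command{
	Use:        "delete",
	Short:      "Delete resources",
	SuggestFor: []string{"remove", "rm"}, // typing "remove" or "rm" suggests "delete"
	Run:        func(cmd *cobra.Command, args []string) { /* ... */ },
}
```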
## Generating Markdown-formatted documentation for your command
Cobra can generate a Markdown-formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](doc/md_docs.md).
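For instance, a minimal sketch using the `github.com/spf13/cobra/doc` subpackage (the output directory is arbitrary, and the cobra import is assumed to be in scope):
```go
import "github.com/spf13/cobra/doc"

func genMarkdownDocs(rootCmd *cobra.Command) error {
	// Writes one Markdown file per command into ./docs.
	return doc.GenMarkdownTree(rootCmd, "./docs")
}
```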
## Generating man pages for your command
Cobra can generate a man page based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Man Docs](doc/man_docs.md).
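A similar hedged sketch using the same `doc` subpackage (assuming `log` is imported; the title, section, and output directory are illustrative):
```go
header := &doc.GenManHeader{Title: "MYAPP", Section: "1"}
// Writes one man page per command into ./man.
if err := doc.GenManTree(rootCmd, header, "./man"); err != nil {
	log.Fatal(err)
}
```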
## Generating bash completions for your command
Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md).
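For example, a minimal sketch that writes the completion script to a file (the filename is arbitrary and `log` is assumed to be imported):
```go
if err := rootCmd.GenBashCompletionFile("myapp_completion.sh"); err != nil {
	log.Fatal(err)
}
```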
## Debugging
Cobra provides a DebugFlags method on a command which, when called, will print
out everything Cobra knows about the flags for each command.
### Example
```go
command.DebugFlags()
```
## Release Notes
* **0.9.0** June 17, 2014
* flags can appear anywhere in the args (provided they are unambiguous)
* --help prints usage screen for app or command
* Prefix matching for commands
* Cleaner looking help and usage output
* Extensive test suite
* **0.8.0** Nov 5, 2013
* Reworked interface to remove commander completely
* Command now primary structure
* No initialization needed
* Usage & Help templates & functions definable at any level
* Updated Readme
* **0.7.0** Sept 24, 2013
* Needs more eyes
* Test suite
* Support for automatic error messages
* Support for help command
* Support for printing to any io.Writer instead of os.Stderr
* Support for persistent flags which cascade down tree
* Ready for integration into Hugo
* **0.1.0** Sept 3, 2013
* Implement first draft
## Extensions
Libraries for extending Cobra:
* [cmdns](https://github.com/gosuri/cmdns): Enables name spacing a command's immediate children. It provides an alternative way to structure subcommands, similar to `heroku apps:create` and `ovrclk clusters:launch`.
## ToDo
* Launch proper documentation site
## Contributing
1. Fork it
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Commit your changes (`git commit -am 'Add some feature'`)
4. Push to the branch (`git push origin my-new-feature`)
5. Create new Pull Request
## Contributors
Names in no particular order:
* [spf13](https://github.com/spf13),
[eparis](https://github.com/eparis),
[bep](https://github.com/bep), and many more!
## License
Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge")
89
vendor/github.com/spf13/cobra/args.go generated vendored Normal file
@ -0,0 +1,89 @@
package cobra
import (
"fmt"
)
type PositionalArgs func(cmd *Command, args []string) error
// Legacy arg validation has the following behaviour:
// - root commands with no subcommands can take arbitrary arguments
// - root commands with subcommands will do subcommand validity checking
// - subcommands will always accept arbitrary arguments
func legacyArgs(cmd *Command, args []string) error {
// no subcommand, always take args
if !cmd.HasSubCommands() {
return nil
}
// root command with subcommands, do subcommand checking.
if !cmd.HasParent() && len(args) > 0 {
return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
}
return nil
}
// NoArgs returns an error if any args are included.
func NoArgs(cmd *Command, args []string) error {
if len(args) > 0 {
return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
}
return nil
}
// OnlyValidArgs returns an error if any args are not in the list of ValidArgs.
func OnlyValidArgs(cmd *Command, args []string) error {
if len(cmd.ValidArgs) > 0 {
for _, v := range args {
if !stringInSlice(v, cmd.ValidArgs) {
return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
}
}
}
return nil
}
// ArbitraryArgs never returns an error.
func ArbitraryArgs(cmd *Command, args []string) error {
return nil
}
// MinimumNArgs returns an error if there is not at least N args.
func MinimumNArgs(n int) PositionalArgs {
return func(cmd *Command, args []string) error {
if len(args) < n {
return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args))
}
return nil
}
}
// MaximumNArgs returns an error if there are more than N args.
func MaximumNArgs(n int) PositionalArgs {
return func(cmd *Command, args []string) error {
if len(args) > n {
return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args))
}
return nil
}
}
// ExactArgs returns an error if there are not exactly n args.
func ExactArgs(n int) PositionalArgs {
return func(cmd *Command, args []string) error {
if len(args) != n {
return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
}
return nil
}
}
// RangeArgs returns an error if the number of args is not within the expected range.
func RangeArgs(min int, max int) PositionalArgs {
return func(cmd *Command, args []string) error {
if len(args) < min || len(args) > max {
return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
}
return nil
}
}
vendor/github.com/spf13/cobra/bash_completions.go
@ -1,6 +1,7 @@
package cobra
import (
"bytes"
"fmt"
"io"
"os"
@ -10,6 +11,7 @@ import (
"github.com/spf13/pflag"
)
// Annotations for Bash completion.
const (
BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
BashCompCustom = "cobra_annotation_bash_completion_custom"
@ -17,13 +19,10 @@ const (
BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
)
func preamble(out io.Writer, name string) error {
_, err := fmt.Fprintf(out, "# bash completion for %-36s -*- shell-script -*-\n", name)
if err != nil {
return err
}
_, err = fmt.Fprint(out, `
__debug()
func writePreamble(buf *bytes.Buffer, name string) {
buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
buf.WriteString(fmt.Sprintf(`
__%[1]s_debug()
{
if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
@ -32,13 +31,13 @@ __debug()
# Homebrew on Macs have version 1.3 of bash-completion which doesn't include
# _init_completion. This is a very minimal version of that function.
__my_init_completion()
__%[1]s_init_completion()
{
COMPREPLY=()
_get_comp_words_by_ref "$@" cur prev words cword
}
__index_of_word()
__%[1]s_index_of_word()
{
local w word=$1
shift
@ -50,7 +49,7 @@ __index_of_word()
index=-1
}
__contains_word()
__%[1]s_contains_word()
{
local w word=$1; shift
for w in "$@"; do
@ -59,9 +58,9 @@ __contains_word()
return 1
}
__handle_reply()
__%[1]s_handle_reply()
{
__debug "${FUNCNAME[0]}"
__%[1]s_debug "${FUNCNAME[0]}"
case $cur in
-*)
if [[ $(type -t compopt) = "builtin" ]]; then
@ -86,14 +85,14 @@ __handle_reply()
local index flag
flag="${cur%%=*}"
__index_of_word "${flag}" "${flags_with_completion[@]}"
if [[ ${index} -ge 0 ]]; then
__%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}"
COMPREPLY=()
if [[ ${index} -ge 0 ]]; then
PREFIX=""
cur="${cur#*=}"
${flags_completion[${index}]}
if [ -n "${ZSH_VERSION}" ]; then
# zfs completion needs --flag= prefix
# zsh completion needs --flag= prefix
eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
fi
fi
@ -104,7 +103,7 @@ __handle_reply()
# check if we are handling a flag with special work handling
local index
__index_of_word "${prev}" "${flags_with_completion[@]}"
__%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}"
if [[ ${index} -ge 0 ]]; then
${flags_completion[${index}]}
return
@ -133,25 +132,34 @@ __handle_reply()
declare -F __custom_func >/dev/null && __custom_func
fi
# available in bash-completion >= 2, not always present on macOS
if declare -F __ltrim_colon_completions >/dev/null; then
__ltrim_colon_completions "$cur"
fi
# If there is only 1 completion and it is a flag with an = it will be completed
# but we don't want a space after the =
if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
compopt -o nospace
fi
}
# The arguments should be in the form "ext1|ext2|extn"
__handle_filename_extension_flag()
__%[1]s_handle_filename_extension_flag()
{
local ext="$1"
_filedir "@(${ext})"
}
__handle_subdirs_in_dir_flag()
__%[1]s_handle_subdirs_in_dir_flag()
{
local dir="$1"
pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1
}
__handle_flag()
__%[1]s_handle_flag()
{
__debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
__%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
# if a command required a flag, and we found it, unset must_have_one_flag()
local flagname=${words[c]}
@ -162,17 +170,19 @@ __handle_flag()
flagname=${flagname%%=*} # strip everything after the =
flagname="${flagname}=" # but put the = back
fi
__debug "${FUNCNAME[0]}: looking for ${flagname}"
if __contains_word "${flagname}" "${must_have_one_flag[@]}"; then
__%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}"
if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
must_have_one_flag=()
fi
# if you set a flag which only applies to this command, don't show subcommands
if __contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
commands=()
fi
# keep flag value with flagname as flaghash
# flaghash variable is an associative array which is only supported in bash > 3.
if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
if [ -n "${flagvalue}" ] ; then
flaghash[${flagname}]=${flagvalue}
elif [ -n "${words[ $((c+1)) ]}" ] ; then
@ -180,9 +190,10 @@ __handle_flag()
else
flaghash[${flagname}]="true" # pad "true" for bool flag
fi
fi
# skip the argument to a two word flag
if __contains_word "${words[c]}" "${two_word_flags[@]}"; then
if __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
c=$((c+1))
# if we are looking for a flags value, don't show commands
if [[ $c -eq $cword ]]; then
@ -194,13 +205,13 @@ __handle_flag()
}
__handle_noun()
__%[1]s_handle_noun()
{
__debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
__%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
if __contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
must_have_one_noun=()
elif __contains_word "${words[c]}" "${noun_aliases[@]}"; then
elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then
must_have_one_noun=()
fi
@ -208,61 +219,66 @@ __handle_noun()
c=$((c+1))
}
__handle_command()
__%[1]s_handle_command()
{
__debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
__%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
local next_command
if [[ -n ${last_command} ]]; then
next_command="_${last_command}_${words[c]//:/__}"
else
if [[ $c -eq 0 ]]; then
next_command="_$(basename "${words[c]//:/__}")"
next_command="_%[1]s_root_command"
else
next_command="_${words[c]//:/__}"
fi
fi
c=$((c+1))
__debug "${FUNCNAME[0]}: looking for ${next_command}"
declare -F $next_command >/dev/null && $next_command
__%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}"
declare -F "$next_command" >/dev/null && $next_command
}
__handle_word()
__%[1]s_handle_word()
{
if [[ $c -ge $cword ]]; then
__handle_reply
__%[1]s_handle_reply
return
fi
__debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
__%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
if [[ "${words[c]}" == -* ]]; then
__handle_flag
elif __contains_word "${words[c]}" "${commands[@]}"; then
__handle_command
elif [[ $c -eq 0 ]] && __contains_word "$(basename "${words[c]}")" "${commands[@]}"; then
__handle_command
__%[1]s_handle_flag
elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then
__%[1]s_handle_command
elif [[ $c -eq 0 ]]; then
__%[1]s_handle_command
elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
# aliashash variable is an associative array which is only supported in bash > 3.
if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
words[c]=${aliashash[${words[c]}]}
__%[1]s_handle_command
else
__handle_noun
__%[1]s_handle_noun
fi
__handle_word
else
__%[1]s_handle_noun
fi
__%[1]s_handle_word
}
`)
return err
`, name))
}
func postscript(w io.Writer, name string) error {
func writePostscript(buf *bytes.Buffer, name string) {
name = strings.Replace(name, ":", "__", -1)
_, err := fmt.Fprintf(w, "__start_%s()\n", name)
if err != nil {
return err
}
_, err = fmt.Fprintf(w, `{
buf.WriteString(fmt.Sprintf("__start_%s()\n", name))
buf.WriteString(fmt.Sprintf(`{
local cur prev words cword
declare -A flaghash 2>/dev/null || :
declare -A aliashash 2>/dev/null || :
if declare -F _init_completion >/dev/null 2>&1; then
_init_completion -s || return
else
__my_init_completion -n "=" || return
__%[1]s_init_completion -n "=" || return
fi
local c=0
@ -271,206 +287,142 @@ func postscript(w io.Writer, name string) error {
local local_nonpersistent_flags=()
local flags_with_completion=()
local flags_completion=()
local commands=("%s")
local commands=("%[1]s")
local must_have_one_flag=()
local must_have_one_noun=()
local last_command
local nouns=()
__handle_word
__%[1]s_handle_word
}
`, name)
if err != nil {
return err
}
_, err = fmt.Fprintf(w, `if [[ $(type -t compopt) = "builtin" ]]; then
`, name))
buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
complete -o default -F __start_%s %s
else
complete -o default -o nospace -F __start_%s %s
fi
`, name, name, name, name)
if err != nil {
return err
}
_, err = fmt.Fprintf(w, "# ex: ts=4 sw=4 et filetype=sh\n")
return err
`, name, name, name, name))
buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n")
}
func writeCommands(cmd *Command, w io.Writer) error {
if _, err := fmt.Fprintf(w, " commands=()\n"); err != nil {
return err
}
func writeCommands(buf *bytes.Buffer, cmd *Command) {
buf.WriteString(" commands=()\n")
for _, c := range cmd.Commands() {
if !c.IsAvailableCommand() || c == cmd.helpCommand {
continue
}
if _, err := fmt.Fprintf(w, " commands+=(%q)\n", c.Name()); err != nil {
return err
buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name()))
writeCmdAliases(buf, c)
}
}
_, err := fmt.Fprintf(w, "\n")
return err
buf.WriteString("\n")
}
func writeFlagHandler(name string, annotations map[string][]string, w io.Writer) error {
func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) {
for key, value := range annotations {
switch key {
case BashCompFilenameExt:
_, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name)
if err != nil {
return err
}
buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
var ext string
if len(value) > 0 {
ext := "__handle_filename_extension_flag " + strings.Join(value, "|")
_, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext)
ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|")
} else {
ext := "_filedir"
_, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext)
}
if err != nil {
return err
ext = "_filedir"
}
buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
case BashCompCustom:
_, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name)
if err != nil {
return err
}
buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
if len(value) > 0 {
handlers := strings.Join(value, "; ")
_, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", handlers)
buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
} else {
_, err = fmt.Fprintf(w, " flags_completion+=(:)\n")
}
if err != nil {
return err
buf.WriteString(" flags_completion+=(:)\n")
}
case BashCompSubdirsInDir:
_, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name)
buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
var ext string
if len(value) == 1 {
ext := "__handle_subdirs_in_dir_flag " + value[0]
_, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext)
ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0]
} else {
ext := "_filedir -d"
_, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext)
ext = "_filedir -d"
}
if err != nil {
return err
buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
}
}
}
return nil
}
func writeShortFlag(flag *pflag.Flag, w io.Writer) error {
b := (len(flag.NoOptDefVal) > 0)
func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
name := flag.Shorthand
format := " "
if !b {
if len(flag.NoOptDefVal) == 0 {
format += "two_word_"
}
format += "flags+=(\"-%s\")\n"
if _, err := fmt.Fprintf(w, format, name); err != nil {
return err
}
return writeFlagHandler("-"+name, flag.Annotations, w)
buf.WriteString(fmt.Sprintf(format, name))
writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
}
func writeFlag(flag *pflag.Flag, w io.Writer) error {
b := (len(flag.NoOptDefVal) > 0)
func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
name := flag.Name
format := " flags+=(\"--%s"
if !b {
if len(flag.NoOptDefVal) == 0 {
format += "="
}
format += "\")\n"
if _, err := fmt.Fprintf(w, format, name); err != nil {
return err
}
return writeFlagHandler("--"+name, flag.Annotations, w)
buf.WriteString(fmt.Sprintf(format, name))
writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
}
func writeLocalNonPersistentFlag(flag *pflag.Flag, w io.Writer) error {
b := (len(flag.NoOptDefVal) > 0)
func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) {
name := flag.Name
format := " local_nonpersistent_flags+=(\"--%s"
if !b {
if len(flag.NoOptDefVal) == 0 {
format += "="
}
format += "\")\n"
_, err := fmt.Fprintf(w, format, name)
return err
buf.WriteString(fmt.Sprintf(format, name))
}
func writeFlags(cmd *Command, w io.Writer) error {
_, err := fmt.Fprintf(w, ` flags=()
func writeFlags(buf *bytes.Buffer, cmd *Command) {
buf.WriteString(` flags=()
two_word_flags=()
local_nonpersistent_flags=()
flags_with_completion=()
flags_completion=()
`)
if err != nil {
return err
}
localNonPersistentFlags := cmd.LocalNonPersistentFlags()
var visitErr error
cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
return
}
if err := writeFlag(flag, w); err != nil {
visitErr = err
return
}
writeFlag(buf, flag, cmd)
if len(flag.Shorthand) > 0 {
if err := writeShortFlag(flag, w); err != nil {
visitErr = err
return
}
writeShortFlag(buf, flag, cmd)
}
if localNonPersistentFlags.Lookup(flag.Name) != nil {
if err := writeLocalNonPersistentFlag(flag, w); err != nil {
visitErr = err
return
}
writeLocalNonPersistentFlag(buf, flag)
}
})
if visitErr != nil {
return visitErr
}
cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
return
}
if err := writeFlag(flag, w); err != nil {
visitErr = err
return
}
writeFlag(buf, flag, cmd)
if len(flag.Shorthand) > 0 {
if err := writeShortFlag(flag, w); err != nil {
visitErr = err
return
}
writeShortFlag(buf, flag, cmd)
}
})
if visitErr != nil {
return visitErr
}
_, err = fmt.Fprintf(w, "\n")
return err
buf.WriteString("\n")
}
func writeRequiredFlag(cmd *Command, w io.Writer) error {
if _, err := fmt.Fprintf(w, " must_have_one_flag=()\n"); err != nil {
return err
}
func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) {
buf.WriteString(" must_have_one_flag=()\n")
flags := cmd.NonInheritedFlags()
var visitErr error
flags.VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
return
@ -479,153 +431,144 @@ func writeRequiredFlag(cmd *Command, w io.Writer) error {
switch key {
case BashCompOneRequiredFlag:
format := " must_have_one_flag+=(\"--%s"
b := (flag.Value.Type() == "bool")
if !b {
if flag.Value.Type() != "bool" {
format += "="
}
format += "\")\n"
if _, err := fmt.Fprintf(w, format, flag.Name); err != nil {
visitErr = err
return
}
buf.WriteString(fmt.Sprintf(format, flag.Name))
if len(flag.Shorthand) > 0 {
if _, err := fmt.Fprintf(w, " must_have_one_flag+=(\"-%s\")\n", flag.Shorthand); err != nil {
visitErr = err
return
}
buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand))
}
}
}
})
return visitErr
}
func writeRequiredNouns(cmd *Command, w io.Writer) error {
if _, err := fmt.Fprintf(w, " must_have_one_noun=()\n"); err != nil {
return err
}
func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) {
buf.WriteString(" must_have_one_noun=()\n")
sort.Sort(sort.StringSlice(cmd.ValidArgs))
for _, value := range cmd.ValidArgs {
if _, err := fmt.Fprintf(w, " must_have_one_noun+=(%q)\n", value); err != nil {
return err
buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
}
}
return nil
}
func writeArgAliases(cmd *Command, w io.Writer) error {
if _, err := fmt.Fprintf(w, " noun_aliases=()\n"); err != nil {
return err
func writeCmdAliases(buf *bytes.Buffer, cmd *Command) {
if len(cmd.Aliases) == 0 {
return
}
sort.Sort(sort.StringSlice(cmd.Aliases))
buf.WriteString(fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n"))
for _, value := range cmd.Aliases {
buf.WriteString(fmt.Sprintf(" command_aliases+=(%q)\n", value))
buf.WriteString(fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
}
buf.WriteString(` fi`)
buf.WriteString("\n")
}
func writeArgAliases(buf *bytes.Buffer, cmd *Command) {
buf.WriteString(" noun_aliases=()\n")
sort.Sort(sort.StringSlice(cmd.ArgAliases))
for _, value := range cmd.ArgAliases {
if _, err := fmt.Fprintf(w, " noun_aliases+=(%q)\n", value); err != nil {
return err
buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value))
}
}
return nil
}
func gen(cmd *Command, w io.Writer) error {
func gen(buf *bytes.Buffer, cmd *Command) {
for _, c := range cmd.Commands() {
if !c.IsAvailableCommand() || c == cmd.helpCommand {
continue
}
if err := gen(c, w); err != nil {
return err
}
gen(buf, c)
}
commandName := cmd.CommandPath()
commandName = strings.Replace(commandName, " ", "_", -1)
commandName = strings.Replace(commandName, ":", "__", -1)
if _, err := fmt.Fprintf(w, "_%s()\n{\n", commandName); err != nil {
return err
if cmd.Root() == cmd {
buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName))
} else {
buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName))
}
if _, err := fmt.Fprintf(w, " last_command=%q\n", commandName); err != nil {
return err
}
if err := writeCommands(cmd, w); err != nil {
return err
}
if err := writeFlags(cmd, w); err != nil {
return err
}
if err := writeRequiredFlag(cmd, w); err != nil {
return err
}
if err := writeRequiredNouns(cmd, w); err != nil {
return err
}
if err := writeArgAliases(cmd, w); err != nil {
return err
}
if _, err := fmt.Fprintf(w, "}\n\n"); err != nil {
return err
}
return nil
buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName))
buf.WriteString("\n")
buf.WriteString(" command_aliases=()\n")
buf.WriteString("\n")
writeCommands(buf, cmd)
writeFlags(buf, cmd)
writeRequiredFlag(buf, cmd)
writeRequiredNouns(buf, cmd)
writeArgAliases(buf, cmd)
buf.WriteString("}\n\n")
}
func (cmd *Command) GenBashCompletion(w io.Writer) error {
if err := preamble(w, cmd.Name()); err != nil {
// GenBashCompletion generates bash completion file and writes to the passed writer.
func (c *Command) GenBashCompletion(w io.Writer) error {
buf := new(bytes.Buffer)
writePreamble(buf, c.Name())
if len(c.BashCompletionFunction) > 0 {
buf.WriteString(c.BashCompletionFunction + "\n")
}
gen(buf, c)
writePostscript(buf, c.Name())
_, err := buf.WriteTo(w)
return err
}
if len(cmd.BashCompletionFunction) > 0 {
if _, err := fmt.Fprintf(w, "%s\n", cmd.BashCompletionFunction); err != nil {
return err
}
}
if err := gen(cmd, w); err != nil {
return err
}
return postscript(w, cmd.Name())
}
func nonCompletableFlag(flag *pflag.Flag) bool {
return flag.Hidden || len(flag.Deprecated) > 0
}
func (cmd *Command) GenBashCompletionFile(filename string) error {
// GenBashCompletionFile generates bash completion file.
func (c *Command) GenBashCompletionFile(filename string) error {
outFile, err := os.Create(filename)
if err != nil {
return err
}
defer outFile.Close()
return cmd.GenBashCompletion(outFile)
return c.GenBashCompletion(outFile)
}
// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag, if it exists.
func (cmd *Command) MarkFlagRequired(name string) error {
return MarkFlagRequired(cmd.Flags(), name)
// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
// and causes your command to report an error if invoked without the flag.
func (c *Command) MarkFlagRequired(name string) error {
return MarkFlagRequired(c.Flags(), name)
}
// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag, if it exists.
func (cmd *Command) MarkPersistentFlagRequired(name string) error {
return MarkFlagRequired(cmd.PersistentFlags(), name)
// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists,
// and causes your command to report an error if invoked without the flag.
func (c *Command) MarkPersistentFlagRequired(name string) error {
return MarkFlagRequired(c.PersistentFlags(), name)
}
// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag in the flag set, if it exists.
// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
// and causes your command to report an error if invoked without the flag.
func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
}
// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists.
// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
func (cmd *Command) MarkFlagFilename(name string, extensions ...string) error {
return MarkFlagFilename(cmd.Flags(), name, extensions...)
func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
return MarkFlagFilename(c.Flags(), name, extensions...)
}
// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
// Generated bash autocompletion will call the bash function f for the flag.
func (cmd *Command) MarkFlagCustom(name string, f string) error {
return MarkFlagCustom(cmd.Flags(), name, f)
func (c *Command) MarkFlagCustom(name string, f string) error {
return MarkFlagCustom(c.Flags(), name, f)
}
// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists.
// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
func (cmd *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
return MarkFlagFilename(cmd.PersistentFlags(), name, extensions...)
func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
}
// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists.
vendor/github.com/spf13/cobra/bash_completions.md
@ -1,206 +0,0 @@
# Generating Bash Completions For Your Own cobra.Command
Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows:
```go
package main
import (
"io/ioutil"
"os"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd"
)
func main() {
kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard)
kubectl.GenBashCompletionFile("out.sh")
}
```
That will get you completions of subcommands and flags. If you make additional annotations to your code, you can get even more intelligent and flexible behavior.
## Creating your own custom functions
Some more actual code that works in kubernetes:
```bash
const (
bash_completion_func = `__kubectl_parse_get()
{
local kubectl_output out
if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then
out=($(echo "${kubectl_output}" | awk '{print $1}'))
COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) )
fi
}
__kubectl_get_resource()
{
if [[ ${#nouns[@]} -eq 0 ]]; then
return 1
fi
__kubectl_parse_get ${nouns[${#nouns[@]} -1]}
if [[ $? -eq 0 ]]; then
return 0
fi
}
__custom_func() {
case ${last_command} in
kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop)
__kubectl_get_resource
return
;;
*)
;;
esac
}
`)
```
And then I set that in my command definition:
```go
cmds := &cobra.Command{
Use: "kubectl",
Short: "kubectl controls the Kubernetes cluster manager",
Long: `kubectl controls the Kubernetes cluster manager.
Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
Run: runHelp,
BashCompletionFunction: bash_completion_func,
}
```
The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built-in processor is unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` then `__custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
## Have the completions code complete your 'nouns'
In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like:
```go
validArgs := []string{"pod", "node", "service", "replicationcontroller"}
cmd := &cobra.Command{
Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
Short: "Display one or many resources",
Long: get_long,
Example: get_example,
Run: func(cmd *cobra.Command, args []string) {
err := RunGet(f, out, cmd, args)
util.CheckErr(err)
},
ValidArgs: validArgs,
}
```
Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like
```bash
# kubectl get [tab][tab]
node pod replicationcontroller service
```
## Plural form and shortcuts for nouns
If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
```go
argAliases := []string{"pods", "nodes", "services", "svc", "replicationcontrollers", "rc"}
cmd := &cobra.Command{
...
ValidArgs: validArgs,
ArgAliases: argAliases,
}
```
The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
the completion algorithm if entered manually, e.g. in:
```bash
# kubectl get rc [tab][tab]
backend frontend database
```
Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns
in this example again instead of the replication controllers.
## Mark flags as required
Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy.
```go
cmd.MarkFlagRequired("pod")
cmd.MarkFlagRequired("container")
```
and you'll get something like
```bash
# kubectl exec [tab][tab][tab]
-c --container= -p --pod=
```
## Specify valid filename extensions for flags that take a filename
In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions.
```go
annotations := []string{"json", "yaml", "yml"}
annotation := make(map[string][]string)
annotation[cobra.BashCompFilenameExt] = annotations
flag := &pflag.Flag{
Name: "filename",
Shorthand: "f",
Usage: usage,
Value: value,
DefValue: value.String(),
Annotations: annotation,
}
cmd.Flags().AddFlag(flag)
```
Now when you run a command with this filename flag you'll get something like
```bash
# kubectl create -f
test/ example/ rpmbuild/
hello.yml test.json
```
So while there are many other files in the CWD, it only shows subdirectories and files with valid extensions.
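As a lighter-weight sketch, the same annotation can be attached through the `MarkFlagFilename` helper (assuming the flag is first defined on the command; the flag name and usage string here are illustrative):
```go
cmd.Flags().StringP("filename", "f", "", "Filename to use to create the resource")
// Limit completion for --filename to .json/.yaml/.yml files and directories.
cmd.MarkFlagFilename("filename", "json", "yaml", "yml")
```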
## Specify custom flag completion
Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specify a custom flag completion function with cobra.BashCompCustom:
```go
annotation := make(map[string][]string)
annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"}
flag := &pflag.Flag{
Name: "namespace",
Usage: usage,
Annotations: annotation,
}
cmd.Flags().AddFlag(flag)
```
In addition, add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction` value, e.g.:
```bash
__kubectl_get_namespaces()
{
local template
template="{{ range .items }}{{ .metadata.name }} {{ end }}"
local kubectl_out
if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then
COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) )
fi
}
```
vendor/github.com/spf13/cobra/cobra.go
@ -29,6 +29,7 @@ import (
var templateFuncs = template.FuncMap{
"trim": strings.TrimSpace,
"trimRightSpace": trimRightSpace,
"trimTrailingWhitespaces": trimRightSpace,
"appendIfNotPresent": appendIfNotPresent,
"rpad": rpad,
"gt": Gt,
@ -37,7 +38,8 @@ var templateFuncs = template.FuncMap{
var initializers []func()
// Automatic prefix matching can be a dangerous thing to automatically enable in CLI tools.
// EnablePrefixMatching allows to set automatic prefix matching. Automatic prefix matching can be a dangerous thing
// to automatically enable in CLI tools.
// Set this to true to enable it.
var EnablePrefixMatching = false
@ -45,13 +47,22 @@ var EnablePrefixMatching = false
// To disable sorting, set it to false.
var EnableCommandSorting = true
// MousetrapHelpText enables an information splash screen on Windows
// if the CLI is started from explorer.exe.
// To disable the mousetrap, just set this variable to blank string ("").
// Works only on Microsoft Windows.
var MousetrapHelpText string = `This is a command line tool.
You need to open cmd.exe and run it from there.
`
// AddTemplateFunc adds a template function that's available to Usage and Help
// template generation.
func AddTemplateFunc(name string, tmplFunc interface{}) {
templateFuncs[name] = tmplFunc
}
// AddTemplateFuncs adds multiple template functions availalble to Usage and
// AddTemplateFuncs adds multiple template functions that are available to Usage and
// Help template generation.
func AddTemplateFuncs(tmplFuncs template.FuncMap) {
for k, v := range tmplFuncs {
@ -59,11 +70,14 @@ func AddTemplateFuncs(tmplFuncs template.FuncMap) {
}
}
// OnInitialize takes a series of func() arguments and appends them to a slice of func().
// OnInitialize sets the passed functions to be run when each command's
// Execute method is called.
func OnInitialize(y ...func()) {
initializers = append(initializers, y...)
}
// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
// ints and then compared.
@ -94,6 +108,8 @@ func Gt(a interface{}, b interface{}) bool {
return left > right
}
// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
func Eq(a interface{}, b interface{}) bool {
av := reflect.ValueOf(a)
@ -114,6 +130,8 @@ func trimRightSpace(s string) string {
return strings.TrimRightFunc(s, unicode.IsSpace)
}
// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
func appendIfNotPresent(s, stringToAppend string) string {
if strings.Contains(s, stringToAppend) {
@ -171,3 +189,12 @@ func ld(s, t string, ignoreCase bool) int {
}
return d[len(s)][len(t)]
}
func stringInSlice(a string, list []string) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}
@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
File diff suppressed because it is too large
View file
@ -11,14 +11,8 @@ import (
var preExecHookFn = preExecHook
// enables an information splash screen on Windows if the CLI is started from explorer.exe.
var MousetrapHelpText string = `This is a command line tool
You need to open cmd.exe and run it from there.
`
func preExecHook(c *Command) {
if mousetrap.StartedByExplorer() {
if MousetrapHelpText != "" && mousetrap.StartedByExplorer() {
c.Print(MousetrapHelpText)
time.Sleep(5 * time.Second)
os.Exit(1)
126
vendor/github.com/spf13/cobra/zsh_completions.go generated vendored Normal file
View file
@ -0,0 +1,126 @@
package cobra
import (
"bytes"
"fmt"
"io"
"os"
"strings"
)
// GenZshCompletionFile generates zsh completion file.
func (c *Command) GenZshCompletionFile(filename string) error {
outFile, err := os.Create(filename)
if err != nil {
return err
}
defer outFile.Close()
return c.GenZshCompletion(outFile)
}
// GenZshCompletion generates a zsh completion file and writes to the passed writer.
func (c *Command) GenZshCompletion(w io.Writer) error {
buf := new(bytes.Buffer)
writeHeader(buf, c)
maxDepth := maxDepth(c)
writeLevelMapping(buf, maxDepth)
writeLevelCases(buf, maxDepth, c)
_, err := buf.WriteTo(w)
return err
}
func writeHeader(w io.Writer, cmd *Command) {
fmt.Fprintf(w, "#compdef %s\n\n", cmd.Name())
}
func maxDepth(c *Command) int {
if len(c.Commands()) == 0 {
return 0
}
maxDepthSub := 0
for _, s := range c.Commands() {
subDepth := maxDepth(s)
if subDepth > maxDepthSub {
maxDepthSub = subDepth
}
}
return 1 + maxDepthSub
}
func writeLevelMapping(w io.Writer, numLevels int) {
fmt.Fprintln(w, `_arguments \`)
for i := 1; i <= numLevels; i++ {
fmt.Fprintf(w, ` '%d: :->level%d' \`, i, i)
fmt.Fprintln(w)
}
fmt.Fprintf(w, ` '%d: :%s'`, numLevels+1, "_files")
fmt.Fprintln(w)
}
func writeLevelCases(w io.Writer, maxDepth int, root *Command) {
fmt.Fprintln(w, "case $state in")
defer fmt.Fprintln(w, "esac")
for i := 1; i <= maxDepth; i++ {
fmt.Fprintf(w, " level%d)\n", i)
writeLevel(w, root, i)
fmt.Fprintln(w, " ;;")
}
fmt.Fprintln(w, " *)")
fmt.Fprintln(w, " _arguments '*: :_files'")
fmt.Fprintln(w, " ;;")
}
func writeLevel(w io.Writer, root *Command, i int) {
fmt.Fprintf(w, " case $words[%d] in\n", i)
defer fmt.Fprintln(w, " esac")
commands := filterByLevel(root, i)
byParent := groupByParent(commands)
for p, c := range byParent {
names := names(c)
fmt.Fprintf(w, " %s)\n", p)
fmt.Fprintf(w, " _arguments '%d: :(%s)'\n", i, strings.Join(names, " "))
fmt.Fprintln(w, " ;;")
}
fmt.Fprintln(w, " *)")
fmt.Fprintln(w, " _arguments '*: :_files'")
fmt.Fprintln(w, " ;;")
}
func filterByLevel(c *Command, l int) []*Command {
cs := make([]*Command, 0)
if l == 0 {
cs = append(cs, c)
return cs
}
for _, s := range c.Commands() {
cs = append(cs, filterByLevel(s, l-1)...)
}
return cs
}
func groupByParent(commands []*Command) map[string][]*Command {
m := make(map[string][]*Command)
for _, c := range commands {
parent := c.Parent()
if parent == nil {
continue
}
m[parent.Name()] = append(m[parent.Name()], c)
}
return m
}
func names(commands []*Command) []string {
ns := make([]string, len(commands))
for i, c := range commands {
ns[i] = c.Name()
}
return ns
}
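For orientation, the generator above can be invoked from any program that builds a `*cobra.Command`; the snippet below is only a minimal sketch (the command name and output file are illustrative, not part of this commit):

```go
package main

import "github.com/spf13/cobra"

func main() {
	// Hypothetical root command; any *cobra.Command works the same way.
	root := &cobra.Command{Use: "expose"}

	// Write a zsh completion script that can be dropped into $fpath.
	if err := root.GenZshCompletionFile("_expose"); err != nil {
		panic(err)
	}
}
```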
View file
@ -1,21 +0,0 @@
sudo: false
language: go
go:
- 1.5.4
- 1.6.3
- 1.7
- tip
matrix:
allow_failures:
- go: tip
install:
- go get github.com/golang/lint/golint
- export PATH=$GOPATH/bin:$PATH
- go install ./...
script:
- verify/all.sh -v
- go test ./...
View file
@ -1,275 +0,0 @@
[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag)
## Description
pflag is a drop-in replacement for Go's flag package, implementing
POSIX/GNU-style --flags.
pflag is compatible with the [GNU extensions to the POSIX recommendations
for command-line options][1]. For a more precise description, see the
"Command-line flag syntax" section below.
[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
pflag is available under the same style of BSD license as the Go language,
which can be found in the LICENSE file.
## Installation
pflag is available using the standard `go get` command.
Install by running:
go get github.com/spf13/pflag
Run tests by running:
go test github.com/spf13/pflag
## Usage
pflag is a drop-in replacement for Go's native flag package. If you import
pflag under the name "flag" then all code should continue to function
with no changes.
``` go
import flag "github.com/spf13/pflag"
```
There is one exception to this: if you directly instantiate the Flag struct
there is one more field "Shorthand" that you will need to set.
Most code never instantiates this struct directly, and instead uses
functions such as String(), BoolVar(), and Var(), and is therefore
unaffected.
Define flags using flag.String(), Bool(), Int(), etc.
This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
``` go
var ip *int = flag.Int("flagname", 1234, "help message for flagname")
```
If you like, you can bind the flag to a variable using the Var() functions.
``` go
var flagvar int
func init() {
flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
}
```
Or you can create custom flags that satisfy the Value interface (with
pointer receivers) and couple them to flag parsing by
``` go
flag.Var(&flagVal, "name", "help message for flagname")
```
For such flags, the default value is just the initial value of the variable.
After all flags are defined, call
``` go
flag.Parse()
```
to parse the command line into the defined flags.
Flags may then be used directly. If you're using the flags themselves,
they are all pointers; if you bind to variables, they're values.
``` go
fmt.Println("ip has value ", *ip)
fmt.Println("flagvar has value ", flagvar)
```
There are helper functions to retrieve the values later if you have the
FlagSet but found it difficult to keep track of all of the flag pointers
in your code. If you have a pflag.FlagSet with a flag called 'flagname'
of type int, you can use GetInt() to get the int value. Note that
'flagname' must exist and must be an int; GetString("flagname") will fail.
``` go
i, err := flagset.GetInt("flagname")
```
After parsing, the arguments after the flag are available as the
slice flag.Args() or individually as flag.Arg(i).
The arguments are indexed from 0 through flag.NArg()-1.
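A minimal sketch of reading positional arguments after parsing (the flag names here are illustrative):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	verbose := flag.BoolP("verbose", "v", false, "verbose output")
	flag.Parse()

	fmt.Println("verbose:", *verbose)
	fmt.Println("positional args:", flag.Args()) // everything that is not a flag
	for i := 0; i < flag.NArg(); i++ {
		fmt.Println(i, flag.Arg(i))
	}
}
```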
The pflag package also defines some new functions that are not in flag,
that give one-letter shorthands for flags. You can use these by appending
'P' to the name of any function that defines a flag.
``` go
var ip = flag.IntP("flagname", "f", 1234, "help message")
var flagvar bool
func init() {
flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
}
flag.VarP(&flagVal, "varname", "v", "help message")
```
Shorthand letters can be used with single dashes on the command line.
Boolean shorthand flags can be combined with other shorthand flags.
The default set of command-line flags is controlled by
top-level functions. The FlagSet type allows one to define
independent sets of flags, such as to implement subcommands
in a command-line interface. The methods of FlagSet are
analogous to the top-level functions for the command-line
flag set.
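As a rough sketch of the subcommand pattern described above (the command and flag names are made up for illustration):

```go
package main

import (
	"fmt"
	"os"

	flag "github.com/spf13/pflag"
)

func main() {
	// Independent flag set for a hypothetical "serve" subcommand.
	serveFlags := flag.NewFlagSet("serve", flag.ExitOnError)
	port := serveFlags.IntP("port", "p", 8080, "port to listen on")

	if len(os.Args) > 1 && os.Args[1] == "serve" {
		// ExitOnError makes Parse exit on failure, so its error can be ignored here.
		serveFlags.Parse(os.Args[2:])
		fmt.Println("serving on port", *port)
	}
}
```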
## Setting no option default values for flags
After you create a flag it is possible to set the pflag.NoOptDefVal for
the given flag. Doing this changes the meaning of the flag slightly. If
a flag has a NoOptDefVal and the flag is set on the command line without
an option the flag will be set to the NoOptDefVal. For example given:
``` go
var ip = flag.IntP("flagname", "f", 1234, "help message")
flag.Lookup("flagname").NoOptDefVal = "4321"
```
Would result in something like
| Parsed Arguments | Resulting Value |
| ------------- | ------------- |
| --flagname=1357 | ip=1357 |
| --flagname | ip=4321 |
| [nothing] | ip=1234 |
## Command line flag syntax
```
--flag // boolean flags, or flags with no option default values
--flag x // only on flags without a default value
--flag=x
```
Unlike the flag package, a single dash before an option means something
different than a double dash. Single dashes signify a series of shorthand
letters for flags. All but the last shorthand letter must be a boolean flag
or a flag with a 'no option default value'.
```
// boolean or flags where the 'no option default value' is set
-f
-f=true
-abc
but
-b true is INVALID
// non-boolean and flags without a 'no option default value'
-n 1234
-n=1234
-n1234
// mixed
-abcs "hello"
-absd="hello"
-abcs1234
```
Flag parsing stops after the terminator "--". Unlike the flag package,
flags can be interspersed with arguments anywhere on the command line
before this terminator.
Integer flags accept 1234, 0664, 0x1234 and may be negative.
Boolean flags (in their long form) accept 1, 0, t, f, true, false,
TRUE, FALSE, True, False.
Duration flags accept any input valid for time.ParseDuration.
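For example, a duration flag and an integer flag could be declared and fed the formats listed above (a small illustrative sketch, not from the original README):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	timeout := flag.Duration("timeout", 0, "how long to wait")
	retries := flag.Int("retries", 3, "number of retries")

	// e.g. invoked as: prog --timeout=1h30m --retries=0x10
	flag.Parse()
	fmt.Println(*timeout, *retries)
}
```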
## Mutating or "Normalizing" Flag names
It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
``` go
func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
from := []string{"-", "_"}
to := "."
for _, sep := range from {
name = strings.Replace(name, sep, to, -1)
}
return pflag.NormalizedName(name)
}
myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
```
**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
``` go
func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
switch name {
case "old-flag-name":
name = "new-flag-name"
break
}
return pflag.NormalizedName(name)
}
myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
```
## Deprecating a flag or its shorthand
It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.
**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.
```go
// deprecate a flag by specifying its name and a usage message
flags.MarkDeprecated("badflag", "please use --good-flag instead")
```
This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.
**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n".
```go
// deprecate a flag shorthand by specifying its flag name and a usage message
flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
```
This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.
Note that the usage message is required here; it must not be empty.
## Hidden flags
It is possible to mark a flag as hidden, meaning it will still function as normal, however will not show up in usage/help text.
**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available.
```go
// hide a flag by specifying its name
flags.MarkHidden("secretFlag")
```
## Supporting Go flags when using pflag
In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary
to support flags defined by third-party dependencies (e.g. `golang/glog`).
**Example**: You want to add the Go flags to the `CommandLine` flagset
```go
import (
goflag "flag"
flag "github.com/spf13/pflag"
)
var ip *int = flag.Int("flagname", 1234, "help message for flagname")
func main() {
flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
flag.Parse()
}
```
## More info
You can see the full reference documentation of the pflag package
[at godoc.org][3], or through Go's standard documentation system by
running `godoc -http=:6060` and browsing to
[http://localhost:6060/pkg/github.com/spf13/pflag][2] after
installation.
[2]: http://localhost:6060/pkg/github.com/spf13/pflag
[3]: http://godoc.org/github.com/spf13/pflag
147
vendor/github.com/spf13/pflag/bool_slice.go generated vendored Normal file
View file
@ -0,0 +1,147 @@
package pflag
import (
"io"
"strconv"
"strings"
)
// -- boolSlice Value
type boolSliceValue struct {
value *[]bool
changed bool
}
func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue {
bsv := new(boolSliceValue)
bsv.value = p
*bsv.value = val
return bsv
}
// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag.
// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended.
func (s *boolSliceValue) Set(val string) error {
// remove all quote characters
rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
// read flag arguments with CSV parser
boolStrSlice, err := readAsCSV(rmQuote.Replace(val))
if err != nil && err != io.EOF {
return err
}
// parse boolean values into slice
out := make([]bool, 0, len(boolStrSlice))
for _, boolStr := range boolStrSlice {
b, err := strconv.ParseBool(strings.TrimSpace(boolStr))
if err != nil {
return err
}
out = append(out, b)
}
if !s.changed {
*s.value = out
} else {
*s.value = append(*s.value, out...)
}
s.changed = true
return nil
}
// Type returns a string that uniquely represents this flag's type.
func (s *boolSliceValue) Type() string {
return "boolSlice"
}
// String defines a "native" format for this boolean slice flag value.
func (s *boolSliceValue) String() string {
boolStrSlice := make([]string, len(*s.value))
for i, b := range *s.value {
boolStrSlice[i] = strconv.FormatBool(b)
}
out, _ := writeAsCSV(boolStrSlice)
return "[" + out + "]"
}
func boolSliceConv(val string) (interface{}, error) {
val = strings.Trim(val, "[]")
// Empty string would cause a slice with one (empty) entry
if len(val) == 0 {
return []bool{}, nil
}
ss := strings.Split(val, ",")
out := make([]bool, len(ss))
for i, t := range ss {
var err error
out[i], err = strconv.ParseBool(t)
if err != nil {
return nil, err
}
}
return out, nil
}
// GetBoolSlice returns the []bool value of a flag with the given name.
func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) {
val, err := f.getFlagType(name, "boolSlice", boolSliceConv)
if err != nil {
return []bool{}, err
}
return val.([]bool), nil
}
// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string.
// The argument p points to a []bool variable in which to store the value of the flag.
func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
f.VarP(newBoolSliceValue(value, p), name, "", usage)
}
// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
f.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
}
// BoolSliceVar defines a []bool flag with specified name, default value, and usage string.
// The argument p points to a []bool variable in which to store the value of the flag.
func BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage)
}
// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
}
// BoolSlice defines a []bool flag with specified name, default value, and usage string.
// The return value is the address of a []bool variable that stores the value of the flag.
func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool {
p := []bool{}
f.BoolSliceVarP(&p, name, "", value, usage)
return &p
}
// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
p := []bool{}
f.BoolSliceVarP(&p, name, shorthand, value, usage)
return &p
}
// BoolSlice defines a []bool flag with specified name, default value, and usage string.
// The return value is the address of a []bool variable that stores the value of the flag.
func BoolSlice(name string, value []bool, usage string) *[]bool {
return CommandLine.BoolSliceP(name, "", value, usage)
}
// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
return CommandLine.BoolSliceP(name, shorthand, value, usage)
}
209
vendor/github.com/spf13/pflag/bytes.go generated vendored Normal file
View file
@ -0,0 +1,209 @@
package pflag
import (
"encoding/base64"
"encoding/hex"
"fmt"
"strings"
)
// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded
type bytesHexValue []byte
// String implements pflag.Value.String.
func (bytesHex bytesHexValue) String() string {
return fmt.Sprintf("%X", []byte(bytesHex))
}
// Set implements pflag.Value.Set.
func (bytesHex *bytesHexValue) Set(value string) error {
bin, err := hex.DecodeString(strings.TrimSpace(value))
if err != nil {
return err
}
*bytesHex = bin
return nil
}
// Type implements pflag.Value.Type.
func (*bytesHexValue) Type() string {
return "bytesHex"
}
func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue {
*p = val
return (*bytesHexValue)(p)
}
func bytesHexConv(sval string) (interface{}, error) {
bin, err := hex.DecodeString(sval)
if err == nil {
return bin, nil
}
return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
}
// GetBytesHex return the []byte value of a flag with the given name
func (f *FlagSet) GetBytesHex(name string) ([]byte, error) {
val, err := f.getFlagType(name, "bytesHex", bytesHexConv)
if err != nil {
return []byte{}, err
}
return val.([]byte), nil
}
// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
// The argument p points to an []byte variable in which to store the value of the flag.
func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) {
f.VarP(newBytesHexValue(value, p), name, "", usage)
}
// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
f.VarP(newBytesHexValue(value, p), name, shorthand, usage)
}
// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
// The argument p points to an []byte variable in which to store the value of the flag.
func BytesHexVar(p *[]byte, name string, value []byte, usage string) {
CommandLine.VarP(newBytesHexValue(value, p), name, "", usage)
}
// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage)
}
// BytesHex defines an []byte flag with specified name, default value, and usage string.
// The return value is the address of an []byte variable that stores the value of the flag.
func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte {
p := new([]byte)
f.BytesHexVarP(p, name, "", value, usage)
return p
}
// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
p := new([]byte)
f.BytesHexVarP(p, name, shorthand, value, usage)
return p
}
// BytesHex defines an []byte flag with specified name, default value, and usage string.
// The return value is the address of an []byte variable that stores the value of the flag.
func BytesHex(name string, value []byte, usage string) *[]byte {
return CommandLine.BytesHexP(name, "", value, usage)
}
// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
return CommandLine.BytesHexP(name, shorthand, value, usage)
}
// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded
type bytesBase64Value []byte
// String implements pflag.Value.String.
func (bytesBase64 bytesBase64Value) String() string {
return base64.StdEncoding.EncodeToString([]byte(bytesBase64))
}
// Set implements pflag.Value.Set.
func (bytesBase64 *bytesBase64Value) Set(value string) error {
bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value))
if err != nil {
return err
}
*bytesBase64 = bin
return nil
}
// Type implements pflag.Value.Type.
func (*bytesBase64Value) Type() string {
return "bytesBase64"
}
func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value {
*p = val
return (*bytesBase64Value)(p)
}
func bytesBase64ValueConv(sval string) (interface{}, error) {
bin, err := base64.StdEncoding.DecodeString(sval)
if err == nil {
return bin, nil
}
return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
}
// GetBytesBase64 return the []byte value of a flag with the given name
func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) {
val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv)
if err != nil {
return []byte{}, err
}
return val.([]byte), nil
}
// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
// The argument p points to an []byte variable in which to store the value of the flag.
func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
f.VarP(newBytesBase64Value(value, p), name, "", usage)
}
// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
f.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
}
// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
// The argument p points to an []byte variable in which to store the value of the flag.
func BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage)
}
// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
}
// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
// The return value is the address of an []byte variable that stores the value of the flag.
func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
p := new([]byte)
f.BytesBase64VarP(p, name, "", value, usage)
return p
}
// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
p := new([]byte)
f.BytesBase64VarP(p, name, shorthand, value, usage)
return p
}
// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
// The return value is the address of an []byte variable that stores the value of the flag.
func BytesBase64(name string, value []byte, usage string) *[]byte {
return CommandLine.BytesBase64P(name, "", value, usage)
}
// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
return CommandLine.BytesBase64P(name, shorthand, value, usage)
}
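The new byte-slice flags added above could be used roughly like this (a hedged sketch; the flag name is illustrative):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// --key=deadbeef would yield []byte{0xde, 0xad, 0xbe, 0xef}.
	key := flag.BytesHexP("key", "k", []byte{}, "secret key, hex encoded")
	flag.Parse()
	fmt.Printf("key: %x\n", *key)
}
```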
View file
@ -11,13 +11,13 @@ func newCountValue(val int, p *int) *countValue {
}
func (i *countValue) Set(s string) error {
v, err := strconv.ParseInt(s, 0, 64)
// -1 means that no specific value was passed, so increment
if v == -1 {
// "+1" means that no specific value was passed, so increment
if s == "+1" {
*i = countValue(*i + 1)
} else {
*i = countValue(v)
return nil
}
v, err := strconv.ParseInt(s, 0, 0)
*i = countValue(v)
return err
}
@ -54,7 +54,7 @@ func (f *FlagSet) CountVar(p *int, name string, usage string) {
// CountVarP is like CountVar only take a shorthand for the flag name.
func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
flag.NoOptDefVal = "-1"
flag.NoOptDefVal = "+1"
}
// CountVar like CountVar only the flag is placed on the CommandLine instead of a given flag set
@ -83,7 +83,9 @@ func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
return p
}
// Count like Count only the flag is placed on the CommandLine isntead of a given flag set
// Count defines a count flag with specified name, default value, and usage string.
// The return value is the address of an int variable that stores the value of the flag.
// A count flag will add 1 to its value every time it is found on the command line
func Count(name string, usage string) *int {
return CommandLine.CountP(name, "", usage)
}
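To illustrate the count behaviour described in the updated comment (this example is not part of the vendored code):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	verbosity := flag.CountP("verbose", "v", "increase verbosity")
	flag.Parse()

	// e.g. "-vvv" on the command line yields a verbosity of 3.
	fmt.Println("verbosity:", *verbosity)
}
```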
128
vendor/github.com/spf13/pflag/duration_slice.go generated vendored Normal file
View file
@ -0,0 +1,128 @@
package pflag
import (
"fmt"
"strings"
"time"
)
// -- durationSlice Value
type durationSliceValue struct {
value *[]time.Duration
changed bool
}
func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue {
dsv := new(durationSliceValue)
dsv.value = p
*dsv.value = val
return dsv
}
func (s *durationSliceValue) Set(val string) error {
ss := strings.Split(val, ",")
out := make([]time.Duration, len(ss))
for i, d := range ss {
var err error
out[i], err = time.ParseDuration(d)
if err != nil {
return err
}
}
if !s.changed {
*s.value = out
} else {
*s.value = append(*s.value, out...)
}
s.changed = true
return nil
}
func (s *durationSliceValue) Type() string {
return "durationSlice"
}
func (s *durationSliceValue) String() string {
out := make([]string, len(*s.value))
for i, d := range *s.value {
out[i] = fmt.Sprintf("%s", d)
}
return "[" + strings.Join(out, ",") + "]"
}
func durationSliceConv(val string) (interface{}, error) {
val = strings.Trim(val, "[]")
// Empty string would cause a slice with one (empty) entry
if len(val) == 0 {
return []time.Duration{}, nil
}
ss := strings.Split(val, ",")
out := make([]time.Duration, len(ss))
for i, d := range ss {
var err error
out[i], err = time.ParseDuration(d)
if err != nil {
return nil, err
}
}
return out, nil
}
// GetDurationSlice returns the []time.Duration value of a flag with the given name
func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) {
val, err := f.getFlagType(name, "durationSlice", durationSliceConv)
if err != nil {
return []time.Duration{}, err
}
return val.([]time.Duration), nil
}
// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string.
// The argument p points to a []time.Duration variable in which to store the value of the flag.
func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
f.VarP(newDurationSliceValue(value, p), name, "", usage)
}
// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
f.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
}
// DurationSliceVar defines a duration[] flag with specified name, default value, and usage string.
// The argument p points to a duration[] variable in which to store the value of the flag.
func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage)
}
// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
}
// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
// The return value is the address of a []time.Duration variable that stores the value of the flag.
func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
p := []time.Duration{}
f.DurationSliceVarP(&p, name, "", value, usage)
return &p
}
// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
p := []time.Duration{}
f.DurationSliceVarP(&p, name, shorthand, value, usage)
return &p
}
// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
// The return value is the address of a []time.Duration variable that stores the value of the flag.
func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
return CommandLine.DurationSliceP(name, "", value, usage)
}
// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
return CommandLine.DurationSliceP(name, shorthand, value, usage)
}
467
vendor/github.com/spf13/pflag/flag.go generated vendored
View file
@ -16,9 +16,9 @@ pflag is a drop-in replacement of Go's native flag package. If you import
pflag under the name "flag" then all code should continue to function
with no changes.
import flag "github.com/ogier/pflag"
import flag "github.com/spf13/pflag"
There is one exception to this: if you directly instantiate the Flag struct
There is one exception to this: if you directly instantiate the Flag struct
there is one more field "Shorthand" that you will need to set.
Most code never instantiates this struct directly, and instead uses
functions such as String(), BoolVar(), and Var(), and is therefore
@ -101,6 +101,7 @@ package pflag
import (
"bytes"
"errors"
goflag "flag"
"fmt"
"io"
"os"
@ -123,6 +124,12 @@ const (
PanicOnError
)
// ParseErrorsWhitelist defines the parsing errors that can be ignored
type ParseErrorsWhitelist struct {
// UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags
UnknownFlags bool
}
// NormalizedName is a flag name that has been normalized according to rules
// for the FlagSet (e.g. making '-' and '_' equivalent).
type NormalizedName string
@ -134,18 +141,30 @@ type FlagSet struct {
// a custom error handler.
Usage func()
// SortFlags is used to indicate, if user wants to have sorted flags in
// help/usage messages.
SortFlags bool
// ParseErrorsWhitelist is used to configure a whitelist of errors
ParseErrorsWhitelist ParseErrorsWhitelist
name string
parsed bool
actual map[NormalizedName]*Flag
orderedActual []*Flag
sortedActual []*Flag
formal map[NormalizedName]*Flag
orderedFormal []*Flag
sortedFormal []*Flag
shorthands map[byte]*Flag
args []string // arguments after flags
argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no --
exitOnError bool // does the program exit if there's an error?
errorHandling ErrorHandling
output io.Writer // nil means stderr; use out() accessor
interspersed bool // allow interspersed option/non-option args
normalizeNameFunc func(f *FlagSet, name string) NormalizedName
addedGoFlagSets []*goflag.FlagSet
}
// A Flag represents the state of a flag.
@ -156,7 +175,7 @@ type Flag struct {
Value Value // value as set
DefValue string // default value (as text); for usage message
Changed bool // If the user set the value (or if left to default)
NoOptDefVal string //default value (as text); if the flag is on the command line without any options
NoOptDefVal string // default value (as text); if the flag is on the command line without any options
Deprecated string // If this flag is deprecated, this string is the new or now thing to use
Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text
ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the new or now thing to use
@ -194,11 +213,19 @@ func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
// "--getUrl" which may also be translated to "geturl" and everything will work.
func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
f.normalizeNameFunc = n
for k, v := range f.formal {
delete(f.formal, k)
nname := f.normalizeFlagName(string(k))
f.formal[nname] = v
v.Name = string(nname)
f.sortedFormal = f.sortedFormal[:0]
for fname, flag := range f.formal {
nname := f.normalizeFlagName(flag.Name)
if fname == nname {
continue
}
flag.Name = string(nname)
delete(f.formal, fname)
f.formal[nname] = flag
if _, set := f.actual[fname]; set {
delete(f.actual, fname)
f.actual[nname] = flag
}
}
}
@ -229,46 +256,78 @@ func (f *FlagSet) SetOutput(output io.Writer) {
f.output = output
}
// VisitAll visits the flags in lexicographical order, calling fn for each.
// VisitAll visits the flags in lexicographical order or
// in primordial order if f.SortFlags is false, calling fn for each.
// It visits all flags, even those not set.
func (f *FlagSet) VisitAll(fn func(*Flag)) {
for _, flag := range sortFlags(f.formal) {
if len(f.formal) == 0 {
return
}
var flags []*Flag
if f.SortFlags {
if len(f.formal) != len(f.sortedFormal) {
f.sortedFormal = sortFlags(f.formal)
}
flags = f.sortedFormal
} else {
flags = f.orderedFormal
}
for _, flag := range flags {
fn(flag)
}
}
// HasFlags returns a bool to indicate if the FlagSet has any flags definied.
// HasFlags returns a bool to indicate if the FlagSet has any flags defined.
func (f *FlagSet) HasFlags() bool {
return len(f.formal) > 0
}
// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags
// definied that are not hidden or deprecated.
// that are not hidden.
func (f *FlagSet) HasAvailableFlags() bool {
for _, flag := range f.formal {
if !flag.Hidden && len(flag.Deprecated) == 0 {
if !flag.Hidden {
return true
}
}
return false
}
// VisitAll visits the command-line flags in lexicographical order, calling
// fn for each. It visits all flags, even those not set.
// VisitAll visits the command-line flags in lexicographical order or
// in primordial order if f.SortFlags is false, calling fn for each.
// It visits all flags, even those not set.
func VisitAll(fn func(*Flag)) {
CommandLine.VisitAll(fn)
}
// Visit visits the flags in lexicographical order, calling fn for each.
// Visit visits the flags in lexicographical order or
// in primordial order if f.SortFlags is false, calling fn for each.
// It visits only those flags that have been set.
func (f *FlagSet) Visit(fn func(*Flag)) {
for _, flag := range sortFlags(f.actual) {
if len(f.actual) == 0 {
return
}
var flags []*Flag
if f.SortFlags {
if len(f.actual) != len(f.sortedActual) {
f.sortedActual = sortFlags(f.actual)
}
flags = f.sortedActual
} else {
flags = f.orderedActual
}
for _, flag := range flags {
fn(flag)
}
}
// Visit visits the command-line flags in lexicographical order, calling fn
// for each. It visits only those flags that have been set.
// Visit visits the command-line flags in lexicographical order or
// in primordial order if f.SortFlags is false, calling fn for each.
// It visits only those flags that have been set.
func Visit(fn func(*Flag)) {
CommandLine.Visit(fn)
}
@ -278,6 +337,22 @@ func (f *FlagSet) Lookup(name string) *Flag {
return f.lookup(f.normalizeFlagName(name))
}
// ShorthandLookup returns the Flag structure of the short handed flag,
// returning nil if none exists.
// It panics, if len(name) > 1.
func (f *FlagSet) ShorthandLookup(name string) *Flag {
if name == "" {
return nil
}
if len(name) > 1 {
msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name)
fmt.Fprintf(f.out(), msg)
panic(msg)
}
c := name[0]
return f.shorthands[c]
}
// lookup returns the Flag structure of the named flag, returning nil if none exists.
func (f *FlagSet) lookup(name NormalizedName) *Flag {
return f.formal[name]
@ -319,10 +394,11 @@ func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
if flag == nil {
return fmt.Errorf("flag %q does not exist", name)
}
if len(usageMessage) == 0 {
if usageMessage == "" {
return fmt.Errorf("deprecated message for flag %q must be set", name)
}
flag.Deprecated = usageMessage
flag.Hidden = true
return nil
}
@ -334,7 +410,7 @@ func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) erro
if flag == nil {
return fmt.Errorf("flag %q does not exist", name)
}
if len(usageMessage) == 0 {
if usageMessage == "" {
return fmt.Errorf("deprecated message for flag %q must be set", name)
}
flag.ShorthandDeprecated = usageMessage
@ -358,6 +434,12 @@ func Lookup(name string) *Flag {
return CommandLine.Lookup(name)
}
// ShorthandLookup returns the Flag structure of the short handed flag,
// returning nil if none exists.
func ShorthandLookup(name string) *Flag {
return CommandLine.ShorthandLookup(name)
}
// Set sets the value of the named flag.
func (f *FlagSet) Set(name, value string) error {
normalName := f.normalizeFlagName(name)
@ -365,17 +447,30 @@ func (f *FlagSet) Set(name, value string) error {
if !ok {
return fmt.Errorf("no such flag -%v", name)
}
err := flag.Value.Set(value)
if err != nil {
return err
var flagName string
if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
} else {
flagName = fmt.Sprintf("--%s", flag.Name)
}
return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
}
if !flag.Changed {
if f.actual == nil {
f.actual = make(map[NormalizedName]*Flag)
}
f.actual[normalName] = flag
f.orderedActual = append(f.orderedActual, flag)
flag.Changed = true
if len(flag.Deprecated) > 0 {
fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
}
if flag.Deprecated != "" {
fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
}
return nil
}
@ -416,7 +511,7 @@ func Set(name, value string) error {
// otherwise, the default values of all defined flags in the set.
func (f *FlagSet) PrintDefaults() {
usages := f.FlagUsages()
fmt.Fprintf(f.out(), "%s", usages)
fmt.Fprint(f.out(), usages)
}
// defaultIsZeroValue returns true if the default value for this flag represents
@ -482,43 +577,125 @@ func UnquoteUsage(flag *Flag) (name string, usage string) {
name = "int"
case "uint64":
name = "uint"
case "stringSlice":
name = "strings"
case "intSlice":
name = "ints"
case "uintSlice":
name = "uints"
case "boolSlice":
name = "bools"
}
return
}
// FlagUsages Returns a string containing the usage information for all flags in
// the FlagSet
func (f *FlagSet) FlagUsages() string {
x := new(bytes.Buffer)
// Splits the string `s` on whitespace into an initial substring up to
// `i` runes in length and the remainder. Will go `slop` over `i` if
// that encompasses the entire string (which allows the caller to
// avoid short orphan words on the final line).
func wrapN(i, slop int, s string) (string, string) {
if i+slop > len(s) {
return s, ""
}
w := strings.LastIndexAny(s[:i], " \t\n")
if w <= 0 {
return s, ""
}
nlPos := strings.LastIndex(s[:i], "\n")
if nlPos > 0 && nlPos < w {
return s[:nlPos], s[nlPos+1:]
}
return s[:w], s[w+1:]
}
// Wraps the string `s` to a maximum width `w` with leading indent
// `i`. The first line is not indented (this is assumed to be done by
// caller). Pass `w` == 0 to do no wrapping
func wrap(i, w int, s string) string {
if w == 0 {
return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1)
}
// space between indent i and end of line width w into which
// we should wrap the text.
wrap := w - i
var r, l string
// Not enough space for sensible wrapping. Wrap as a block on
// the next line instead.
if wrap < 24 {
i = 16
wrap = w - i
r += "\n" + strings.Repeat(" ", i)
}
// If still not enough space then don't even try to wrap.
if wrap < 24 {
return strings.Replace(s, "\n", r, -1)
}
// Try to avoid short orphan words on the final line, by
// allowing wrapN to go a bit over if that would fit in the
// remainder of the line.
slop := 5
wrap = wrap - slop
// Handle first line, which is indented by the caller (or the
// special case above)
l, s = wrapN(wrap, slop, s)
r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1)
// Now wrap the rest
for s != "" {
var t string
t, s = wrapN(wrap, slop, s)
r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1)
}
return r
}
// FlagUsagesWrapped returns a string containing the usage information
// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no
// wrapping)
func (f *FlagSet) FlagUsagesWrapped(cols int) string {
buf := new(bytes.Buffer)
lines := make([]string, 0, len(f.formal))
maxlen := 0
f.VisitAll(func(flag *Flag) {
if len(flag.Deprecated) > 0 || flag.Hidden {
if flag.Hidden {
return
}
line := ""
if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 {
if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name)
} else {
line = fmt.Sprintf(" --%s", flag.Name)
}
varname, usage := UnquoteUsage(flag)
if len(varname) > 0 {
if varname != "" {
line += " " + varname
}
if len(flag.NoOptDefVal) > 0 {
if flag.NoOptDefVal != "" {
switch flag.Value.Type() {
case "string":
line += fmt.Sprintf("[=%q]", flag.NoOptDefVal)
line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
case "bool":
if flag.NoOptDefVal != "true" {
line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
}
case "count":
if flag.NoOptDefVal != "+1" {
line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
}
default:
line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
}
@ -539,6 +716,9 @@ func (f *FlagSet) FlagUsages() string {
line += fmt.Sprintf(" (default %s)", flag.DefValue)
}
}
if len(flag.Deprecated) != 0 {
line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated)
}
lines = append(lines, line)
})
@ -546,10 +726,17 @@ func (f *FlagSet) FlagUsages() string {
for _, line := range lines {
sidx := strings.Index(line, "\x00")
spacing := strings.Repeat(" ", maxlen-sidx)
fmt.Fprintln(x, line[:sidx], spacing, line[sidx+1:])
// maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
}
return x.String()
return buf.String()
}
// FlagUsages returns a string containing the usage information for all flags in
// the FlagSet
func (f *FlagSet) FlagUsages() string {
return f.FlagUsagesWrapped(0)
}
// PrintDefaults prints to standard error the default values of all defined command-line flags.
@ -635,16 +822,15 @@ func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
_ = f.VarPF(value, name, shorthand, usage)
f.VarPF(value, name, shorthand, usage)
}
// AddFlag will add the flag to the FlagSet
func (f *FlagSet) AddFlag(flag *Flag) {
// Call normalizeFlagName function only once
normalizedFlagName := f.normalizeFlagName(flag.Name)
_, alreadythere := f.formal[normalizedFlagName]
if alreadythere {
_, alreadyThere := f.formal[normalizedFlagName]
if alreadyThere {
msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
fmt.Fprintln(f.out(), msg)
panic(msg) // Happens only if flags are declared with identical names
@ -655,28 +841,31 @@ func (f *FlagSet) AddFlag(flag *Flag) {
flag.Name = string(normalizedFlagName)
f.formal[normalizedFlagName] = flag
f.orderedFormal = append(f.orderedFormal, flag)
if len(flag.Shorthand) == 0 {
if flag.Shorthand == "" {
return
}
if len(flag.Shorthand) > 1 {
fmt.Fprintf(f.out(), "%s shorthand more than ASCII character: %s\n", f.name, flag.Shorthand)
panic("shorthand is more than one character")
msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand)
fmt.Fprintf(f.out(), msg)
panic(msg)
}
if f.shorthands == nil {
f.shorthands = make(map[byte]*Flag)
}
c := flag.Shorthand[0]
old, alreadythere := f.shorthands[c]
if alreadythere {
fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name)
panic("shorthand redefinition")
used, alreadyThere := f.shorthands[c]
if alreadyThere {
msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name)
fmt.Fprintf(f.out(), msg)
panic(msg)
}
f.shorthands[c] = flag
}
// AddFlagSet adds one FlagSet to another. If a flag is already present in f
// the flag from newSet will be ignored
// the flag from newSet will be ignored.
func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
if newSet == nil {
return
@ -707,8 +896,10 @@ func VarP(value Value, name, shorthand, usage string) {
// returns the error.
func (f *FlagSet) failf(format string, a ...interface{}) error {
err := fmt.Errorf(format, a...)
if f.errorHandling != ContinueOnError {
fmt.Fprintln(f.out(), err)
f.usage()
}
return err
}
@ -724,57 +915,61 @@ func (f *FlagSet) usage() {
}
}
func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error {
if err := flag.Value.Set(value); err != nil {
return f.failf("invalid argument %q for %s: %v", value, origArg, err)
//--unknown (args will be empty)
//--unknown --next-flag ... (args will be --next-flag ...)
//--unknown arg ... (args will be arg ...)
func stripUnknownFlagValue(args []string) []string {
if len(args) == 0 {
//--unknown
return args
}
// mark as visited for Visit()
if f.actual == nil {
f.actual = make(map[NormalizedName]*Flag)
first := args[0]
if first[0] == '-' {
//--unknown --next-flag ...
return args
}
f.actual[f.normalizeFlagName(flag.Name)] = flag
flag.Changed = true
if len(flag.Deprecated) > 0 {
fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
}
if len(flag.ShorthandDeprecated) > 0 && containsShorthand(origArg, flag.Shorthand) {
fmt.Fprintf(os.Stderr, "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
}
return nil
//--unknown arg ... (args will be arg ...)
return args[1:]
}
func containsShorthand(arg, shorthand string) bool {
// filter out flags --<flag_name>
if strings.HasPrefix(arg, "-") {
return false
}
arg = strings.SplitN(arg, "=", 2)[0]
return strings.Contains(arg, shorthand)
}
func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) {
func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
a = args
name := s[2:]
if len(name) == 0 || name[0] == '-' || name[0] == '=' {
err = f.failf("bad flag syntax: %s", s)
return
}
split := strings.SplitN(name, "=", 2)
name = split[0]
flag, alreadythere := f.formal[f.normalizeFlagName(name)]
if !alreadythere {
if name == "help" { // special case for nice help message.
flag, exists := f.formal[f.normalizeFlagName(name)]
if !exists {
switch {
case name == "help":
f.usage()
return a, ErrHelp
case f.ParseErrorsWhitelist.UnknownFlags:
// --unknown=unknownval arg ...
// we do not want to lose arg in this case
if len(split) >= 2 {
return a, nil
}
return stripUnknownFlagValue(a), nil
default:
err = f.failf("unknown flag: --%s", name)
return
}
}
var value string
if len(split) == 2 {
// '--flag=arg'
value = split[1]
} else if len(flag.NoOptDefVal) > 0 {
} else if flag.NoOptDefVal != "" {
// '--flag' (arg was optional)
value = flag.NoOptDefVal
} else if len(a) > 0 {
@ -786,55 +981,87 @@ func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error)
err = f.failf("flag needs an argument: %s", s)
return
}
err = f.setFlag(flag, value, s)
err = fn(flag, value)
if err != nil {
f.failf(err.Error())
}
return
}
func (f *FlagSet) parseSingleShortArg(shorthands string, args []string) (outShorts string, outArgs []string, err error) {
func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
outArgs = args
if strings.HasPrefix(shorthands, "test.") {
return
}
outArgs = args
outShorts = shorthands[1:]
c := shorthands[0]
flag, alreadythere := f.shorthands[c]
if !alreadythere {
if c == 'h' { // special case for nice help message.
flag, exists := f.shorthands[c]
if !exists {
switch {
case c == 'h':
f.usage()
err = ErrHelp
return
case f.ParseErrorsWhitelist.UnknownFlags:
// '-f=arg arg ...'
// we do not want to lose arg in this case
if len(shorthands) > 2 && shorthands[1] == '=' {
outShorts = ""
return
}
//TODO continue on error
outArgs = stripUnknownFlagValue(outArgs)
return
default:
err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
return
}
}
var value string
if len(shorthands) > 2 && shorthands[1] == '=' {
// '-f=arg'
value = shorthands[2:]
outShorts = ""
} else if len(flag.NoOptDefVal) > 0 {
} else if flag.NoOptDefVal != "" {
// '-f' (arg was optional)
value = flag.NoOptDefVal
} else if len(shorthands) > 1 {
// '-farg'
value = shorthands[1:]
outShorts = ""
} else if len(args) > 0 {
// '-f arg'
value = args[0]
outArgs = args[1:]
} else {
// '-f' (arg was required)
err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
return
}
err = f.setFlag(flag, value, shorthands)
if flag.ShorthandDeprecated != "" {
fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
}
err = fn(flag, value)
if err != nil {
f.failf(err.Error())
}
return
}
func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error) {
func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) {
a = args
shorthands := s[1:]
// "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv").
for len(shorthands) > 0 {
shorthands, a, err = f.parseSingleShortArg(shorthands, args)
shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
if err != nil {
return
}
@ -843,7 +1070,7 @@ func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error)
return
}
func (f *FlagSet) parseArgs(args []string) (err error) {
func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
for len(args) > 0 {
s := args[0]
args = args[1:]
@ -863,9 +1090,9 @@ func (f *FlagSet) parseArgs(args []string) (err error) {
f.args = append(f.args, args...)
break
}
args, err = f.parseLongArg(s, args)
args, err = f.parseLongArg(s, args, fn)
} else {
args, err = f.parseShortArg(s, args)
args, err = f.parseShortArg(s, args, fn)
}
if err != nil {
return
@ -879,9 +1106,50 @@ func (f *FlagSet) parseArgs(args []string) (err error) {
// are defined and before flags are accessed by the program.
// The return value will be ErrHelp if -help was set but not defined.
func (f *FlagSet) Parse(arguments []string) error {
if f.addedGoFlagSets != nil {
for _, goFlagSet := range f.addedGoFlagSets {
goFlagSet.Parse(nil)
}
}
f.parsed = true
if len(arguments) == 0 {
return nil
}
f.args = make([]string, 0, len(arguments))
set := func(flag *Flag, value string) error {
return f.Set(flag.Name, value)
}
err := f.parseArgs(arguments, set)
if err != nil {
switch f.errorHandling {
case ContinueOnError:
return err
case ExitOnError:
fmt.Println(err)
os.Exit(2)
case PanicOnError:
panic(err)
}
}
return nil
}
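Which branch of the switch above runs is decided by the ErrorHandling value the FlagSet was built with. A minimal sketch, assuming the vendored import path github.com/spf13/pflag; the flag names are illustrative:
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// ContinueOnError returns the error to the caller; ExitOnError would print
	// it and call os.Exit(2), PanicOnError would panic.
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.Bool("verbose", false, "enable verbose output")

	if err := fs.Parse([]string{"--verbose", "--no-such-flag"}); err != nil {
		fmt.Println("caller handles it:", err) // unknown flag: --no-such-flag
	}
}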
type parseFunc func(flag *Flag, value string) error
// ParseAll parses flag definitions from the argument list, which should not
// include the command name. The arguments for fn are flag and value. Must be
// called after all flags in the FlagSet are defined and before flags are
// accessed by the program. The return value will be ErrHelp if -help was set
// but not defined.
func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error {
f.parsed = true
f.args = make([]string, 0, len(arguments))
err := f.parseArgs(arguments)
err := f.parseArgs(arguments, fn)
if err != nil {
switch f.errorHandling {
case ContinueOnError:
@ -907,6 +1175,14 @@ func Parse() {
CommandLine.Parse(os.Args[1:])
}
// ParseAll parses the command-line flags from os.Args[1:] and calls fn for each.
// The arguments for fn are flag and value. Must be called after all flags are
// defined and before flags are accessed by the program.
func ParseAll(fn func(flag *Flag, value string) error) {
// Ignore errors; CommandLine is set for ExitOnError.
CommandLine.ParseAll(os.Args[1:], fn)
}
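A minimal sketch of the ParseAll callback, assuming the vendored import path github.com/spf13/pflag. Note that fn runs in place of the default assignment, so it has to call Set itself if the value should stick:
package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.String("name", "", "who to greet")

	// fn is invoked for every flag encountered on the command line.
	err := fs.ParseAll(os.Args[1:], func(flag *pflag.Flag, value string) error {
		fmt.Printf("saw --%s=%q\n", flag.Name, value)
		return fs.Set(flag.Name, value)
	})
	if err != nil {
		fmt.Println("parse error:", err)
	}
}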
// SetInterspersed sets whether to support interspersed option/non-option arguments.
func SetInterspersed(interspersed bool) {
CommandLine.SetInterspersed(interspersed)
@ -920,14 +1196,15 @@ func Parsed() bool {
// CommandLine is the default set of command-line flags, parsed from os.Args.
var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
// NewFlagSet returns a new, empty flag set with the specified name and
// error handling property.
// NewFlagSet returns a new, empty flag set with the specified name,
// error handling property and SortFlags set to true.
func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
f := &FlagSet{
name: name,
errorHandling: errorHandling,
argsLenAtDash: -1,
interspersed: true,
SortFlags: true,
}
return f
}

View file

@ -6,13 +6,10 @@ package pflag
import (
goflag "flag"
"fmt"
"reflect"
"strings"
)
var _ = fmt.Print
// flagValueWrapper implements pflag.Value around a flag.Value. The main
// difference here is the addition of the Type method that returns a string
// name of the type. As this is generally unknown, we approximate that with
@ -101,4 +98,8 @@ func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
newSet.VisitAll(func(goflag *goflag.Flag) {
f.AddGoFlag(goflag)
})
if f.addedGoFlagSets == nil {
f.addedGoFlagSets = make([]*goflag.FlagSet, 0)
}
f.addedGoFlagSets = append(f.addedGoFlagSets, newSet)
}

88
vendor/github.com/spf13/pflag/int16.go generated vendored Normal file
View file

@ -0,0 +1,88 @@
package pflag
import "strconv"
// -- int16 Value
type int16Value int16
func newInt16Value(val int16, p *int16) *int16Value {
*p = val
return (*int16Value)(p)
}
func (i *int16Value) Set(s string) error {
v, err := strconv.ParseInt(s, 0, 16)
*i = int16Value(v)
return err
}
func (i *int16Value) Type() string {
return "int16"
}
func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) }
func int16Conv(sval string) (interface{}, error) {
v, err := strconv.ParseInt(sval, 0, 16)
if err != nil {
return 0, err
}
return int16(v), nil
}
// GetInt16 returns the int16 value of a flag with the given name
func (f *FlagSet) GetInt16(name string) (int16, error) {
val, err := f.getFlagType(name, "int16", int16Conv)
if err != nil {
return 0, err
}
return val.(int16), nil
}
// Int16Var defines an int16 flag with specified name, default value, and usage string.
// The argument p points to an int16 variable in which to store the value of the flag.
func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) {
f.VarP(newInt16Value(value, p), name, "", usage)
}
// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
f.VarP(newInt16Value(value, p), name, shorthand, usage)
}
// Int16Var defines an int16 flag with specified name, default value, and usage string.
// The argument p points to an int16 variable in which to store the value of the flag.
func Int16Var(p *int16, name string, value int16, usage string) {
CommandLine.VarP(newInt16Value(value, p), name, "", usage)
}
// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
func Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage)
}
// Int16 defines an int16 flag with specified name, default value, and usage string.
// The return value is the address of an int16 variable that stores the value of the flag.
func (f *FlagSet) Int16(name string, value int16, usage string) *int16 {
p := new(int16)
f.Int16VarP(p, name, "", value, usage)
return p
}
// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 {
p := new(int16)
f.Int16VarP(p, name, shorthand, value, usage)
return p
}
// Int16 defines an int16 flag with specified name, default value, and usage string.
// The return value is the address of an int16 variable that stores the value of the flag.
func Int16(name string, value int16, usage string) *int16 {
return CommandLine.Int16P(name, "", value, usage)
}
// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
func Int16P(name, shorthand string, value int16, usage string) *int16 {
return CommandLine.Int16P(name, shorthand, value, usage)
}
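A minimal sketch of the int16 helpers defined above, assuming the vendored import path github.com/spf13/pflag; out-of-range input fails inside strconv.ParseInt because of the bitSize of 16:
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	port := fs.Int16("port", 8080, "listen port")

	if err := fs.Parse([]string{"--port=9000"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(*port) // 9000

	// 70000 does not fit into 16 bits, so Set (and therefore Parse) reports an error.
	if err := fs.Parse([]string{"--port=70000"}); err != nil {
		fmt.Println("expected failure:", err)
	}
}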

View file

@ -6,8 +6,6 @@ import (
"strings"
)
var _ = strings.TrimSpace
// -- net.IP value
type ipValue net.IP

148
vendor/github.com/spf13/pflag/ip_slice.go generated vendored Normal file
View file

@ -0,0 +1,148 @@
package pflag
import (
"fmt"
"io"
"net"
"strings"
)
// -- ipSlice Value
type ipSliceValue struct {
value *[]net.IP
changed bool
}
func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue {
ipsv := new(ipSliceValue)
ipsv.value = p
*ipsv.value = val
return ipsv
}
// Set converts the comma-separated IP argument string and assigns it as the []net.IP value of this flag.
// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended.
func (s *ipSliceValue) Set(val string) error {
// remove all quote characters
rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
// read flag arguments with CSV parser
ipStrSlice, err := readAsCSV(rmQuote.Replace(val))
if err != nil && err != io.EOF {
return err
}
// parse ip values into slice
out := make([]net.IP, 0, len(ipStrSlice))
for _, ipStr := range ipStrSlice {
ip := net.ParseIP(strings.TrimSpace(ipStr))
if ip == nil {
return fmt.Errorf("invalid string being converted to IP address: %s", ipStr)
}
out = append(out, ip)
}
if !s.changed {
*s.value = out
} else {
*s.value = append(*s.value, out...)
}
s.changed = true
return nil
}
// Type returns a string that uniquely represents this flag's type.
func (s *ipSliceValue) Type() string {
return "ipSlice"
}
// String defines a "native" format for this net.IP slice flag value.
func (s *ipSliceValue) String() string {
ipStrSlice := make([]string, len(*s.value))
for i, ip := range *s.value {
ipStrSlice[i] = ip.String()
}
out, _ := writeAsCSV(ipStrSlice)
return "[" + out + "]"
}
func ipSliceConv(val string) (interface{}, error) {
val = strings.Trim(val, "[]")
// Empty string would cause a slice with one (empty) entry
if len(val) == 0 {
return []net.IP{}, nil
}
ss := strings.Split(val, ",")
out := make([]net.IP, len(ss))
for i, sval := range ss {
ip := net.ParseIP(strings.TrimSpace(sval))
if ip == nil {
return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
}
out[i] = ip
}
return out, nil
}
// GetIPSlice returns the []net.IP value of a flag with the given name
func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) {
val, err := f.getFlagType(name, "ipSlice", ipSliceConv)
if err != nil {
return []net.IP{}, err
}
return val.([]net.IP), nil
}
// IPSliceVar defines a ipSlice flag with specified name, default value, and usage string.
// The argument p points to a []net.IP variable in which to store the value of the flag.
func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
f.VarP(newIPSliceValue(value, p), name, "", usage)
}
// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
f.VarP(newIPSliceValue(value, p), name, shorthand, usage)
}
// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string.
// The argument p points to a []net.IP variable in which to store the value of the flag.
func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
CommandLine.VarP(newIPSliceValue(value, p), name, "", usage)
}
// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage)
}
// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
// The return value is the address of a []net.IP variable that stores the value of that flag.
func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP {
p := []net.IP{}
f.IPSliceVarP(&p, name, "", value, usage)
return &p
}
// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
p := []net.IP{}
f.IPSliceVarP(&p, name, shorthand, value, usage)
return &p
}
// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
// The return value is the address of a []net.IP variable that stores the value of the flag.
func IPSlice(name string, value []net.IP, usage string) *[]net.IP {
return CommandLine.IPSliceP(name, "", value, usage)
}
// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
return CommandLine.IPSliceP(name, shorthand, value, usage)
}
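A minimal sketch of the ipSlice flag defined above, assuming the vendored import path github.com/spf13/pflag. Set strips quote characters, parses the rest as CSV, and appends on repeated use of the flag:
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	ips := fs.IPSlice("allow", nil, "allowed client IPs")

	// The first occurrence replaces the default, the second appends to it.
	args := []string{"--allow=192.0.2.1,2001:db8::1", "--allow=198.51.100.7"}
	if err := fs.Parse(args); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(*ips) // [192.0.2.1 2001:db8::1 198.51.100.7]
}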

View file

@ -27,8 +27,6 @@ func (*ipNetValue) Type() string {
return "ipNet"
}
var _ = strings.TrimSpace
func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
*p = val
return (*ipNetValue)(p)

View file

@ -1,12 +1,5 @@
package pflag
import (
"fmt"
"strings"
)
var _ = fmt.Fprint
// -- stringArray Value
type stringArrayValue struct {
value *[]string
@ -40,7 +33,7 @@ func (s *stringArrayValue) String() string {
}
func stringArrayConv(sval string) (interface{}, error) {
sval = strings.Trim(sval, "[]")
sval = sval[1 : len(sval)-1]
// An empty string would cause an array with one (empty) string
if len(sval) == 0 {
return []string{}, nil
@ -59,7 +52,7 @@ func (f *FlagSet) GetStringArray(name string) ([]string, error) {
// StringArrayVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a []string variable in which to store the values of the multiple flags.
// The value of each argument will not try to be separated by comma
// The value of each argument will not try to be separated by comma. Use a StringSlice for that.
func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) {
f.VarP(newStringArrayValue(value, p), name, "", usage)
}
@ -71,7 +64,7 @@ func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []s
// StringArrayVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a []string variable in which to store the value of the flag.
// The value of each argument will not try to be separated by comma
// The value of each argument will not try to be separated by comma. Use a StringSlice for that.
func StringArrayVar(p *[]string, name string, value []string, usage string) {
CommandLine.VarP(newStringArrayValue(value, p), name, "", usage)
}
@ -83,7 +76,7 @@ func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage
// StringArray defines a string flag with specified name, default value, and usage string.
// The return value is the address of a []string variable that stores the value of the flag.
// The value of each argument will not try to be separated by comma
// The value of each argument will not try to be separated by comma. Use a StringSlice for that.
func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string {
p := []string{}
f.StringArrayVarP(&p, name, "", value, usage)
@ -99,7 +92,7 @@ func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage str
// StringArray defines a string flag with specified name, default value, and usage string.
// The return value is the address of a []string variable that stores the value of the flag.
// The value of each argument will not try to be separated by comma
// The value of each argument will not try to be separated by comma. Use a StringSlice for that.
func StringArray(name string, value []string, usage string) *[]string {
return CommandLine.StringArrayP(name, "", value, usage)
}

View file

@ -3,12 +3,9 @@ package pflag
import (
"bytes"
"encoding/csv"
"fmt"
"strings"
)
var _ = fmt.Fprint
// -- stringSlice Value
type stringSliceValue struct {
value *[]string
@ -39,7 +36,7 @@ func writeAsCSV(vals []string) (string, error) {
return "", err
}
w.Flush()
return strings.TrimSuffix(b.String(), fmt.Sprintln()), nil
return strings.TrimSuffix(b.String(), "\n"), nil
}
func (s *stringSliceValue) Set(val string) error {
@ -66,7 +63,7 @@ func (s *stringSliceValue) String() string {
}
func stringSliceConv(sval string) (interface{}, error) {
sval = strings.Trim(sval, "[]")
sval = sval[1 : len(sval)-1]
// An empty string would cause a slice with one (empty) string
if len(sval) == 0 {
return []string{}, nil
@ -85,6 +82,11 @@ func (f *FlagSet) GetStringSlice(name string) ([]string, error) {
// StringSliceVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a []string variable in which to store the value of the flag.
// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
// For example:
// --ss="v1,v2" -ss="v3"
// will result in
// []string{"v1", "v2", "v3"}
func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) {
f.VarP(newStringSliceValue(value, p), name, "", usage)
}
@ -96,6 +98,11 @@ func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []s
// StringSliceVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a []string variable in which to store the value of the flag.
// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
// For example:
// --ss="v1,v2" -ss="v3"
// will result in
// []string{"v1", "v2", "v3"}
func StringSliceVar(p *[]string, name string, value []string, usage string) {
CommandLine.VarP(newStringSliceValue(value, p), name, "", usage)
}
@ -107,6 +114,11 @@ func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage
// StringSlice defines a string flag with specified name, default value, and usage string.
// The return value is the address of a []string variable that stores the value of the flag.
// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
// For example:
// --ss="v1,v2" -ss="v3"
// will result in
// []string{"v1", "v2", "v3"}
func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string {
p := []string{}
f.StringSliceVarP(&p, name, "", value, usage)
@ -122,6 +134,11 @@ func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage str
// StringSlice defines a string flag with specified name, default value, and usage string.
// The return value is the address of a []string variable that stores the value of the flag.
// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
// For example:
// --ss="v1,v2" -ss="v3"
// will result in
// []string{"v1", "v2", "v3"}
func StringSlice(name string, value []string, usage string) *[]string {
return CommandLine.StringSliceP(name, "", value, usage)
}
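The comments above describe the practical difference between StringSlice and StringArray; a minimal side-by-side sketch, assuming the vendored import path github.com/spf13/pflag:
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	slice := fs.StringSlice("ss", nil, "comma-split values")
	array := fs.StringArray("sa", nil, "values kept verbatim")

	args := []string{"--ss=v1,v2", "--ss=v3", "--sa=v1,v2", "--sa=v3"}
	if err := fs.Parse(args); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(*slice) // [v1 v2 v3]  (each value split on commas)
	fmt.Println(*array) // [v1,v2 v3]  (commas left untouched)
}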

126
vendor/github.com/spf13/pflag/uint_slice.go generated vendored Normal file
View file

@ -0,0 +1,126 @@
package pflag
import (
"fmt"
"strconv"
"strings"
)
// -- uintSlice Value
type uintSliceValue struct {
value *[]uint
changed bool
}
func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue {
uisv := new(uintSliceValue)
uisv.value = p
*uisv.value = val
return uisv
}
func (s *uintSliceValue) Set(val string) error {
ss := strings.Split(val, ",")
out := make([]uint, len(ss))
for i, d := range ss {
u, err := strconv.ParseUint(d, 10, 0)
if err != nil {
return err
}
out[i] = uint(u)
}
if !s.changed {
*s.value = out
} else {
*s.value = append(*s.value, out...)
}
s.changed = true
return nil
}
func (s *uintSliceValue) Type() string {
return "uintSlice"
}
func (s *uintSliceValue) String() string {
out := make([]string, len(*s.value))
for i, d := range *s.value {
out[i] = fmt.Sprintf("%d", d)
}
return "[" + strings.Join(out, ",") + "]"
}
func uintSliceConv(val string) (interface{}, error) {
val = strings.Trim(val, "[]")
// Empty string would cause a slice with one (empty) entry
if len(val) == 0 {
return []uint{}, nil
}
ss := strings.Split(val, ",")
out := make([]uint, len(ss))
for i, d := range ss {
u, err := strconv.ParseUint(d, 10, 0)
if err != nil {
return nil, err
}
out[i] = uint(u)
}
return out, nil
}
// GetUintSlice returns the []uint value of a flag with the given name.
func (f *FlagSet) GetUintSlice(name string) ([]uint, error) {
val, err := f.getFlagType(name, "uintSlice", uintSliceConv)
if err != nil {
return []uint{}, err
}
return val.([]uint), nil
}
// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string.
// The argument p points to a []uint variable in which to store the value of the flag.
func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) {
f.VarP(newUintSliceValue(value, p), name, "", usage)
}
// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
f.VarP(newUintSliceValue(value, p), name, shorthand, usage)
}
// UintSliceVar defines a []uint flag with specified name, default value, and usage string.
// The argument p points to a []uint variable in which to store the value of the flag.
func UintSliceVar(p *[]uint, name string, value []uint, usage string) {
CommandLine.VarP(newUintSliceValue(value, p), name, "", usage)
}
// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage)
}
// UintSlice defines a []uint flag with specified name, default value, and usage string.
// The return value is the address of a []uint variable that stores the value of the flag.
func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint {
p := []uint{}
f.UintSliceVarP(&p, name, "", value, usage)
return &p
}
// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
p := []uint{}
f.UintSliceVarP(&p, name, shorthand, value, usage)
return &p
}
// UintSlice defines a []uint flag with specified name, default value, and usage string.
// The return value is the address of a []uint variable that stores the value of the flag.
func UintSlice(name string, value []uint, usage string) *[]uint {
return CommandLine.UintSliceP(name, "", value, usage)
}
// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
return CommandLine.UintSliceP(name, shorthand, value, usage)
}
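A minimal sketch, assuming the vendored import path github.com/spf13/pflag; besides the returned pointer, the value can also be looked up by name through the GetUintSlice accessor defined above:
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.UintSlice("ports", []uint{80}, "ports to listen on")

	if err := fs.Parse([]string{"--ports=8080,8443"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}

	// GetUintSlice finds the flag by name and type-asserts its value.
	ports, err := fs.GetUintSlice("ports")
	if err != nil {
		fmt.Println("lookup error:", err)
		return
	}
	fmt.Println(ports) // [8080 8443]
}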

3
vendor/golang.org/x/net/AUTHORS generated vendored Normal file
View file

@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

3
vendor/golang.org/x/net/CONTRIBUTORS generated vendored Normal file
View file

@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

27
vendor/golang.org/x/net/LICENSE generated vendored Normal file
View file

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/net/PATENTS generated vendored Normal file
View file

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

56
vendor/golang.org/x/net/context/context.go generated vendored Normal file
View file

@ -0,0 +1,56 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// and between processes.
// As of Go 1.7 this package is available in the standard library under the
// name context. https://golang.org/pkg/context.
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
//
// Programs that use Contexts should follow these rules to keep interfaces
// consistent across packages and enable static analysis tools to check context
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx:
//
// func DoSomething(ctx context.Context, arg Arg) error {
// // ... use ctx ...
// }
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The same Context may be passed to functions running in different goroutines;
// Contexts are safe for simultaneous use by multiple goroutines.
//
// See http://blog.golang.org/context for example code for a server that uses
// Contexts.
package context // import "golang.org/x/net/context"
// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
func Background() Context {
return background
}
// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine
// whether Contexts are propagated correctly in a program.
func TODO() Context {
return todo
}

72
vendor/golang.org/x/net/context/go17.go generated vendored Normal file
View file

@ -0,0 +1,72 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7
package context
import (
"context" // standard library's context, as of Go 1.7
"time"
)
var (
todo = context.TODO()
background = context.Background()
)
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = context.Canceled
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = context.DeadlineExceeded
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
ctx, f := context.WithCancel(parent)
return ctx, CancelFunc(f)
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
ctx, f := context.WithDeadline(parent, deadline)
return ctx, CancelFunc(f)
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
return context.WithValue(parent, key, val)
}

20
vendor/golang.org/x/net/context/go19.go generated vendored Normal file
View file

@ -0,0 +1,20 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.9
package context
import "context" // standard library's context, as of Go 1.7
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context = context.Context
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc = context.CancelFunc

300
vendor/golang.org/x/net/context/pre_go17.go generated vendored Normal file
View file

@ -0,0 +1,300 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.7
package context
import (
"errors"
"fmt"
"sync"
"time"
)
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
return
}
func (*emptyCtx) Done() <-chan struct{} {
return nil
}
func (*emptyCtx) Err() error {
return nil
}
func (*emptyCtx) Value(key interface{}) interface{} {
return nil
}
func (e *emptyCtx) String() string {
switch e {
case background:
return "context.Background"
case todo:
return "context.TODO"
}
return "unknown empty Context"
}
var (
background = new(emptyCtx)
todo = new(emptyCtx)
)
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = errors.New("context canceled")
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = errors.New("context deadline exceeded")
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
c := newCancelCtx(parent)
propagateCancel(parent, c)
return c, func() { c.cancel(true, Canceled) }
}
// newCancelCtx returns an initialized cancelCtx.
func newCancelCtx(parent Context) *cancelCtx {
return &cancelCtx{
Context: parent,
done: make(chan struct{}),
}
}
// propagateCancel arranges for child to be canceled when parent is.
func propagateCancel(parent Context, child canceler) {
if parent.Done() == nil {
return // parent is never canceled
}
if p, ok := parentCancelCtx(parent); ok {
p.mu.Lock()
if p.err != nil {
// parent has already been canceled
child.cancel(false, p.err)
} else {
if p.children == nil {
p.children = make(map[canceler]bool)
}
p.children[child] = true
}
p.mu.Unlock()
} else {
go func() {
select {
case <-parent.Done():
child.cancel(false, parent.Err())
case <-child.Done():
}
}()
}
}
// parentCancelCtx follows a chain of parent references until it finds a
// *cancelCtx. This function understands how each of the concrete types in this
// package represents its parent.
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
for {
switch c := parent.(type) {
case *cancelCtx:
return c, true
case *timerCtx:
return c.cancelCtx, true
case *valueCtx:
parent = c.Context
default:
return nil, false
}
}
}
// removeChild removes a context from its parent.
func removeChild(parent Context, child canceler) {
p, ok := parentCancelCtx(parent)
if !ok {
return
}
p.mu.Lock()
if p.children != nil {
delete(p.children, child)
}
p.mu.Unlock()
}
// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
cancel(removeFromParent bool, err error)
Done() <-chan struct{}
}
// A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler.
type cancelCtx struct {
Context
done chan struct{} // closed by the first cancel call.
mu sync.Mutex
children map[canceler]bool // set to nil by the first cancel call
err error // set to non-nil by the first cancel call
}
func (c *cancelCtx) Done() <-chan struct{} {
return c.done
}
func (c *cancelCtx) Err() error {
c.mu.Lock()
defer c.mu.Unlock()
return c.err
}
func (c *cancelCtx) String() string {
return fmt.Sprintf("%v.WithCancel", c.Context)
}
// cancel closes c.done, cancels each of c's children, and, if
// removeFromParent is true, removes c from its parent's children.
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
if err == nil {
panic("context: internal error: missing cancel error")
}
c.mu.Lock()
if c.err != nil {
c.mu.Unlock()
return // already canceled
}
c.err = err
close(c.done)
for child := range c.children {
// NOTE: acquiring the child's lock while holding parent's lock.
child.cancel(false, err)
}
c.children = nil
c.mu.Unlock()
if removeFromParent {
removeChild(c.Context, c)
}
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
// The current deadline is already sooner than the new one.
return WithCancel(parent)
}
c := &timerCtx{
cancelCtx: newCancelCtx(parent),
deadline: deadline,
}
propagateCancel(parent, c)
d := deadline.Sub(time.Now())
if d <= 0 {
c.cancel(true, DeadlineExceeded) // deadline has already passed
return c, func() { c.cancel(true, Canceled) }
}
c.mu.Lock()
defer c.mu.Unlock()
if c.err == nil {
c.timer = time.AfterFunc(d, func() {
c.cancel(true, DeadlineExceeded)
})
}
return c, func() { c.cancel(true, Canceled) }
}
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
*cancelCtx
timer *time.Timer // Under cancelCtx.mu.
deadline time.Time
}
func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
return c.deadline, true
}
func (c *timerCtx) String() string {
return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
}
func (c *timerCtx) cancel(removeFromParent bool, err error) {
c.cancelCtx.cancel(false, err)
if removeFromParent {
// Remove this timerCtx from its parent cancelCtx's children.
removeChild(c.cancelCtx.Context, c)
}
c.mu.Lock()
if c.timer != nil {
c.timer.Stop()
c.timer = nil
}
c.mu.Unlock()
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
return &valueCtx{parent, key, val}
}
// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
Context
key, val interface{}
}
func (c *valueCtx) String() string {
return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
}
func (c *valueCtx) Value(key interface{}) interface{} {
if c.key == key {
return c.val
}
return c.Context.Value(key)
}

109
vendor/golang.org/x/net/context/pre_go19.go generated vendored Normal file
View file

@ -0,0 +1,109 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.9
package context
import "time"
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value.
//
// WithCancel arranges for Done to be closed when cancel is called;
// WithDeadline arranges for Done to be closed when the deadline
// expires; WithTimeout arranges for Done to be closed when the timeout
// elapses.
//
// Done is provided for use in select statements:
//
// // Stream generates values with DoSomething and sends them to out
// // until DoSomething returns an error or ctx.Done is closed.
// func Stream(ctx context.Context, out chan<- Value) error {
// for {
// v, err := DoSomething(ctx)
// if err != nil {
// return err
// }
// select {
// case <-ctx.Done():
// return ctx.Err()
// case out <- v:
// }
// }
// }
//
// See http://blog.golang.org/pipelines for more examples of how to use
// a Done channel for cancelation.
Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value.
Err() error
// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key returns the same result.
//
// Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to
// functions.
//
// A key identifies a specific value in a Context. Functions that wish
// to store values in Context typically allocate a key in a global
// variable then use that key as the argument to context.WithValue and
// Context.Value. A key can be any type that supports equality;
// packages should define keys as an unexported type to avoid
// collisions.
//
// Packages that define a Context key should provide type-safe accessors
// for the values stores using that key:
//
// // Package user defines a User type that's stored in Contexts.
// package user
//
// import "golang.org/x/net/context"
//
// // User is the type of value stored in the Contexts.
// type User struct {...}
//
// // key is an unexported type for keys defined in this package.
// // This prevents collisions with keys defined in other packages.
// type key int
//
// // userKey is the key for user.User values in Contexts. It is
// // unexported; clients use user.NewContext and user.FromContext
// // instead of using this key directly.
// var userKey key = 0
//
// // NewContext returns a new Context that carries value u.
// func NewContext(ctx context.Context, u *User) context.Context {
// return context.WithValue(ctx, userKey, u)
// }
//
// // FromContext returns the User value stored in ctx, if any.
// func FromContext(ctx context.Context) (*User, bool) {
// u, ok := ctx.Value(userKey).(*User)
// return u, ok
// }
Value(key interface{}) interface{}
}
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()

796
vendor/golang.org/x/net/webdav/file.go generated vendored Normal file
View file

@ -0,0 +1,796 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
import (
"encoding/xml"
"io"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
"golang.org/x/net/context"
)
// slashClean is equivalent to but slightly more efficient than
// path.Clean("/" + name).
func slashClean(name string) string {
if name == "" || name[0] != '/' {
name = "/" + name
}
return path.Clean(name)
}
// A FileSystem implements access to a collection of named files. The elements
// in a file path are separated by slash ('/', U+002F) characters, regardless
// of host operating system convention.
//
// Each method has the same semantics as the os package's function of the same
// name.
//
// Note that the os.Rename documentation says that "OS-specific restrictions
// might apply". In particular, whether renaming a file or directory so that it
// overwrites an existing file or directory is an error is OS-dependent.
type FileSystem interface {
Mkdir(ctx context.Context, name string, perm os.FileMode) error
OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error)
RemoveAll(ctx context.Context, name string) error
Rename(ctx context.Context, oldName, newName string) error
Stat(ctx context.Context, name string) (os.FileInfo, error)
}
// A File is returned by a FileSystem's OpenFile method and can be served by a
// Handler.
//
// A File may optionally implement the DeadPropsHolder interface, if it can
// load and save dead properties.
type File interface {
http.File
io.Writer
}
// A Dir implements FileSystem using the native file system restricted to a
// specific directory tree.
//
// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's
// string value is a filename on the native file system, not a URL, so it is
// separated by filepath.Separator, which isn't necessarily '/'.
//
// An empty Dir is treated as ".".
type Dir string
func (d Dir) resolve(name string) string {
// This implementation is based on Dir.Open's code in the standard net/http package.
if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
strings.Contains(name, "\x00") {
return ""
}
dir := string(d)
if dir == "" {
dir = "."
}
return filepath.Join(dir, filepath.FromSlash(slashClean(name)))
}
func (d Dir) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
if name = d.resolve(name); name == "" {
return os.ErrNotExist
}
return os.Mkdir(name, perm)
}
func (d Dir) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
if name = d.resolve(name); name == "" {
return nil, os.ErrNotExist
}
f, err := os.OpenFile(name, flag, perm)
if err != nil {
return nil, err
}
return f, nil
}
func (d Dir) RemoveAll(ctx context.Context, name string) error {
if name = d.resolve(name); name == "" {
return os.ErrNotExist
}
if name == filepath.Clean(string(d)) {
// Prohibit removing the virtual root directory.
return os.ErrInvalid
}
return os.RemoveAll(name)
}
func (d Dir) Rename(ctx context.Context, oldName, newName string) error {
if oldName = d.resolve(oldName); oldName == "" {
return os.ErrNotExist
}
if newName = d.resolve(newName); newName == "" {
return os.ErrNotExist
}
if root := filepath.Clean(string(d)); root == oldName || root == newName {
// Prohibit renaming from or to the virtual root directory.
return os.ErrInvalid
}
return os.Rename(oldName, newName)
}
func (d Dir) Stat(ctx context.Context, name string) (os.FileInfo, error) {
if name = d.resolve(name); name == "" {
return nil, os.ErrNotExist
}
return os.Stat(name)
}
// NewMemFS returns a new in-memory FileSystem implementation.
func NewMemFS() FileSystem {
return &memFS{
root: memFSNode{
children: make(map[string]*memFSNode),
mode: 0660 | os.ModeDir,
modTime: time.Now(),
},
}
}
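A minimal sketch exercising the FileSystem interface through the in-memory implementation returned by NewMemFS, assuming the vendored import paths golang.org/x/net/context and golang.org/x/net/webdav; the path names are illustrative. A webdav.Dir("some/dir") value would serve the same interface backed by the host filesystem.
package main

import (
	"fmt"
	"os"

	"golang.org/x/net/context"
	"golang.org/x/net/webdav"
)

func main() {
	ctx := context.Background()
	fs := webdav.NewMemFS()

	// Paths are always '/'-separated, regardless of the host OS.
	if err := fs.Mkdir(ctx, "/notes", 0755); err != nil {
		fmt.Println("mkdir:", err)
		return
	}
	f, err := fs.OpenFile(ctx, "/notes/todo.txt", os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	if _, err := f.Write([]byte("hello")); err != nil {
		fmt.Println("write:", err)
	}
	f.Close()

	info, _ := fs.Stat(ctx, "/notes/todo.txt")
	fmt.Println(info.Name(), info.Size()) // todo.txt 5
}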
// A memFS implements FileSystem, storing all metadata and actual file data
// in-memory. No limit is placed on the filesystem size, so it is not
// recommended for use where clients are untrusted.
//
// Concurrent access is permitted. The tree structure is protected by a mutex,
// and each node's contents and metadata are protected by a per-node mutex.
//
// TODO: Enforce file permissions.
type memFS struct {
mu sync.Mutex
root memFSNode
}
// TODO: clean up and rationalize the walk/find code.
// walk walks the directory tree for the fullname, calling f at each step. If f
// returns an error, the walk will be aborted and return that same error.
//
// dir is the directory at that step, frag is the name fragment, and final is
// whether it is the final step. For example, walking "/foo/bar/x" will result
// in 3 calls to f:
// - "/", "foo", false
// - "/foo/", "bar", false
// - "/foo/bar/", "x", true
// The frag argument will be empty only if dir is the root node and the walk
// ends at that root node.
func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error {
original := fullname
fullname = slashClean(fullname)
// Strip any leading "/"s to make fullname a relative path, as the walk
// starts at fs.root.
if fullname[0] == '/' {
fullname = fullname[1:]
}
dir := &fs.root
for {
frag, remaining := fullname, ""
i := strings.IndexRune(fullname, '/')
final := i < 0
if !final {
frag, remaining = fullname[:i], fullname[i+1:]
}
if frag == "" && dir != &fs.root {
panic("webdav: empty path fragment for a clean path")
}
if err := f(dir, frag, final); err != nil {
return &os.PathError{
Op: op,
Path: original,
Err: err,
}
}
if final {
break
}
child := dir.children[frag]
if child == nil {
return &os.PathError{
Op: op,
Path: original,
Err: os.ErrNotExist,
}
}
if !child.mode.IsDir() {
return &os.PathError{
Op: op,
Path: original,
Err: os.ErrInvalid,
}
}
dir, fullname = child, remaining
}
return nil
}
// find returns the parent of the named node and the relative name fragment
// from the parent to the child. For example, if finding "/foo/bar/baz" then
// parent will be the node for "/foo/bar" and frag will be "baz".
//
// If the fullname names the root node, then parent, frag and err will be zero.
//
// find returns an error if the parent does not already exist or the parent
// isn't a directory, but it will not return an error per se if the child does
// not already exist. The error returned is either nil or an *os.PathError
// whose Op is op.
func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) {
err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error {
if !final {
return nil
}
if frag0 != "" {
parent, frag = parent0, frag0
}
return nil
})
return parent, frag, err
}
func (fs *memFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
fs.mu.Lock()
defer fs.mu.Unlock()
dir, frag, err := fs.find("mkdir", name)
if err != nil {
return err
}
if dir == nil {
// We can't create the root.
return os.ErrInvalid
}
if _, ok := dir.children[frag]; ok {
return os.ErrExist
}
dir.children[frag] = &memFSNode{
children: make(map[string]*memFSNode),
mode: perm.Perm() | os.ModeDir,
modTime: time.Now(),
}
return nil
}
func (fs *memFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
fs.mu.Lock()
defer fs.mu.Unlock()
dir, frag, err := fs.find("open", name)
if err != nil {
return nil, err
}
var n *memFSNode
if dir == nil {
// We're opening the root.
if flag&(os.O_WRONLY|os.O_RDWR) != 0 {
return nil, os.ErrPermission
}
n, frag = &fs.root, "/"
} else {
n = dir.children[frag]
if flag&(os.O_SYNC|os.O_APPEND) != 0 {
// memFile doesn't support these flags yet.
return nil, os.ErrInvalid
}
if flag&os.O_CREATE != 0 {
if flag&os.O_EXCL != 0 && n != nil {
return nil, os.ErrExist
}
if n == nil {
n = &memFSNode{
mode: perm.Perm(),
}
dir.children[frag] = n
}
}
if n == nil {
return nil, os.ErrNotExist
}
if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 {
n.mu.Lock()
n.data = nil
n.mu.Unlock()
}
}
children := make([]os.FileInfo, 0, len(n.children))
for cName, c := range n.children {
children = append(children, c.stat(cName))
}
return &memFile{
n: n,
nameSnapshot: frag,
childrenSnapshot: children,
}, nil
}
func (fs *memFS) RemoveAll(ctx context.Context, name string) error {
fs.mu.Lock()
defer fs.mu.Unlock()
dir, frag, err := fs.find("remove", name)
if err != nil {
return err
}
if dir == nil {
// We can't remove the root.
return os.ErrInvalid
}
delete(dir.children, frag)
return nil
}
func (fs *memFS) Rename(ctx context.Context, oldName, newName string) error {
fs.mu.Lock()
defer fs.mu.Unlock()
oldName = slashClean(oldName)
newName = slashClean(newName)
if oldName == newName {
return nil
}
if strings.HasPrefix(newName, oldName+"/") {
// We can't rename oldName to be a sub-directory of itself.
return os.ErrInvalid
}
oDir, oFrag, err := fs.find("rename", oldName)
if err != nil {
return err
}
if oDir == nil {
// We can't rename from the root.
return os.ErrInvalid
}
nDir, nFrag, err := fs.find("rename", newName)
if err != nil {
return err
}
if nDir == nil {
// We can't rename to the root.
return os.ErrInvalid
}
oNode, ok := oDir.children[oFrag]
if !ok {
return os.ErrNotExist
}
if oNode.children != nil {
if nNode, ok := nDir.children[nFrag]; ok {
if nNode.children == nil {
return errNotADirectory
}
if len(nNode.children) != 0 {
return errDirectoryNotEmpty
}
}
}
delete(oDir.children, oFrag)
nDir.children[nFrag] = oNode
return nil
}
func (fs *memFS) Stat(ctx context.Context, name string) (os.FileInfo, error) {
fs.mu.Lock()
defer fs.mu.Unlock()
dir, frag, err := fs.find("stat", name)
if err != nil {
return nil, err
}
if dir == nil {
// We're stat'ting the root.
return fs.root.stat("/"), nil
}
if n, ok := dir.children[frag]; ok {
return n.stat(path.Base(name)), nil
}
return nil, os.ErrNotExist
}
// A memFSNode represents a single entry in the in-memory filesystem and also
// implements os.FileInfo.
type memFSNode struct {
// children is protected by memFS.mu.
children map[string]*memFSNode
mu sync.Mutex
data []byte
mode os.FileMode
modTime time.Time
deadProps map[xml.Name]Property
}
func (n *memFSNode) stat(name string) *memFileInfo {
n.mu.Lock()
defer n.mu.Unlock()
return &memFileInfo{
name: name,
size: int64(len(n.data)),
mode: n.mode,
modTime: n.modTime,
}
}
func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) {
n.mu.Lock()
defer n.mu.Unlock()
if len(n.deadProps) == 0 {
return nil, nil
}
ret := make(map[xml.Name]Property, len(n.deadProps))
for k, v := range n.deadProps {
ret[k] = v
}
return ret, nil
}
func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) {
n.mu.Lock()
defer n.mu.Unlock()
pstat := Propstat{Status: http.StatusOK}
for _, patch := range patches {
for _, p := range patch.Props {
pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
if patch.Remove {
delete(n.deadProps, p.XMLName)
continue
}
if n.deadProps == nil {
n.deadProps = map[xml.Name]Property{}
}
n.deadProps[p.XMLName] = p
}
}
return []Propstat{pstat}, nil
}
type memFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
func (f *memFileInfo) Name() string { return f.name }
func (f *memFileInfo) Size() int64 { return f.size }
func (f *memFileInfo) Mode() os.FileMode { return f.mode }
func (f *memFileInfo) ModTime() time.Time { return f.modTime }
func (f *memFileInfo) IsDir() bool { return f.mode.IsDir() }
func (f *memFileInfo) Sys() interface{} { return nil }
// A memFile is a File implementation for a memFSNode. It holds a per-file (not
// per-node) read/write position and a snapshot of the memFS' tree structure
// (a node's name and children) for that node.
type memFile struct {
n *memFSNode
nameSnapshot string
childrenSnapshot []os.FileInfo
// pos is protected by n.mu.
pos int
}
// A *memFile implements the optional DeadPropsHolder interface.
var _ DeadPropsHolder = (*memFile)(nil)
func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() }
func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) }
func (f *memFile) Close() error {
return nil
}
func (f *memFile) Read(p []byte) (int, error) {
f.n.mu.Lock()
defer f.n.mu.Unlock()
if f.n.mode.IsDir() {
return 0, os.ErrInvalid
}
if f.pos >= len(f.n.data) {
return 0, io.EOF
}
n := copy(p, f.n.data[f.pos:])
f.pos += n
return n, nil
}
func (f *memFile) Readdir(count int) ([]os.FileInfo, error) {
f.n.mu.Lock()
defer f.n.mu.Unlock()
if !f.n.mode.IsDir() {
return nil, os.ErrInvalid
}
old := f.pos
if old >= len(f.childrenSnapshot) {
// The os.File Readdir docs say that at the end of a directory,
// the error is io.EOF if count > 0 and nil if count <= 0.
if count > 0 {
return nil, io.EOF
}
return nil, nil
}
if count > 0 {
f.pos += count
if f.pos > len(f.childrenSnapshot) {
f.pos = len(f.childrenSnapshot)
}
} else {
f.pos = len(f.childrenSnapshot)
old = 0
}
return f.childrenSnapshot[old:f.pos], nil
}
func (f *memFile) Seek(offset int64, whence int) (int64, error) {
f.n.mu.Lock()
defer f.n.mu.Unlock()
npos := f.pos
// TODO: How to handle offsets greater than the size of system int?
switch whence {
case os.SEEK_SET:
npos = int(offset)
case os.SEEK_CUR:
npos += int(offset)
case os.SEEK_END:
npos = len(f.n.data) + int(offset)
default:
npos = -1
}
if npos < 0 {
return 0, os.ErrInvalid
}
f.pos = npos
return int64(f.pos), nil
}
func (f *memFile) Stat() (os.FileInfo, error) {
return f.n.stat(f.nameSnapshot), nil
}
func (f *memFile) Write(p []byte) (int, error) {
lenp := len(p)
f.n.mu.Lock()
defer f.n.mu.Unlock()
if f.n.mode.IsDir() {
return 0, os.ErrInvalid
}
if f.pos < len(f.n.data) {
n := copy(f.n.data[f.pos:], p)
f.pos += n
p = p[n:]
} else if f.pos > len(f.n.data) {
// Write permits the creation of holes, if we've seek'ed past the
// existing end of file.
if f.pos <= cap(f.n.data) {
oldLen := len(f.n.data)
f.n.data = f.n.data[:f.pos]
hole := f.n.data[oldLen:]
for i := range hole {
hole[i] = 0
}
} else {
d := make([]byte, f.pos, f.pos+len(p))
copy(d, f.n.data)
f.n.data = d
}
}
if len(p) > 0 {
// We should only get here if f.pos == len(f.n.data).
f.n.data = append(f.n.data, p...)
f.pos = len(f.n.data)
}
f.n.modTime = time.Now()
return lenp, nil
}
// moveFiles moves files and/or directories from src to dst.
//
// See section 9.9.4 for when various HTTP status codes apply.
func moveFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool) (status int, err error) {
created := false
if _, err := fs.Stat(ctx, dst); err != nil {
if !os.IsNotExist(err) {
return http.StatusForbidden, err
}
created = true
} else if overwrite {
// Section 9.9.3 says that "If a resource exists at the destination
// and the Overwrite header is "T", then prior to performing the move,
// the server must perform a DELETE with "Depth: infinity" on the
// destination resource."
if err := fs.RemoveAll(ctx, dst); err != nil {
return http.StatusForbidden, err
}
} else {
return http.StatusPreconditionFailed, os.ErrExist
}
if err := fs.Rename(ctx, src, dst); err != nil {
return http.StatusForbidden, err
}
if created {
return http.StatusCreated, nil
}
return http.StatusNoContent, nil
}
func copyProps(dst, src File) error {
d, ok := dst.(DeadPropsHolder)
if !ok {
return nil
}
s, ok := src.(DeadPropsHolder)
if !ok {
return nil
}
m, err := s.DeadProps()
if err != nil {
return err
}
props := make([]Property, 0, len(m))
for _, prop := range m {
props = append(props, prop)
}
_, err = d.Patch([]Proppatch{{Props: props}})
return err
}
// copyFiles copies files and/or directories from src to dst.
//
// See section 9.8.5 for when various HTTP status codes apply.
func copyFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) {
if recursion == 1000 {
return http.StatusInternalServerError, errRecursionTooDeep
}
recursion++
// TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/
// into /A/B/ could lead to infinite recursion if not handled correctly."
srcFile, err := fs.OpenFile(ctx, src, os.O_RDONLY, 0)
if err != nil {
if os.IsNotExist(err) {
return http.StatusNotFound, err
}
return http.StatusInternalServerError, err
}
defer srcFile.Close()
srcStat, err := srcFile.Stat()
if err != nil {
if os.IsNotExist(err) {
return http.StatusNotFound, err
}
return http.StatusInternalServerError, err
}
srcPerm := srcStat.Mode() & os.ModePerm
created := false
if _, err := fs.Stat(ctx, dst); err != nil {
if os.IsNotExist(err) {
created = true
} else {
return http.StatusForbidden, err
}
} else {
if !overwrite {
return http.StatusPreconditionFailed, os.ErrExist
}
if err := fs.RemoveAll(ctx, dst); err != nil && !os.IsNotExist(err) {
return http.StatusForbidden, err
}
}
if srcStat.IsDir() {
if err := fs.Mkdir(ctx, dst, srcPerm); err != nil {
return http.StatusForbidden, err
}
if depth == infiniteDepth {
children, err := srcFile.Readdir(-1)
if err != nil {
return http.StatusForbidden, err
}
for _, c := range children {
name := c.Name()
s := path.Join(src, name)
d := path.Join(dst, name)
cStatus, cErr := copyFiles(ctx, fs, s, d, overwrite, depth, recursion)
if cErr != nil {
// TODO: MultiStatus.
return cStatus, cErr
}
}
}
} else {
dstFile, err := fs.OpenFile(ctx, dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm)
if err != nil {
if os.IsNotExist(err) {
return http.StatusConflict, err
}
return http.StatusForbidden, err
}
_, copyErr := io.Copy(dstFile, srcFile)
propsErr := copyProps(dstFile, srcFile)
closeErr := dstFile.Close()
if copyErr != nil {
return http.StatusInternalServerError, copyErr
}
if propsErr != nil {
return http.StatusInternalServerError, propsErr
}
if closeErr != nil {
return http.StatusInternalServerError, closeErr
}
}
if created {
return http.StatusCreated, nil
}
return http.StatusNoContent, nil
}
// walkFS traverses filesystem fs starting at name up to depth levels.
//
// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node,
// walkFS calls walkFn. If a visited file system node is a directory and
// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node.
func walkFS(ctx context.Context, fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error {
// This implementation is based on Walk's code in the standard path/filepath package.
err := walkFn(name, info, nil)
if err != nil {
if info.IsDir() && err == filepath.SkipDir {
return nil
}
return err
}
if !info.IsDir() || depth == 0 {
return nil
}
if depth == 1 {
depth = 0
}
// Read directory names.
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return walkFn(name, info, err)
}
fileInfos, err := f.Readdir(0)
f.Close()
if err != nil {
return walkFn(name, info, err)
}
for _, fileInfo := range fileInfos {
filename := path.Join(name, fileInfo.Name())
fileInfo, err := fs.Stat(ctx, filename)
if err != nil {
if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
return err
}
} else {
err = walkFS(ctx, fs, depth, filename, fileInfo, walkFn)
if err != nil {
if !fileInfo.IsDir() || err != filepath.SkipDir {
return err
}
}
}
}
return nil
}
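
The memFile methods above give the in-memory filesystem ordinary file semantics; in particular, Write zero-fills any hole created by seeking past the current end of file before writing. A minimal sketch of that behaviour through the exported API (not part of this diff; the import paths are the upstream ones and the expected output is inferred from the Write logic above):

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"golang.org/x/net/context"
	"golang.org/x/net/webdav"
)

func main() {
	ctx := context.Background()
	fs := webdav.NewMemFS()

	// Write two bytes, seek past the end, write one more: memFile.Write
	// zero-fills the hole created by the seek.
	f, err := fs.OpenFile(ctx, "/hello.txt", os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		panic(err)
	}
	f.Write([]byte("hi"))
	f.Seek(5, os.SEEK_SET)
	f.Write([]byte("!"))
	f.Close()

	g, _ := fs.OpenFile(ctx, "/hello.txt", os.O_RDONLY, 0)
	b, _ := ioutil.ReadAll(g)
	g.Close()
	fmt.Printf("%q\n", b) // expected: "hi\x00\x00\x00!"
}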

17
vendor/golang.org/x/net/webdav/file_go1.6.go generated vendored Normal file

@ -0,0 +1,17 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.7
package webdav
import (
"net/http"
"golang.org/x/net/context"
)
func getContext(r *http.Request) context.Context {
return context.Background()
}

16
vendor/golang.org/x/net/webdav/file_go1.7.go generated vendored Normal file

@ -0,0 +1,16 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7
package webdav
import (
"context"
"net/http"
)
func getContext(r *http.Request) context.Context {
return r.Context()
}
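
The two build-constrained files above are shims: exactly one of them is compiled, so the rest of the package can call getContext(r) without caring which Go version is in use. A hypothetical sketch of the same pattern for one's own package (file names, package name and helper name are assumptions, not part of this diff):

// file: rctx_pre17.go
// +build !go1.7

package myhttp

import (
	"net/http"

	"golang.org/x/net/context"
)

// requestContext falls back to a background context; *http.Request has no
// Context method before Go 1.7.
func requestContext(r *http.Request) context.Context { return context.Background() }

// file: rctx_17.go
// +build go1.7

package myhttp

import (
	"context"
	"net/http"
)

// requestContext returns the request's own context on Go 1.7 and later.
func requestContext(r *http.Request) context.Context { return r.Context() }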

173
vendor/golang.org/x/net/webdav/if.go generated vendored Normal file

@ -0,0 +1,173 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
// The If header is covered by Section 10.4.
// http://www.webdav.org/specs/rfc4918.html#HEADER_If
import (
"strings"
)
// ifHeader is a disjunction (OR) of ifLists.
type ifHeader struct {
lists []ifList
}
// ifList is a conjunction (AND) of Conditions, and an optional resource tag.
type ifList struct {
resourceTag string
conditions []Condition
}
// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string
// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is
// returned by req.Header.Get("If") for a http.Request req.
func parseIfHeader(httpHeader string) (h ifHeader, ok bool) {
s := strings.TrimSpace(httpHeader)
switch tokenType, _, _ := lex(s); tokenType {
case '(':
return parseNoTagLists(s)
case angleTokenType:
return parseTaggedLists(s)
default:
return ifHeader{}, false
}
}
func parseNoTagLists(s string) (h ifHeader, ok bool) {
for {
l, remaining, ok := parseList(s)
if !ok {
return ifHeader{}, false
}
h.lists = append(h.lists, l)
if remaining == "" {
return h, true
}
s = remaining
}
}
func parseTaggedLists(s string) (h ifHeader, ok bool) {
resourceTag, n := "", 0
for first := true; ; first = false {
tokenType, tokenStr, remaining := lex(s)
switch tokenType {
case angleTokenType:
if !first && n == 0 {
return ifHeader{}, false
}
resourceTag, n = tokenStr, 0
s = remaining
case '(':
n++
l, remaining, ok := parseList(s)
if !ok {
return ifHeader{}, false
}
l.resourceTag = resourceTag
h.lists = append(h.lists, l)
if remaining == "" {
return h, true
}
s = remaining
default:
return ifHeader{}, false
}
}
}
func parseList(s string) (l ifList, remaining string, ok bool) {
tokenType, _, s := lex(s)
if tokenType != '(' {
return ifList{}, "", false
}
for {
tokenType, _, remaining = lex(s)
if tokenType == ')' {
if len(l.conditions) == 0 {
return ifList{}, "", false
}
return l, remaining, true
}
c, remaining, ok := parseCondition(s)
if !ok {
return ifList{}, "", false
}
l.conditions = append(l.conditions, c)
s = remaining
}
}
func parseCondition(s string) (c Condition, remaining string, ok bool) {
tokenType, tokenStr, s := lex(s)
if tokenType == notTokenType {
c.Not = true
tokenType, tokenStr, s = lex(s)
}
switch tokenType {
case strTokenType, angleTokenType:
c.Token = tokenStr
case squareTokenType:
c.ETag = tokenStr
default:
return Condition{}, "", false
}
return c, s, true
}
// Single-rune tokens like '(' or ')' have a token type equal to their rune.
// All other tokens have a negative token type.
const (
errTokenType = rune(-1)
eofTokenType = rune(-2)
strTokenType = rune(-3)
notTokenType = rune(-4)
angleTokenType = rune(-5)
squareTokenType = rune(-6)
)
func lex(s string) (tokenType rune, tokenStr string, remaining string) {
// The net/textproto Reader that parses the HTTP header will collapse
// Linear White Space that spans multiple "\r\n" lines to a single " ",
// so we don't need to look for '\r' or '\n'.
for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') {
s = s[1:]
}
if len(s) == 0 {
return eofTokenType, "", ""
}
i := 0
loop:
for ; i < len(s); i++ {
switch s[i] {
case '\t', ' ', '(', ')', '<', '>', '[', ']':
break loop
}
}
if i != 0 {
tokenStr, remaining = s[:i], s[i:]
if tokenStr == "Not" {
return notTokenType, "", remaining
}
return strTokenType, tokenStr, remaining
}
j := 0
switch s[0] {
case '<':
j, tokenType = strings.IndexByte(s, '>'), angleTokenType
case '[':
j, tokenType = strings.IndexByte(s, ']'), squareTokenType
default:
return rune(s[0]), "", s[1:]
}
if j < 0 {
return errTokenType, "", ""
}
return tokenType, s[1:j], s[j+1:]
}
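
parseIfHeader and lex above implement the RFC 4918 If-header grammar, but both are unexported, so they can only be exercised from inside package webdav. A hypothetical in-package test sketch (not part of this diff; the values in the trailing comments are what the grammar above should produce):

package webdav

import "testing"

func TestIfHeaderSketch(t *testing.T) {
	h, ok := parseIfHeader(`<http://example.com/locked/> (<urn:uuid:deadbeef> ["etag-1"]) (Not <urn:uuid:cafe>)`)
	if !ok || len(h.lists) != 2 {
		t.Fatalf("ok=%v, lists=%d", ok, len(h.lists))
	}
	// First list: tagged with a resource, one token condition, one ETag condition.
	_ = h.lists[0].resourceTag         // "http://example.com/locked/"
	_ = h.lists[0].conditions[0].Token // "urn:uuid:deadbeef"
	_ = h.lists[0].conditions[1].ETag  // `"etag-1"` (the quotes are part of the value)
	// Second list: a single negated token condition.
	_ = h.lists[1].conditions[0].Not // true
}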

1223
vendor/golang.org/x/net/webdav/internal/xml/marshal.go generated vendored Normal file

File diff suppressed because it is too large

692
vendor/golang.org/x/net/webdav/internal/xml/read.go generated vendored Normal file

@ -0,0 +1,692 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"bytes"
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
)
// BUG(rsc): Mapping between XML elements and data structures is inherently flawed:
// an XML element is an order-dependent collection of anonymous
// values, while a data structure is an order-independent collection
// of named values.
// See package json for a textual representation more suitable
// to data structures.
// Unmarshal parses the XML-encoded data and stores the result in
// the value pointed to by v, which must be an arbitrary struct,
// slice, or string. Well-formed data that does not fit into v is
// discarded.
//
// Because Unmarshal uses the reflect package, it can only assign
// to exported (upper case) fields. Unmarshal uses a case-sensitive
// comparison to match XML element names to tag values and struct
// field names.
//
// Unmarshal maps an XML element to a struct using the following rules.
// In the rules, the tag of a field refers to the value associated with the
// key 'xml' in the struct field's tag (see the example above).
//
// * If the struct has a field of type []byte or string with tag
// ",innerxml", Unmarshal accumulates the raw XML nested inside the
// element in that field. The rest of the rules still apply.
//
// * If the struct has a field named XMLName of type xml.Name,
// Unmarshal records the element name in that field.
//
// * If the XMLName field has an associated tag of the form
// "name" or "namespace-URL name", the XML element must have
// the given name (and, optionally, name space) or else Unmarshal
// returns an error.
//
// * If the XML element has an attribute whose name matches a
// struct field name with an associated tag containing ",attr" or
// the explicit name in a struct field tag of the form "name,attr",
// Unmarshal records the attribute value in that field.
//
// * If the XML element contains character data, that data is
// accumulated in the first struct field that has tag ",chardata".
// The struct field may have type []byte or string.
// If there is no such field, the character data is discarded.
//
// * If the XML element contains comments, they are accumulated in
// the first struct field that has tag ",comment". The struct
// field may have type []byte or string. If there is no such
// field, the comments are discarded.
//
// * If the XML element contains a sub-element whose name matches
// the prefix of a tag formatted as "a" or "a>b>c", unmarshal
// will descend into the XML structure looking for elements with the
// given names, and will map the innermost elements to that struct
// field. A tag starting with ">" is equivalent to one starting
// with the field name followed by ">".
//
// * If the XML element contains a sub-element whose name matches
// a struct field's XMLName tag and the struct field has no
// explicit name tag as per the previous rule, unmarshal maps
// the sub-element to that struct field.
//
// * If the XML element contains a sub-element whose name matches a
// field without any mode flags (",attr", ",chardata", etc), Unmarshal
// maps the sub-element to that struct field.
//
// * If the XML element contains a sub-element that hasn't matched any
// of the above rules and the struct has a field with tag ",any",
// unmarshal maps the sub-element to that struct field.
//
// * An anonymous struct field is handled as if the fields of its
// value were part of the outer struct.
//
// * A struct field with tag "-" is never unmarshalled into.
//
// Unmarshal maps an XML element to a string or []byte by saving the
// concatenation of that element's character data in the string or
// []byte. The saved []byte is never nil.
//
// Unmarshal maps an attribute value to a string or []byte by saving
// the value in the string or slice.
//
// Unmarshal maps an XML element to a slice by extending the length of
// the slice and mapping the element to the newly created value.
//
// Unmarshal maps an XML element or attribute value to a bool by
// setting it to the boolean value represented by the string.
//
// Unmarshal maps an XML element or attribute value to an integer or
// floating-point field by setting the field to the result of
// interpreting the string value in decimal. There is no check for
// overflow.
//
// Unmarshal maps an XML element to an xml.Name by recording the
// element name.
//
// Unmarshal maps an XML element to a pointer by setting the pointer
// to a freshly allocated value and then mapping the element to that value.
//
func Unmarshal(data []byte, v interface{}) error {
return NewDecoder(bytes.NewReader(data)).Decode(v)
}
// Decode works like xml.Unmarshal, except it reads the decoder
// stream to find the start element.
func (d *Decoder) Decode(v interface{}) error {
return d.DecodeElement(v, nil)
}
// DecodeElement works like xml.Unmarshal except that it takes
// a pointer to the start XML element to decode into v.
// It is useful when a client reads some raw XML tokens itself
// but also wants to defer to Unmarshal for some elements.
func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error {
val := reflect.ValueOf(v)
if val.Kind() != reflect.Ptr {
return errors.New("non-pointer passed to Unmarshal")
}
return d.unmarshal(val.Elem(), start)
}
// An UnmarshalError represents an error in the unmarshalling process.
type UnmarshalError string
func (e UnmarshalError) Error() string { return string(e) }
// Unmarshaler is the interface implemented by objects that can unmarshal
// an XML element description of themselves.
//
// UnmarshalXML decodes a single XML element
// beginning with the given start element.
// If it returns an error, the outer call to Unmarshal stops and
// returns that error.
// UnmarshalXML must consume exactly one XML element.
// One common implementation strategy is to unmarshal into
// a separate value with a layout matching the expected XML
// using d.DecodeElement, and then to copy the data from
// that value into the receiver.
// Another common strategy is to use d.Token to process the
// XML object one token at a time.
// UnmarshalXML may not use d.RawToken.
type Unmarshaler interface {
UnmarshalXML(d *Decoder, start StartElement) error
}
// UnmarshalerAttr is the interface implemented by objects that can unmarshal
// an XML attribute description of themselves.
//
// UnmarshalXMLAttr decodes a single XML attribute.
// If it returns an error, the outer call to Unmarshal stops and
// returns that error.
// UnmarshalXMLAttr is used only for struct fields with the
// "attr" option in the field tag.
type UnmarshalerAttr interface {
UnmarshalXMLAttr(attr Attr) error
}
// receiverType returns the receiver type to use in an expression like "%s.MethodName".
func receiverType(val interface{}) string {
t := reflect.TypeOf(val)
if t.Name() != "" {
return t.String()
}
return "(" + t.String() + ")"
}
// unmarshalInterface unmarshals a single XML element into val.
// start is the opening tag of the element.
func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error {
// Record that decoder must stop at end tag corresponding to start.
p.pushEOF()
p.unmarshalDepth++
err := val.UnmarshalXML(p, *start)
p.unmarshalDepth--
if err != nil {
p.popEOF()
return err
}
if !p.popEOF() {
return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local)
}
return nil
}
// unmarshalTextInterface unmarshals a single XML element into val.
// The chardata contained in the element (but not its children)
// is passed to the text unmarshaler.
func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error {
var buf []byte
depth := 1
for depth > 0 {
t, err := p.Token()
if err != nil {
return err
}
switch t := t.(type) {
case CharData:
if depth == 1 {
buf = append(buf, t...)
}
case StartElement:
depth++
case EndElement:
depth--
}
}
return val.UnmarshalText(buf)
}
// unmarshalAttr unmarshals a single XML attribute into val.
func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error {
if val.Kind() == reflect.Ptr {
if val.IsNil() {
val.Set(reflect.New(val.Type().Elem()))
}
val = val.Elem()
}
if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) {
// This is an unmarshaler with a non-pointer receiver,
// so it's likely to be incorrect, but we do what we're told.
return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) {
return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
}
}
// Not an UnmarshalerAttr; try encoding.TextUnmarshaler.
if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
// This is an unmarshaler with a non-pointer receiver,
// so it's likely to be incorrect, but we do what we're told.
return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
}
}
copyValue(val, []byte(attr.Value))
return nil
}
var (
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem()
textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
)
// Unmarshal a single XML element into val.
func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
// Find start element if we need it.
if start == nil {
for {
tok, err := p.Token()
if err != nil {
return err
}
if t, ok := tok.(StartElement); ok {
start = &t
break
}
}
}
// Load value from interface, but only if the result will be
// usefully addressable.
if val.Kind() == reflect.Interface && !val.IsNil() {
e := val.Elem()
if e.Kind() == reflect.Ptr && !e.IsNil() {
val = e
}
}
if val.Kind() == reflect.Ptr {
if val.IsNil() {
val.Set(reflect.New(val.Type().Elem()))
}
val = val.Elem()
}
if val.CanInterface() && val.Type().Implements(unmarshalerType) {
// This is an unmarshaler with a non-pointer receiver,
// so it's likely to be incorrect, but we do what we're told.
return p.unmarshalInterface(val.Interface().(Unmarshaler), start)
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(unmarshalerType) {
return p.unmarshalInterface(pv.Interface().(Unmarshaler), start)
}
}
if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start)
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start)
}
}
var (
data []byte
saveData reflect.Value
comment []byte
saveComment reflect.Value
saveXML reflect.Value
saveXMLIndex int
saveXMLData []byte
saveAny reflect.Value
sv reflect.Value
tinfo *typeInfo
err error
)
switch v := val; v.Kind() {
default:
return errors.New("unknown type " + v.Type().String())
case reflect.Interface:
// TODO: For now, simply ignore the field. In the near
// future we may choose to unmarshal the start
// element on it, if not nil.
return p.Skip()
case reflect.Slice:
typ := v.Type()
if typ.Elem().Kind() == reflect.Uint8 {
// []byte
saveData = v
break
}
// Slice of element values.
// Grow slice.
n := v.Len()
if n >= v.Cap() {
ncap := 2 * n
if ncap < 4 {
ncap = 4
}
new := reflect.MakeSlice(typ, n, ncap)
reflect.Copy(new, v)
v.Set(new)
}
v.SetLen(n + 1)
// Recur to read element into slice.
if err := p.unmarshal(v.Index(n), start); err != nil {
v.SetLen(n)
return err
}
return nil
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String:
saveData = v
case reflect.Struct:
typ := v.Type()
if typ == nameType {
v.Set(reflect.ValueOf(start.Name))
break
}
sv = v
tinfo, err = getTypeInfo(typ)
if err != nil {
return err
}
// Validate and assign element name.
if tinfo.xmlname != nil {
finfo := tinfo.xmlname
if finfo.name != "" && finfo.name != start.Name.Local {
return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">")
}
if finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have "
if start.Name.Space == "" {
e += "no name space"
} else {
e += start.Name.Space
}
return UnmarshalError(e)
}
fv := finfo.value(sv)
if _, ok := fv.Interface().(Name); ok {
fv.Set(reflect.ValueOf(start.Name))
}
}
// Assign attributes.
// Also, determine whether we need to save character data or comments.
for i := range tinfo.fields {
finfo := &tinfo.fields[i]
switch finfo.flags & fMode {
case fAttr:
strv := finfo.value(sv)
// Look for attribute.
for _, a := range start.Attr {
if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) {
if err := p.unmarshalAttr(strv, a); err != nil {
return err
}
break
}
}
case fCharData:
if !saveData.IsValid() {
saveData = finfo.value(sv)
}
case fComment:
if !saveComment.IsValid() {
saveComment = finfo.value(sv)
}
case fAny, fAny | fElement:
if !saveAny.IsValid() {
saveAny = finfo.value(sv)
}
case fInnerXml:
if !saveXML.IsValid() {
saveXML = finfo.value(sv)
if p.saved == nil {
saveXMLIndex = 0
p.saved = new(bytes.Buffer)
} else {
saveXMLIndex = p.savedOffset()
}
}
}
}
}
// Find end element.
// Process sub-elements along the way.
Loop:
for {
var savedOffset int
if saveXML.IsValid() {
savedOffset = p.savedOffset()
}
tok, err := p.Token()
if err != nil {
return err
}
switch t := tok.(type) {
case StartElement:
consumed := false
if sv.IsValid() {
consumed, err = p.unmarshalPath(tinfo, sv, nil, &t)
if err != nil {
return err
}
if !consumed && saveAny.IsValid() {
consumed = true
if err := p.unmarshal(saveAny, &t); err != nil {
return err
}
}
}
if !consumed {
if err := p.Skip(); err != nil {
return err
}
}
case EndElement:
if saveXML.IsValid() {
saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset]
if saveXMLIndex == 0 {
p.saved = nil
}
}
break Loop
case CharData:
if saveData.IsValid() {
data = append(data, t...)
}
case Comment:
if saveComment.IsValid() {
comment = append(comment, t...)
}
}
}
if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) {
if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
return err
}
saveData = reflect.Value{}
}
if saveData.IsValid() && saveData.CanAddr() {
pv := saveData.Addr()
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
return err
}
saveData = reflect.Value{}
}
}
if err := copyValue(saveData, data); err != nil {
return err
}
switch t := saveComment; t.Kind() {
case reflect.String:
t.SetString(string(comment))
case reflect.Slice:
t.Set(reflect.ValueOf(comment))
}
switch t := saveXML; t.Kind() {
case reflect.String:
t.SetString(string(saveXMLData))
case reflect.Slice:
t.Set(reflect.ValueOf(saveXMLData))
}
return nil
}
func copyValue(dst reflect.Value, src []byte) (err error) {
dst0 := dst
if dst.Kind() == reflect.Ptr {
if dst.IsNil() {
dst.Set(reflect.New(dst.Type().Elem()))
}
dst = dst.Elem()
}
// Save accumulated data.
switch dst.Kind() {
case reflect.Invalid:
// Probably a comment.
default:
return errors.New("cannot unmarshal into " + dst0.Type().String())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits())
if err != nil {
return err
}
dst.SetInt(itmp)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits())
if err != nil {
return err
}
dst.SetUint(utmp)
case reflect.Float32, reflect.Float64:
ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits())
if err != nil {
return err
}
dst.SetFloat(ftmp)
case reflect.Bool:
value, err := strconv.ParseBool(strings.TrimSpace(string(src)))
if err != nil {
return err
}
dst.SetBool(value)
case reflect.String:
dst.SetString(string(src))
case reflect.Slice:
if len(src) == 0 {
// non-nil to flag presence
src = []byte{}
}
dst.SetBytes(src)
}
return nil
}
// unmarshalPath walks down an XML structure looking for wanted
// paths, and calls unmarshal on them.
// The consumed result tells whether XML elements have been consumed
// from the Decoder until start's matching end element, or if it's
// still untouched because start is uninteresting for sv's fields.
func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) {
recurse := false
Loop:
for i := range tinfo.fields {
finfo := &tinfo.fields[i]
if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
continue
}
for j := range parents {
if parents[j] != finfo.parents[j] {
continue Loop
}
}
if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {
// It's a perfect match, unmarshal the field.
return true, p.unmarshal(finfo.value(sv), start)
}
if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {
// It's a prefix for the field. Break and recurse
// since it's not ok for one field path to be itself
// the prefix for another field path.
recurse = true
// We can reuse the same slice as long as we
// don't try to append to it.
parents = finfo.parents[:len(parents)+1]
break
}
}
if !recurse {
// We have no business with this element.
return false, nil
}
// The element is not a perfect match for any field, but one
// or more fields have the path to this element as a parent
// prefix. Recurse and attempt to match these.
for {
var tok Token
tok, err = p.Token()
if err != nil {
return true, err
}
switch t := tok.(type) {
case StartElement:
consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t)
if err != nil {
return true, err
}
if !consumed2 {
if err := p.Skip(); err != nil {
return true, err
}
}
case EndElement:
return true, nil
}
}
}
// Skip reads tokens until it has consumed the end element
// matching the most recent start element already consumed.
// It recurs if it encounters a start element, so it can be used to
// skip nested structures.
// It returns nil if it finds an end element matching the start
// element; otherwise it returns an error describing the problem.
func (d *Decoder) Skip() error {
for {
tok, err := d.Token()
if err != nil {
return err
}
switch tok.(type) {
case StartElement:
if err := d.Skip(); err != nil {
return err
}
case EndElement:
return nil
}
}
}
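
The long Unmarshal comment above spells out the tag rules: ",attr" reads attributes, "a>b" paths descend into nested elements, repeated elements grow slices, and XMLName must match the element name. This internal package is a fork of the standard library's encoding/xml, so the same rules can be illustrated with a small standalone sketch against encoding/xml (not part of this diff; the struct shapes and data are made up):

package main

import (
	"encoding/xml" // the internal fork above follows the same unmarshalling rules
	"fmt"
)

type Email struct {
	Where string `xml:"where,attr"` // ",attr" reads an attribute
	Addr  string                    // matched by field name
}

type Person struct {
	XMLName xml.Name `xml:"Person"`      // element name must match "Person"
	Name    string   `xml:"FullName"`    // simple child element
	Emails  []Email  `xml:"Email"`       // repeated elements extend the slice
	Groups  []string `xml:"Group>Value"` // "a>b" descends into nested elements
}

func main() {
	data := `<Person>
		<FullName>Grace R. Emlin</FullName>
		<Email where="home"><Addr>gre@example.com</Addr></Email>
		<Email where="work"><Addr>gre@work.example.com</Addr></Email>
		<Group><Value>Friends</Value><Value>Squash</Value></Group>
	</Person>`
	var p Person
	if err := xml.Unmarshal([]byte(data), &p); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(p.Name, p.Groups, p.Emails)
	// Grace R. Emlin [Friends Squash] [{home gre@example.com} {work gre@work.example.com}]
}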

371
vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go generated vendored Normal file

@ -0,0 +1,371 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"fmt"
"reflect"
"strings"
"sync"
)
// typeInfo holds details for the xml representation of a type.
type typeInfo struct {
xmlname *fieldInfo
fields []fieldInfo
}
// fieldInfo holds details for the xml representation of a single field.
type fieldInfo struct {
idx []int
name string
xmlns string
flags fieldFlags
parents []string
}
type fieldFlags int
const (
fElement fieldFlags = 1 << iota
fAttr
fCharData
fInnerXml
fComment
fAny
fOmitEmpty
fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny
)
var tinfoMap = make(map[reflect.Type]*typeInfo)
var tinfoLock sync.RWMutex
var nameType = reflect.TypeOf(Name{})
// getTypeInfo returns the typeInfo structure with details necessary
// for marshalling and unmarshalling typ.
func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
tinfoLock.RLock()
tinfo, ok := tinfoMap[typ]
tinfoLock.RUnlock()
if ok {
return tinfo, nil
}
tinfo = &typeInfo{}
if typ.Kind() == reflect.Struct && typ != nameType {
n := typ.NumField()
for i := 0; i < n; i++ {
f := typ.Field(i)
if f.PkgPath != "" || f.Tag.Get("xml") == "-" {
continue // Private field
}
// For embedded structs, embed its fields.
if f.Anonymous {
t := f.Type
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Kind() == reflect.Struct {
inner, err := getTypeInfo(t)
if err != nil {
return nil, err
}
if tinfo.xmlname == nil {
tinfo.xmlname = inner.xmlname
}
for _, finfo := range inner.fields {
finfo.idx = append([]int{i}, finfo.idx...)
if err := addFieldInfo(typ, tinfo, &finfo); err != nil {
return nil, err
}
}
continue
}
}
finfo, err := structFieldInfo(typ, &f)
if err != nil {
return nil, err
}
if f.Name == "XMLName" {
tinfo.xmlname = finfo
continue
}
// Add the field if it doesn't conflict with other fields.
if err := addFieldInfo(typ, tinfo, finfo); err != nil {
return nil, err
}
}
}
tinfoLock.Lock()
tinfoMap[typ] = tinfo
tinfoLock.Unlock()
return tinfo, nil
}
// structFieldInfo builds and returns a fieldInfo for f.
func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) {
finfo := &fieldInfo{idx: f.Index}
// Split the tag from the xml namespace if necessary.
tag := f.Tag.Get("xml")
if i := strings.Index(tag, " "); i >= 0 {
finfo.xmlns, tag = tag[:i], tag[i+1:]
}
// Parse flags.
tokens := strings.Split(tag, ",")
if len(tokens) == 1 {
finfo.flags = fElement
} else {
tag = tokens[0]
for _, flag := range tokens[1:] {
switch flag {
case "attr":
finfo.flags |= fAttr
case "chardata":
finfo.flags |= fCharData
case "innerxml":
finfo.flags |= fInnerXml
case "comment":
finfo.flags |= fComment
case "any":
finfo.flags |= fAny
case "omitempty":
finfo.flags |= fOmitEmpty
}
}
// Validate the flags used.
valid := true
switch mode := finfo.flags & fMode; mode {
case 0:
finfo.flags |= fElement
case fAttr, fCharData, fInnerXml, fComment, fAny:
if f.Name == "XMLName" || tag != "" && mode != fAttr {
valid = false
}
default:
// This will also catch multiple modes in a single field.
valid = false
}
if finfo.flags&fMode == fAny {
finfo.flags |= fElement
}
if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 {
valid = false
}
if !valid {
return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q",
f.Name, typ, f.Tag.Get("xml"))
}
}
// Use of xmlns without a name is not allowed.
if finfo.xmlns != "" && tag == "" {
return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q",
f.Name, typ, f.Tag.Get("xml"))
}
if f.Name == "XMLName" {
// The XMLName field records the XML element name. Don't
// process it as usual because its name should default to
// empty rather than to the field name.
finfo.name = tag
return finfo, nil
}
if tag == "" {
// If the name part of the tag is completely empty, get
// default from XMLName of underlying struct if feasible,
// or field name otherwise.
if xmlname := lookupXMLName(f.Type); xmlname != nil {
finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name
} else {
finfo.name = f.Name
}
return finfo, nil
}
if finfo.xmlns == "" && finfo.flags&fAttr == 0 {
// If it's an element no namespace specified, get the default
// from the XMLName of enclosing struct if possible.
if xmlname := lookupXMLName(typ); xmlname != nil {
finfo.xmlns = xmlname.xmlns
}
}
// Prepare field name and parents.
parents := strings.Split(tag, ">")
if parents[0] == "" {
parents[0] = f.Name
}
if parents[len(parents)-1] == "" {
return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ)
}
finfo.name = parents[len(parents)-1]
if len(parents) > 1 {
if (finfo.flags & fElement) == 0 {
return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ","))
}
finfo.parents = parents[:len(parents)-1]
}
// If the field type has an XMLName field, the names must match
// so that the behavior of both marshalling and unmarshalling
// is straightforward and unambiguous.
if finfo.flags&fElement != 0 {
ftyp := f.Type
xmlname := lookupXMLName(ftyp)
if xmlname != nil && xmlname.name != finfo.name {
return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName",
finfo.name, typ, f.Name, xmlname.name, ftyp)
}
}
return finfo, nil
}
// lookupXMLName returns the fieldInfo for typ's XMLName field
// in case it exists and has a valid xml field tag, otherwise
// it returns nil.
func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) {
for typ.Kind() == reflect.Ptr {
typ = typ.Elem()
}
if typ.Kind() != reflect.Struct {
return nil
}
for i, n := 0, typ.NumField(); i < n; i++ {
f := typ.Field(i)
if f.Name != "XMLName" {
continue
}
finfo, err := structFieldInfo(typ, &f)
if finfo.name != "" && err == nil {
return finfo
}
// Also consider errors as a non-existent field tag
// and let getTypeInfo itself report the error.
break
}
return nil
}
func min(a, b int) int {
if a <= b {
return a
}
return b
}
// addFieldInfo adds finfo to tinfo.fields if there are no
// conflicts, or if conflicts arise from previous fields that were
// obtained from deeper embedded structures than finfo. In the latter
// case, the conflicting entries are dropped.
// A conflict occurs when the path (parent + name) to a field is
// itself a prefix of another path, or when two paths match exactly.
// It is okay for field paths to share a common, shorter prefix.
func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error {
var conflicts []int
Loop:
// First, figure all conflicts. Most working code will have none.
for i := range tinfo.fields {
oldf := &tinfo.fields[i]
if oldf.flags&fMode != newf.flags&fMode {
continue
}
if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns {
continue
}
minl := min(len(newf.parents), len(oldf.parents))
for p := 0; p < minl; p++ {
if oldf.parents[p] != newf.parents[p] {
continue Loop
}
}
if len(oldf.parents) > len(newf.parents) {
if oldf.parents[len(newf.parents)] == newf.name {
conflicts = append(conflicts, i)
}
} else if len(oldf.parents) < len(newf.parents) {
if newf.parents[len(oldf.parents)] == oldf.name {
conflicts = append(conflicts, i)
}
} else {
if newf.name == oldf.name {
conflicts = append(conflicts, i)
}
}
}
// Without conflicts, add the new field and return.
if conflicts == nil {
tinfo.fields = append(tinfo.fields, *newf)
return nil
}
// If any conflict is shallower, ignore the new field.
// This matches the Go field resolution on embedding.
for _, i := range conflicts {
if len(tinfo.fields[i].idx) < len(newf.idx) {
return nil
}
}
// Otherwise, if any of them is at the same depth level, it's an error.
for _, i := range conflicts {
oldf := &tinfo.fields[i]
if len(oldf.idx) == len(newf.idx) {
f1 := typ.FieldByIndex(oldf.idx)
f2 := typ.FieldByIndex(newf.idx)
return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")}
}
}
// Otherwise, the new field is shallower, and thus takes precedence,
// so drop the conflicting fields from tinfo and append the new one.
for c := len(conflicts) - 1; c >= 0; c-- {
i := conflicts[c]
copy(tinfo.fields[i:], tinfo.fields[i+1:])
tinfo.fields = tinfo.fields[:len(tinfo.fields)-1]
}
tinfo.fields = append(tinfo.fields, *newf)
return nil
}
// A TagPathError represents an error in the unmarshalling process
// caused by the use of field tags with conflicting paths.
type TagPathError struct {
Struct reflect.Type
Field1, Tag1 string
Field2, Tag2 string
}
func (e *TagPathError) Error() string {
return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2)
}
// value returns v's field value corresponding to finfo.
// It's equivalent to v.FieldByIndex(finfo.idx), but initializes
// and dereferences pointers as necessary.
func (finfo *fieldInfo) value(v reflect.Value) reflect.Value {
for i, x := range finfo.idx {
if i > 0 {
t := v.Type()
if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
}
v = v.Field(x)
}
return v
}
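
addFieldInfo above rejects XML tags in which one field's path is a prefix of another field's path at the same depth, reporting a *TagPathError. The standard library's encoding/xml, from which this package was forked, behaves the same way, so a short sketch against it shows the error (not part of this diff; the struct and the quoted message are illustrative):

package main

import (
	"encoding/xml" // addFieldInfo above mirrors the standard library's checks
	"fmt"
)

// "A>B" is both a complete field path and a prefix of "A>B>C", so the two
// fields conflict at the same depth.
type Conflicting struct {
	First  string `xml:"A>B"`
	Second string `xml:"A>B>C"`
}

func main() {
	_, err := xml.Marshal(Conflicting{})
	fmt.Println(err)
	// e.g. main.Conflicting field "First" with tag "A>B" conflicts with
	// field "Second" with tag "A>B>C"
}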

1998
vendor/golang.org/x/net/webdav/internal/xml/xml.go generated vendored Normal file

File diff suppressed because it is too large

94
vendor/golang.org/x/net/webdav/litmus_test_server.go generated vendored Normal file

@ -0,0 +1,94 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
/*
This program is a server for the WebDAV 'litmus' compliance test at
http://www.webdav.org/neon/litmus/
To run the test:
go run litmus_test_server.go
and separately, from the downloaded litmus-xxx directory:
make URL=http://localhost:9999/ check
*/
package main
import (
"flag"
"fmt"
"log"
"net/http"
"net/url"
"golang.org/x/net/webdav"
)
var port = flag.Int("port", 9999, "server port")
func main() {
flag.Parse()
log.SetFlags(0)
h := &webdav.Handler{
FileSystem: webdav.NewMemFS(),
LockSystem: webdav.NewMemLS(),
Logger: func(r *http.Request, err error) {
litmus := r.Header.Get("X-Litmus")
if len(litmus) > 19 {
litmus = litmus[:16] + "..."
}
switch r.Method {
case "COPY", "MOVE":
dst := ""
if u, err := url.Parse(r.Header.Get("Destination")); err == nil {
dst = u.Path
}
o := r.Header.Get("Overwrite")
log.Printf("%-20s%-10s%-30s%-30so=%-2s%v", litmus, r.Method, r.URL.Path, dst, o, err)
default:
log.Printf("%-20s%-10s%-30s%v", litmus, r.Method, r.URL.Path, err)
}
},
}
// The next line would normally be:
// http.Handle("/", h)
// but we wrap that HTTP handler h to cater for a special case.
//
// The propfind_invalid2 litmus test case expects an empty namespace prefix
// declaration to be an error. The FAQ in the webdav litmus test says:
//
// "What does the "propfind_invalid2" test check for?...
//
// If a request was sent with an XML body which included an empty namespace
// prefix declaration (xmlns:ns1=""), then the server must reject that with
// a "400 Bad Request" response, as it is invalid according to the XML
// Namespace specification."
//
// On the other hand, the Go standard library's encoding/xml package
// accepts an empty xmlns namespace, as per the discussion at
// https://github.com/golang/go/issues/8068
//
// Empty namespaces seem disallowed in the second (2006) edition of the XML
// standard, but allowed in a later edition. The grammar differs between
// http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and
// http://www.w3.org/TR/REC-xml-names/#dt-prefix
//
// Thus, we assume that the propfind_invalid2 test is obsolete, and
// hard-code the 400 Bad Request response that the test expects.
http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("X-Litmus") == "props: 3 (propfind_invalid2)" {
http.Error(w, "400 Bad Request", http.StatusBadRequest)
return
}
h.ServeHTTP(w, r)
}))
addr := fmt.Sprintf(":%d", *port)
log.Printf("Serving %v", addr)
log.Fatal(http.ListenAndServe(addr, nil))
}
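
The litmus server above wires Handler, NewMemFS and NewMemLS together, plus a workaround for one litmus test case. A minimal sketch of the same wiring without that workaround (not part of this diff; the prefix and port are arbitrary):

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/webdav"
)

func main() {
	h := &webdav.Handler{
		Prefix:     "/dav/",
		FileSystem: webdav.NewMemFS(),
		LockSystem: webdav.NewMemLS(),
		Logger: func(r *http.Request, err error) {
			log.Printf("%s %s: %v", r.Method, r.URL.Path, err)
		},
	}
	http.Handle("/dav/", h)
	log.Fatal(http.ListenAndServe(":8080", nil))
}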

445
vendor/golang.org/x/net/webdav/lock.go generated vendored Normal file

@ -0,0 +1,445 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
import (
"container/heap"
"errors"
"strconv"
"strings"
"sync"
"time"
)
var (
// ErrConfirmationFailed is returned by a LockSystem's Confirm method.
ErrConfirmationFailed = errors.New("webdav: confirmation failed")
// ErrForbidden is returned by a LockSystem's Unlock method.
ErrForbidden = errors.New("webdav: forbidden")
// ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods.
ErrLocked = errors.New("webdav: locked")
// ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods.
ErrNoSuchLock = errors.New("webdav: no such lock")
)
// Condition can match a WebDAV resource, based on a token or ETag.
// Exactly one of Token and ETag should be non-empty.
type Condition struct {
Not bool
Token string
ETag string
}
// LockSystem manages access to a collection of named resources. The elements
// in a lock name are separated by slash ('/', U+002F) characters, regardless
// of host operating system convention.
type LockSystem interface {
// Confirm confirms that the caller can claim all of the locks specified by
// the given conditions, and that holding the union of all of those locks
// gives exclusive access to all of the named resources. Up to two resources
// can be named. Empty names are ignored.
//
// Exactly one of release and err will be non-nil. If release is non-nil,
// all of the requested locks are held until release is called. Calling
// release does not unlock the lock, in the WebDAV UNLOCK sense, but once
// Confirm has confirmed that a lock claim is valid, that lock cannot be
// Confirmed again until it has been released.
//
// If Confirm returns ErrConfirmationFailed then the Handler will continue
// to try any other set of locks presented (a WebDAV HTTP request can
// present more than one set of locks). If it returns any other non-nil
// error, the Handler will write a "500 Internal Server Error" HTTP status.
Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error)
// Create creates a lock with the given depth, duration, owner and root
// (name). The depth will either be negative (meaning infinite) or zero.
//
// If Create returns ErrLocked then the Handler will write a "423 Locked"
// HTTP status. If it returns any other non-nil error, the Handler will
// write a "500 Internal Server Error" HTTP status.
//
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
// when to use each error.
//
// The token returned identifies the created lock. It should be an absolute
// URI as defined by RFC 3986, Section 4.3. In particular, it should not
// contain whitespace.
Create(now time.Time, details LockDetails) (token string, err error)
// Refresh refreshes the lock with the given token.
//
// If Refresh returns ErrLocked then the Handler will write a "423 Locked"
// HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write
// a "412 Precondition Failed" HTTP Status. If it returns any other non-nil
// error, the Handler will write a "500 Internal Server Error" HTTP status.
//
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
// when to use each error.
Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error)
// Unlock unlocks the lock with the given token.
//
// If Unlock returns ErrForbidden then the Handler will write a "403
// Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler
// will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock
// then the Handler will write a "409 Conflict" HTTP Status. If it returns
// any other non-nil error, the Handler will write a "500 Internal Server
// Error" HTTP status.
//
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for
// when to use each error.
Unlock(now time.Time, token string) error
}
// LockDetails are a lock's metadata.
type LockDetails struct {
// Root is the root resource name being locked. For a zero-depth lock, the
// root is the only resource being locked.
Root string
// Duration is the lock timeout. A negative duration means infinite.
Duration time.Duration
// OwnerXML is the verbatim <owner> XML given in a LOCK HTTP request.
//
// TODO: does the "verbatim" nature play well with XML namespaces?
// Does the OwnerXML field need to have more structure? See
// https://codereview.appspot.com/175140043/#msg2
OwnerXML string
// ZeroDepth is whether the lock has zero depth. If it does not have zero
// depth, it has infinite depth.
ZeroDepth bool
}
// NewMemLS returns a new in-memory LockSystem.
func NewMemLS() LockSystem {
return &memLS{
byName: make(map[string]*memLSNode),
byToken: make(map[string]*memLSNode),
gen: uint64(time.Now().Unix()),
}
}
type memLS struct {
mu sync.Mutex
byName map[string]*memLSNode
byToken map[string]*memLSNode
gen uint64
// byExpiry only contains those nodes whose LockDetails have a finite
// Duration and are yet to expire.
byExpiry byExpiry
}
func (m *memLS) nextToken() string {
m.gen++
return strconv.FormatUint(m.gen, 10)
}
func (m *memLS) collectExpiredNodes(now time.Time) {
for len(m.byExpiry) > 0 {
if now.Before(m.byExpiry[0].expiry) {
break
}
m.remove(m.byExpiry[0])
}
}
func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
var n0, n1 *memLSNode
if name0 != "" {
if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil {
return nil, ErrConfirmationFailed
}
}
if name1 != "" {
if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil {
return nil, ErrConfirmationFailed
}
}
// Don't hold the same node twice.
if n1 == n0 {
n1 = nil
}
if n0 != nil {
m.hold(n0)
}
if n1 != nil {
m.hold(n1)
}
return func() {
m.mu.Lock()
defer m.mu.Unlock()
if n1 != nil {
m.unhold(n1)
}
if n0 != nil {
m.unhold(n0)
}
}, nil
}
// lookup returns the node n that locks the named resource, provided that n
// matches at least one of the given conditions and that lock isn't held by
// another party. Otherwise, it returns nil.
//
// n may be a parent of the named resource, if n is an infinite depth lock.
func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) {
// TODO: support Condition.Not and Condition.ETag.
for _, c := range conditions {
n = m.byToken[c.Token]
if n == nil || n.held {
continue
}
if name == n.details.Root {
return n
}
if n.details.ZeroDepth {
continue
}
if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") {
return n
}
}
return nil
}
func (m *memLS) hold(n *memLSNode) {
if n.held {
panic("webdav: memLS inconsistent held state")
}
n.held = true
if n.details.Duration >= 0 && n.byExpiryIndex >= 0 {
heap.Remove(&m.byExpiry, n.byExpiryIndex)
}
}
func (m *memLS) unhold(n *memLSNode) {
if !n.held {
panic("webdav: memLS inconsistent held state")
}
n.held = false
if n.details.Duration >= 0 {
heap.Push(&m.byExpiry, n)
}
}
func (m *memLS) Create(now time.Time, details LockDetails) (string, error) {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
details.Root = slashClean(details.Root)
if !m.canCreate(details.Root, details.ZeroDepth) {
return "", ErrLocked
}
n := m.create(details.Root)
n.token = m.nextToken()
m.byToken[n.token] = n
n.details = details
if n.details.Duration >= 0 {
n.expiry = now.Add(n.details.Duration)
heap.Push(&m.byExpiry, n)
}
return n.token, nil
}
func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
n := m.byToken[token]
if n == nil {
return LockDetails{}, ErrNoSuchLock
}
if n.held {
return LockDetails{}, ErrLocked
}
if n.byExpiryIndex >= 0 {
heap.Remove(&m.byExpiry, n.byExpiryIndex)
}
n.details.Duration = duration
if n.details.Duration >= 0 {
n.expiry = now.Add(n.details.Duration)
heap.Push(&m.byExpiry, n)
}
return n.details, nil
}
func (m *memLS) Unlock(now time.Time, token string) error {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
n := m.byToken[token]
if n == nil {
return ErrNoSuchLock
}
if n.held {
return ErrLocked
}
m.remove(n)
return nil
}
func (m *memLS) canCreate(name string, zeroDepth bool) bool {
return walkToRoot(name, func(name0 string, first bool) bool {
n := m.byName[name0]
if n == nil {
return true
}
if first {
if n.token != "" {
// The target node is already locked.
return false
}
if !zeroDepth {
// The requested lock depth is infinite, and the fact that n exists
// (n != nil) means that a descendent of the target node is locked.
return false
}
} else if n.token != "" && !n.details.ZeroDepth {
// An ancestor of the target node is locked with infinite depth.
return false
}
return true
})
}
func (m *memLS) create(name string) (ret *memLSNode) {
walkToRoot(name, func(name0 string, first bool) bool {
n := m.byName[name0]
if n == nil {
n = &memLSNode{
details: LockDetails{
Root: name0,
},
byExpiryIndex: -1,
}
m.byName[name0] = n
}
n.refCount++
if first {
ret = n
}
return true
})
return ret
}
func (m *memLS) remove(n *memLSNode) {
delete(m.byToken, n.token)
n.token = ""
walkToRoot(n.details.Root, func(name0 string, first bool) bool {
x := m.byName[name0]
x.refCount--
if x.refCount == 0 {
delete(m.byName, name0)
}
return true
})
if n.byExpiryIndex >= 0 {
heap.Remove(&m.byExpiry, n.byExpiryIndex)
}
}
func walkToRoot(name string, f func(name0 string, first bool) bool) bool {
for first := true; ; first = false {
if !f(name, first) {
return false
}
if name == "/" {
break
}
name = name[:strings.LastIndex(name, "/")]
if name == "" {
name = "/"
}
}
return true
}
type memLSNode struct {
// details are the lock metadata. Even if this node's name is not explicitly locked,
// details.Root will still equal the node's name.
details LockDetails
// token is the unique identifier for this node's lock. An empty token means that
// this node is not explicitly locked.
token string
// refCount is the number of self-or-descendent nodes that are explicitly locked.
refCount int
// expiry is when this node's lock expires.
expiry time.Time
// byExpiryIndex is the index of this node in memLS.byExpiry. It is -1
// if this node does not expire, or has expired.
byExpiryIndex int
// held is whether this node's lock is actively held by a Confirm call.
held bool
}
type byExpiry []*memLSNode
func (b *byExpiry) Len() int {
return len(*b)
}
func (b *byExpiry) Less(i, j int) bool {
return (*b)[i].expiry.Before((*b)[j].expiry)
}
func (b *byExpiry) Swap(i, j int) {
(*b)[i], (*b)[j] = (*b)[j], (*b)[i]
(*b)[i].byExpiryIndex = i
(*b)[j].byExpiryIndex = j
}
func (b *byExpiry) Push(x interface{}) {
n := x.(*memLSNode)
n.byExpiryIndex = len(*b)
*b = append(*b, n)
}
func (b *byExpiry) Pop() interface{} {
i := len(*b) - 1
n := (*b)[i]
(*b)[i] = nil
n.byExpiryIndex = -1
*b = (*b)[:i]
return n
}
const infiniteTimeout = -1
// parseTimeout parses the Timeout HTTP header, as per section 10.7. If s is
// empty, an infiniteTimeout is returned.
func parseTimeout(s string) (time.Duration, error) {
if s == "" {
return infiniteTimeout, nil
}
if i := strings.IndexByte(s, ','); i >= 0 {
s = s[:i]
}
s = strings.TrimSpace(s)
if s == "Infinite" {
return infiniteTimeout, nil
}
const pre = "Second-"
if !strings.HasPrefix(s, pre) {
return 0, errInvalidTimeout
}
s = s[len(pre):]
if s == "" || s[0] < '0' || '9' < s[0] {
return 0, errInvalidTimeout
}
n, err := strconv.ParseInt(s, 10, 64)
if err != nil || 1<<32-1 < n {
return 0, errInvalidTimeout
}
return time.Duration(n) * time.Second, nil
}
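
The in-memory LockSystem above is obtained through NewMemLS; Create, Confirm and Unlock follow the semantics documented on the LockSystem interface. A minimal sketch of that flow (not part of this diff; the lock root and the error handling are illustrative):

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/webdav"
)

func main() {
	ls := webdav.NewMemLS()
	now := time.Now()

	// Take a zero-depth lock on /a with no timeout (a negative duration means infinite).
	token, err := ls.Create(now, webdav.LockDetails{
		Root:      "/a",
		Duration:  -1,
		ZeroDepth: true,
	})
	if err != nil {
		panic(err)
	}

	// Confirm the claim with the lock's token, then release the hold.
	release, err := ls.Confirm(now, "/a", "", webdav.Condition{Token: token})
	if err != nil {
		panic(err)
	}
	release()

	// Unlock with the token; a second Unlock would return ErrNoSuchLock.
	fmt.Println(ls.Unlock(now, token)) // <nil>
}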

470
vendor/golang.org/x/net/webdav/prop.go generated vendored Normal file

@ -0,0 +1,470 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
import (
"bytes"
"encoding/xml"
"errors"
"fmt"
"io"
"mime"
"net/http"
"os"
"path/filepath"
"strconv"
"golang.org/x/net/context"
)
// Proppatch describes a property update instruction as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH
type Proppatch struct {
// Remove specifies whether this patch removes properties. If it does not
// remove them, it sets them.
Remove bool
// Props contains the properties to be set or removed.
Props []Property
}
// Propstat describes an XML propstat element as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
type Propstat struct {
// Props contains the properties for which Status applies.
Props []Property
// Status defines the HTTP status code of the properties in Prop.
// Allowed values include, but are not limited to the WebDAV status
// code extensions for HTTP/1.1.
// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
Status int
// XMLError contains the XML representation of the optional error element.
// XML content within this field must not rely on any predefined
// namespace declarations or prefixes. If empty, the XML error element
// is omitted.
XMLError string
// ResponseDescription contains the contents of the optional
// responsedescription field. If empty, the XML element is omitted.
ResponseDescription string
}
// makePropstats returns a slice containing those of x and y whose Props slice
// is non-empty. If both are empty, it returns a slice containing an otherwise
// zero Propstat whose HTTP status code is 200 OK.
func makePropstats(x, y Propstat) []Propstat {
pstats := make([]Propstat, 0, 2)
if len(x.Props) != 0 {
pstats = append(pstats, x)
}
if len(y.Props) != 0 {
pstats = append(pstats, y)
}
if len(pstats) == 0 {
pstats = append(pstats, Propstat{
Status: http.StatusOK,
})
}
return pstats
}
// DeadPropsHolder holds the dead properties of a resource.
//
// Dead properties are those properties that are explicitly defined. In
// comparison, live properties, such as DAV:getcontentlength, are implicitly
// defined by the underlying resource, and cannot be explicitly overridden or
// removed. See the Terminology section of
// http://www.webdav.org/specs/rfc4918.html#rfc.section.3
//
// There is a whitelist of the names of live properties. This package handles
// all live properties, and will only pass non-whitelisted names to the Patch
// method of DeadPropsHolder implementations.
type DeadPropsHolder interface {
// DeadProps returns a copy of the dead properties held.
DeadProps() (map[xml.Name]Property, error)
// Patch patches the dead properties held.
//
// Patching is atomic; either all or no patches succeed. It returns (nil,
// non-nil) if an internal server error occurred, otherwise the Propstats
// collectively contain one Property for each proposed patch Property. If
// all patches succeed, Patch returns a slice of length one and a Propstat
// element with a 200 OK HTTP status code. If none succeed, for reasons
// other than an internal server error, no Propstat has status 200 OK.
//
// For more details on when various HTTP status codes apply, see
// http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status
Patch([]Proppatch) ([]Propstat, error)
}
// liveProps contains all supported, protected DAV: properties.
var liveProps = map[xml.Name]struct {
// findFn implements the propfind function of this property. If nil,
// it indicates a hidden property.
findFn func(context.Context, FileSystem, LockSystem, string, os.FileInfo) (string, error)
// dir is true if the property applies to directories.
dir bool
}{
{Space: "DAV:", Local: "resourcetype"}: {
findFn: findResourceType,
dir: true,
},
{Space: "DAV:", Local: "displayname"}: {
findFn: findDisplayName,
dir: true,
},
{Space: "DAV:", Local: "getcontentlength"}: {
findFn: findContentLength,
dir: false,
},
{Space: "DAV:", Local: "getlastmodified"}: {
findFn: findLastModified,
// http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified
// suggests that getlastmodified should only apply to GETable
// resources, and this package does not support GET on directories.
//
// Nonetheless, some WebDAV clients expect child directories to be
// sortable by getlastmodified date, so this value is true, not false.
// See golang.org/issue/15334.
dir: true,
},
{Space: "DAV:", Local: "creationdate"}: {
findFn: nil,
dir: false,
},
{Space: "DAV:", Local: "getcontentlanguage"}: {
findFn: nil,
dir: false,
},
{Space: "DAV:", Local: "getcontenttype"}: {
findFn: findContentType,
dir: false,
},
{Space: "DAV:", Local: "getetag"}: {
findFn: findETag,
// findETag implements ETag as the concatenated hex values of a file's
// modification time and size. This is not a reliable synchronization
// mechanism for directories, so we do not advertise getetag for DAV
// collections.
dir: false,
},
// TODO: The lockdiscovery property requires LockSystem to list the
// active locks on a resource.
{Space: "DAV:", Local: "lockdiscovery"}: {},
{Space: "DAV:", Local: "supportedlock"}: {
findFn: findSupportedLock,
dir: true,
},
}
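// exampleLivePropVisible is an illustrative sketch, not part of the vendored
// upstream file: it shows the lookup that props and propnames below perform
// against liveProps. getcontentlength has a findFn but dir == false, so it is
// reported for files only, while creationdate has a nil findFn and is never
// reported at all.
func exampleLivePropVisible(isDir bool) bool {
	prop := liveProps[xml.Name{Space: "DAV:", Local: "getcontentlength"}]
	return prop.findFn != nil && (prop.dir || !isDir)
}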
// TODO(nigeltao) merge props and allprop?
// Props returns the status of the properties named pnames for resource name.
//
// Each Propstat has a unique status and each property name will only be part
// of one Propstat element.
func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) {
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return nil, err
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return nil, err
}
isDir := fi.IsDir()
var deadProps map[xml.Name]Property
if dph, ok := f.(DeadPropsHolder); ok {
deadProps, err = dph.DeadProps()
if err != nil {
return nil, err
}
}
pstatOK := Propstat{Status: http.StatusOK}
pstatNotFound := Propstat{Status: http.StatusNotFound}
for _, pn := range pnames {
// If this file has dead properties, check if they contain pn.
if dp, ok := deadProps[pn]; ok {
pstatOK.Props = append(pstatOK.Props, dp)
continue
}
// Otherwise, it must either be a live property or we don't know it.
if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) {
innerXML, err := prop.findFn(ctx, fs, ls, name, fi)
if err != nil {
return nil, err
}
pstatOK.Props = append(pstatOK.Props, Property{
XMLName: pn,
InnerXML: []byte(innerXML),
})
} else {
pstatNotFound.Props = append(pstatNotFound.Props, Property{
XMLName: pn,
})
}
}
return makePropstats(pstatOK, pstatNotFound), nil
}
// Propnames returns the property names defined for resource name.
func propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) {
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return nil, err
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return nil, err
}
isDir := fi.IsDir()
var deadProps map[xml.Name]Property
if dph, ok := f.(DeadPropsHolder); ok {
deadProps, err = dph.DeadProps()
if err != nil {
return nil, err
}
}
pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps))
for pn, prop := range liveProps {
if prop.findFn != nil && (prop.dir || !isDir) {
pnames = append(pnames, pn)
}
}
for pn := range deadProps {
pnames = append(pnames, pn)
}
return pnames, nil
}
// Allprop returns the properties defined for resource name and the properties
// named in include.
//
// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined
// within the RFC plus dead properties. Other live properties should only be
// returned if they are named in 'include'.
//
// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
func allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) {
pnames, err := propnames(ctx, fs, ls, name)
if err != nil {
return nil, err
}
// Add names from include if they are not already covered in pnames.
nameset := make(map[xml.Name]bool)
for _, pn := range pnames {
nameset[pn] = true
}
for _, pn := range include {
if !nameset[pn] {
pnames = append(pnames, pn)
}
}
return props(ctx, fs, ls, name, pnames)
}
// Patch patches the properties of resource name. The return values are
// constrained in the same manner as DeadPropsHolder.Patch.
func patch(ctx context.Context, fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) {
conflict := false
loop:
for _, patch := range patches {
for _, p := range patch.Props {
if _, ok := liveProps[p.XMLName]; ok {
conflict = true
break loop
}
}
}
if conflict {
pstatForbidden := Propstat{
Status: http.StatusForbidden,
XMLError: `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`,
}
pstatFailedDep := Propstat{
Status: StatusFailedDependency,
}
for _, patch := range patches {
for _, p := range patch.Props {
if _, ok := liveProps[p.XMLName]; ok {
pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName})
} else {
pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName})
}
}
}
return makePropstats(pstatForbidden, pstatFailedDep), nil
}
f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0)
if err != nil {
return nil, err
}
defer f.Close()
if dph, ok := f.(DeadPropsHolder); ok {
ret, err := dph.Patch(patches)
if err != nil {
return nil, err
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that
// "The contents of the prop XML element must only list the names of
// properties to which the result in the status element applies."
for _, pstat := range ret {
for i, p := range pstat.Props {
pstat.Props[i] = Property{XMLName: p.XMLName}
}
}
return ret, nil
}
// The file doesn't implement the optional DeadPropsHolder interface, so
// all patches are forbidden.
pstat := Propstat{Status: http.StatusForbidden}
for _, patch := range patches {
for _, p := range patch.Props {
pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
}
}
return []Propstat{pstat}, nil
}
func escapeXML(s string) string {
for i := 0; i < len(s); i++ {
// As an optimization, if s contains only ASCII letters, digits or a
// few special characters, the escaped value is s itself and we don't
// need to allocate a buffer and convert between string and []byte.
switch c := s[i]; {
case c == ' ' || c == '_' ||
('+' <= c && c <= '9') || // Digits as well as + , - . and /
('A' <= c && c <= 'Z') ||
('a' <= c && c <= 'z'):
continue
}
// Otherwise, go through the full escaping process.
var buf bytes.Buffer
xml.EscapeText(&buf, []byte(s))
return buf.String()
}
return s
}
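// exampleEscapeXML is an illustrative sketch, not part of the vendored
// upstream file: names made of letters, digits and the few allowed
// punctuation characters take the fast path unchanged, while XML
// metacharacters trigger the full xml.EscapeText pass.
func exampleEscapeXML() {
	fmt.Println(escapeXML("report-2018.txt")) // report-2018.txt (fast path)
	fmt.Println(escapeXML("a<b&c"))           // a&lt;b&amp;c
}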
func findResourceType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
if fi.IsDir() {
return `<D:collection xmlns:D="DAV:"/>`, nil
}
return "", nil
}
func findDisplayName(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
if slashClean(name) == "/" {
// Hide the real name of a possibly prefixed root directory.
return "", nil
}
return escapeXML(fi.Name()), nil
}
func findContentLength(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
return strconv.FormatInt(fi.Size(), 10), nil
}
func findLastModified(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
return fi.ModTime().UTC().Format(http.TimeFormat), nil
}
// ErrNotImplemented should be returned by methods of optional interfaces
// when the default implementation should be used instead.
var ErrNotImplemented = errors.New("not implemented")
// ContentTyper is an optional interface for the os.FileInfo
// objects returned by the FileSystem.
//
// If the os.FileInfo implements this interface, it is used to determine
// the content type of the object.
//
// Otherwise the file is opened and the content type is sniffed from the
// initial contents of the file.
type ContentTyper interface {
// ContentType returns the content type for the file.
//
// If this returns error ErrNotImplemented then the error will
// be ignored and the base implementation will be used
// instead.
ContentType(ctx context.Context) (string, error)
}
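// staticTypeFileInfo is a hypothetical sketch, not part of the vendored
// upstream file: an os.FileInfo wrapper (the type name and ctype field are
// invented) that reports a fixed content type so findContentType below can
// skip opening and sniffing the file.
type staticTypeFileInfo struct {
	os.FileInfo
	ctype string
}

func (fi staticTypeFileInfo) ContentType(ctx context.Context) (string, error) {
	if fi.ctype == "" {
		// Defer to the default sniffing logic in findContentType.
		return "", ErrNotImplemented
	}
	return fi.ctype, nil
}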
func findContentType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
if do, ok := fi.(ContentTyper); ok {
ctype, err := do.ContentType(ctx)
if err != ErrNotImplemented {
return ctype, err
}
}
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return "", err
}
defer f.Close()
// This implementation is based on serveContent's code in the standard net/http package.
ctype := mime.TypeByExtension(filepath.Ext(name))
if ctype != "" {
return ctype, nil
}
// Read a chunk to decide between utf-8 text and binary.
var buf [512]byte
n, err := io.ReadFull(f, buf[:])
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
return "", err
}
ctype = http.DetectContentType(buf[:n])
// Rewind file.
_, err = f.Seek(0, os.SEEK_SET)
return ctype, err
}
// ETager is an optional interface for the os.FileInfo objects
// returned by the FileSystem.
//
// If the os.FileInfo implements this interface, it is used to determine
// the ETag for the object.
//
// Otherwise an ETag is computed from the ModTime() and Size() methods of
// the os.FileInfo object.
type ETager interface {
// ETag returns an ETag for the file. This should be of the
// form "value" or W/"value"
//
// If this returns error ErrNotImplemented then the error will
// be ignored and the base implementation will be used
// instead.
ETag(ctx context.Context) (string, error)
}
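// versionedFileInfo is a hypothetical sketch, not part of the vendored
// upstream file: an os.FileInfo backed by a store that tracks an explicit
// revision number (the type name and revision field are invented), which
// yields a stronger ETag than the mtime/size heuristic in findETag below.
type versionedFileInfo struct {
	os.FileInfo
	revision int64
}

func (fi versionedFileInfo) ETag(ctx context.Context) (string, error) {
	return fmt.Sprintf(`"rev-%d"`, fi.revision), nil
}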
func findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
if do, ok := fi.(ETager); ok {
etag, err := do.ETag(ctx)
if err != ErrNotImplemented {
return etag, err
}
}
// The Apache http 2.4 web server by default concatenates the
// modification time and size of a file. We replicate the heuristic
// with nanosecond granularity.
return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil
}
func findSupportedLock(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
return `` +
`<D:lockentry xmlns:D="DAV:">` +
`<D:lockscope><D:exclusive/></D:lockscope>` +
`<D:locktype><D:write/></D:locktype>` +
`</D:lockentry>`, nil
}

702
vendor/golang.org/x/net/webdav/webdav.go generated vendored Normal file

@ -0,0 +1,702 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package webdav provides a WebDAV server implementation.
package webdav // import "golang.org/x/net/webdav"
import (
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path"
"strings"
"time"
)
type Handler struct {
// Prefix is the URL path prefix to strip from WebDAV resource paths.
Prefix string
// FileSystem is the virtual file system.
FileSystem FileSystem
// LockSystem is the lock management system.
LockSystem LockSystem
// Logger is an optional error logger. If non-nil, it will be called
// for all HTTP requests.
Logger func(*http.Request, error)
}
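// exampleServeWebDAV is an illustrative sketch, not part of the vendored
// upstream file: it wires a Handler into net/http using this package's Dir
// FileSystem and NewMemLS LockSystem (both defined elsewhere in this
// package); the "/dav/" prefix and the root directory are invented for
// illustration.
func exampleServeWebDAV() {
	h := &Handler{
		Prefix:     "/dav/",
		FileSystem: Dir("/srv/davroot"),
		LockSystem: NewMemLS(),
		Logger: func(r *http.Request, err error) {
			if err != nil {
				fmt.Printf("webdav: %s %s: %v\n", r.Method, r.URL.Path, err)
			}
		},
	}
	http.Handle("/dav/", h)
}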
func (h *Handler) stripPrefix(p string) (string, int, error) {
if h.Prefix == "" {
return p, http.StatusOK, nil
}
if r := strings.TrimPrefix(p, h.Prefix); len(r) < len(p) {
return r, http.StatusOK, nil
}
return p, http.StatusNotFound, errPrefixMismatch
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
status, err := http.StatusBadRequest, errUnsupportedMethod
if h.FileSystem == nil {
status, err = http.StatusInternalServerError, errNoFileSystem
} else if h.LockSystem == nil {
status, err = http.StatusInternalServerError, errNoLockSystem
} else {
switch r.Method {
case "OPTIONS":
status, err = h.handleOptions(w, r)
case "GET", "HEAD", "POST":
status, err = h.handleGetHeadPost(w, r)
case "DELETE":
status, err = h.handleDelete(w, r)
case "PUT":
status, err = h.handlePut(w, r)
case "MKCOL":
status, err = h.handleMkcol(w, r)
case "COPY", "MOVE":
status, err = h.handleCopyMove(w, r)
case "LOCK":
status, err = h.handleLock(w, r)
case "UNLOCK":
status, err = h.handleUnlock(w, r)
case "PROPFIND":
status, err = h.handlePropfind(w, r)
case "PROPPATCH":
status, err = h.handleProppatch(w, r)
}
}
if status != 0 {
w.WriteHeader(status)
if status != http.StatusNoContent {
w.Write([]byte(StatusText(status)))
}
}
if h.Logger != nil {
h.Logger(r, err)
}
}
func (h *Handler) lock(now time.Time, root string) (token string, status int, err error) {
token, err = h.LockSystem.Create(now, LockDetails{
Root: root,
Duration: infiniteTimeout,
ZeroDepth: true,
})
if err != nil {
if err == ErrLocked {
return "", StatusLocked, err
}
return "", http.StatusInternalServerError, err
}
return token, 0, nil
}
func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) {
hdr := r.Header.Get("If")
if hdr == "" {
// An empty If header means that the client hasn't previously created locks.
// Even if this client doesn't care about locks, we still need to check that
// the resources aren't locked by another client, so we create temporary
// locks that would conflict with another client's locks. These temporary
// locks are unlocked at the end of the HTTP request.
now, srcToken, dstToken := time.Now(), "", ""
if src != "" {
srcToken, status, err = h.lock(now, src)
if err != nil {
return nil, status, err
}
}
if dst != "" {
dstToken, status, err = h.lock(now, dst)
if err != nil {
if srcToken != "" {
h.LockSystem.Unlock(now, srcToken)
}
return nil, status, err
}
}
return func() {
if dstToken != "" {
h.LockSystem.Unlock(now, dstToken)
}
if srcToken != "" {
h.LockSystem.Unlock(now, srcToken)
}
}, 0, nil
}
ih, ok := parseIfHeader(hdr)
if !ok {
return nil, http.StatusBadRequest, errInvalidIfHeader
}
// ih is a disjunction (OR) of ifLists, so any ifList will do.
for _, l := range ih.lists {
lsrc := l.resourceTag
if lsrc == "" {
lsrc = src
} else {
u, err := url.Parse(lsrc)
if err != nil {
continue
}
if u.Host != r.Host {
continue
}
lsrc, status, err = h.stripPrefix(u.Path)
if err != nil {
return nil, status, err
}
}
release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...)
if err == ErrConfirmationFailed {
continue
}
if err != nil {
return nil, http.StatusInternalServerError, err
}
return release, 0, nil
}
// Section 10.4.1 says that "If this header is evaluated and all state lists
// fail, then the request must fail with a 412 (Precondition Failed) status."
// We follow the spec even though the cond_put_corrupt_token test case from
// the litmus test warns on seeing a 412 instead of a 423 (Locked).
return nil, http.StatusPreconditionFailed, ErrLocked
}
func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
ctx := getContext(r)
allow := "OPTIONS, LOCK, PUT, MKCOL"
if fi, err := h.FileSystem.Stat(ctx, reqPath); err == nil {
if fi.IsDir() {
allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND"
} else {
allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT"
}
}
w.Header().Set("Allow", allow)
// http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes
w.Header().Set("DAV", "1, 2")
// http://msdn.microsoft.com/en-au/library/cc250217.aspx
w.Header().Set("MS-Author-Via", "DAV")
return 0, nil
}
func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
// TODO: check locks for read-only access??
ctx := getContext(r)
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDONLY, 0)
if err != nil {
return http.StatusNotFound, err
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return http.StatusNotFound, err
}
if fi.IsDir() {
return http.StatusMethodNotAllowed, nil
}
etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)
if err != nil {
return http.StatusInternalServerError, err
}
w.Header().Set("ETag", etag)
// Let ServeContent determine the Content-Type header.
http.ServeContent(w, r, reqPath, fi.ModTime(), f)
return 0, nil
}
func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "")
if err != nil {
return status, err
}
defer release()
ctx := getContext(r)
// TODO: return MultiStatus where appropriate.
// "godoc os RemoveAll" says that "If the path does not exist, RemoveAll
// returns nil (no error)." WebDAV semantics are that it should return a
// "404 Not Found". We therefore have to Stat before we RemoveAll.
if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
if os.IsNotExist(err) {
return http.StatusNotFound, err
}
return http.StatusMethodNotAllowed, err
}
if err := h.FileSystem.RemoveAll(ctx, reqPath); err != nil {
return http.StatusMethodNotAllowed, err
}
return http.StatusNoContent, nil
}
func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "")
if err != nil {
return status, err
}
defer release()
// TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz'
// comments in http.checkEtag.
ctx := getContext(r)
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return http.StatusNotFound, err
}
_, copyErr := io.Copy(f, r.Body)
fi, statErr := f.Stat()
closeErr := f.Close()
// TODO(rost): Returning 405 Method Not Allowed might not be appropriate.
if copyErr != nil {
return http.StatusMethodNotAllowed, copyErr
}
if statErr != nil {
return http.StatusMethodNotAllowed, statErr
}
if closeErr != nil {
return http.StatusMethodNotAllowed, closeErr
}
etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)
if err != nil {
return http.StatusInternalServerError, err
}
w.Header().Set("ETag", etag)
return http.StatusCreated, nil
}
func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "")
if err != nil {
return status, err
}
defer release()
ctx := getContext(r)
if r.ContentLength > 0 {
return http.StatusUnsupportedMediaType, nil
}
if err := h.FileSystem.Mkdir(ctx, reqPath, 0777); err != nil {
if os.IsNotExist(err) {
return http.StatusConflict, err
}
return http.StatusMethodNotAllowed, err
}
return http.StatusCreated, nil
}
func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) {
hdr := r.Header.Get("Destination")
if hdr == "" {
return http.StatusBadRequest, errInvalidDestination
}
u, err := url.Parse(hdr)
if err != nil {
return http.StatusBadRequest, errInvalidDestination
}
if u.Host != r.Host {
return http.StatusBadGateway, errInvalidDestination
}
src, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
dst, status, err := h.stripPrefix(u.Path)
if err != nil {
return status, err
}
if dst == "" {
return http.StatusBadGateway, errInvalidDestination
}
if dst == src {
return http.StatusForbidden, errDestinationEqualsSource
}
ctx := getContext(r)
if r.Method == "COPY" {
// Section 7.5.1 says that a COPY only needs to lock the destination,
// not both destination and source. Strictly speaking, this is racy,
// even though a COPY doesn't modify the source, if a concurrent
// operation modifies the source. However, the litmus test explicitly
// checks that COPYing a locked-by-another source is OK.
release, status, err := h.confirmLocks(r, "", dst)
if err != nil {
return status, err
}
defer release()
// Section 9.8.3 says that "The COPY method on a collection without a Depth
// header must act as if a Depth header with value "infinity" was included".
depth := infiniteDepth
if hdr := r.Header.Get("Depth"); hdr != "" {
depth = parseDepth(hdr)
if depth != 0 && depth != infiniteDepth {
// Section 9.8.3 says that "A client may submit a Depth header on a
// COPY on a collection with a value of "0" or "infinity"."
return http.StatusBadRequest, errInvalidDepth
}
}
return copyFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0)
}
release, status, err := h.confirmLocks(r, src, dst)
if err != nil {
return status, err
}
defer release()
// Section 9.9.2 says that "The MOVE method on a collection must act as if
// a "Depth: infinity" header was used on it. A client must not submit a
// Depth header on a MOVE on a collection with any value but "infinity"."
if hdr := r.Header.Get("Depth"); hdr != "" {
if parseDepth(hdr) != infiniteDepth {
return http.StatusBadRequest, errInvalidDepth
}
}
return moveFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T")
}
func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) {
duration, err := parseTimeout(r.Header.Get("Timeout"))
if err != nil {
return http.StatusBadRequest, err
}
li, status, err := readLockInfo(r.Body)
if err != nil {
return status, err
}
ctx := getContext(r)
token, ld, now, created := "", LockDetails{}, time.Now(), false
if li == (lockInfo{}) {
// An empty lockInfo means to refresh the lock.
ih, ok := parseIfHeader(r.Header.Get("If"))
if !ok {
return http.StatusBadRequest, errInvalidIfHeader
}
if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 {
token = ih.lists[0].conditions[0].Token
}
if token == "" {
return http.StatusBadRequest, errInvalidLockToken
}
ld, err = h.LockSystem.Refresh(now, token, duration)
if err != nil {
if err == ErrNoSuchLock {
return http.StatusPreconditionFailed, err
}
return http.StatusInternalServerError, err
}
} else {
// Section 9.10.3 says that "If no Depth header is submitted on a LOCK request,
// then the request MUST act as if a "Depth:infinity" had been submitted."
depth := infiniteDepth
if hdr := r.Header.Get("Depth"); hdr != "" {
depth = parseDepth(hdr)
if depth != 0 && depth != infiniteDepth {
// Section 9.10.3 says that "Values other than 0 or infinity must not be
// used with the Depth header on a LOCK method".
return http.StatusBadRequest, errInvalidDepth
}
}
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
ld = LockDetails{
Root: reqPath,
Duration: duration,
OwnerXML: li.Owner.InnerXML,
ZeroDepth: depth == 0,
}
token, err = h.LockSystem.Create(now, ld)
if err != nil {
if err == ErrLocked {
return StatusLocked, err
}
return http.StatusInternalServerError, err
}
defer func() {
if retErr != nil {
h.LockSystem.Unlock(now, token)
}
}()
// Create the resource if it didn't previously exist.
if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
// TODO: detect missing intermediate dirs and return http.StatusConflict?
return http.StatusInternalServerError, err
}
f.Close()
created = true
}
// http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
// Lock-Token value is a Coded-URL. We add angle brackets.
w.Header().Set("Lock-Token", "<"+token+">")
}
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
if created {
// This is "w.WriteHeader(http.StatusCreated)" and not "return
// http.StatusCreated, nil" because we write our own (XML) response to w
// and Handler.ServeHTTP would otherwise write "Created".
w.WriteHeader(http.StatusCreated)
}
writeLockInfo(w, token, ld)
return 0, nil
}
func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) {
// http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
// Lock-Token value is a Coded-URL. We strip its angle brackets.
t := r.Header.Get("Lock-Token")
if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' {
return http.StatusBadRequest, errInvalidLockToken
}
t = t[1 : len(t)-1]
switch err = h.LockSystem.Unlock(time.Now(), t); err {
case nil:
return http.StatusNoContent, err
case ErrForbidden:
return http.StatusForbidden, err
case ErrLocked:
return StatusLocked, err
case ErrNoSuchLock:
return http.StatusConflict, err
default:
return http.StatusInternalServerError, err
}
}
func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
ctx := getContext(r)
fi, err := h.FileSystem.Stat(ctx, reqPath)
if err != nil {
if os.IsNotExist(err) {
return http.StatusNotFound, err
}
return http.StatusMethodNotAllowed, err
}
depth := infiniteDepth
if hdr := r.Header.Get("Depth"); hdr != "" {
depth = parseDepth(hdr)
if depth == invalidDepth {
return http.StatusBadRequest, errInvalidDepth
}
}
pf, status, err := readPropfind(r.Body)
if err != nil {
return status, err
}
mw := multistatusWriter{w: w}
walkFn := func(reqPath string, info os.FileInfo, err error) error {
if err != nil {
return err
}
var pstats []Propstat
if pf.Propname != nil {
pnames, err := propnames(ctx, h.FileSystem, h.LockSystem, reqPath)
if err != nil {
return err
}
pstat := Propstat{Status: http.StatusOK}
for _, xmlname := range pnames {
pstat.Props = append(pstat.Props, Property{XMLName: xmlname})
}
pstats = append(pstats, pstat)
} else if pf.Allprop != nil {
pstats, err = allprop(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop)
} else {
pstats, err = props(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop)
}
if err != nil {
return err
}
return mw.write(makePropstatResponse(path.Join(h.Prefix, reqPath), pstats))
}
walkErr := walkFS(ctx, h.FileSystem, depth, reqPath, fi, walkFn)
closeErr := mw.close()
if walkErr != nil {
return http.StatusInternalServerError, walkErr
}
if closeErr != nil {
return http.StatusInternalServerError, closeErr
}
return 0, nil
}
func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "")
if err != nil {
return status, err
}
defer release()
ctx := getContext(r)
if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
if os.IsNotExist(err) {
return http.StatusNotFound, err
}
return http.StatusMethodNotAllowed, err
}
patches, status, err := readProppatch(r.Body)
if err != nil {
return status, err
}
pstats, err := patch(ctx, h.FileSystem, h.LockSystem, reqPath, patches)
if err != nil {
return http.StatusInternalServerError, err
}
mw := multistatusWriter{w: w}
writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats))
closeErr := mw.close()
if writeErr != nil {
return http.StatusInternalServerError, writeErr
}
if closeErr != nil {
return http.StatusInternalServerError, closeErr
}
return 0, nil
}
func makePropstatResponse(href string, pstats []Propstat) *response {
resp := response{
Href: []string{(&url.URL{Path: href}).EscapedPath()},
Propstat: make([]propstat, 0, len(pstats)),
}
for _, p := range pstats {
var xmlErr *xmlError
if p.XMLError != "" {
xmlErr = &xmlError{InnerXML: []byte(p.XMLError)}
}
resp.Propstat = append(resp.Propstat, propstat{
Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)),
Prop: p.Props,
ResponseDescription: p.ResponseDescription,
Error: xmlErr,
})
}
return &resp
}
const (
infiniteDepth = -1
invalidDepth = -2
)
// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and
// infiniteDepth. Parsing any other string returns invalidDepth.
//
// Different WebDAV methods have further constraints on valid depths:
// - PROPFIND has no further restrictions, as per section 9.1.
// - COPY accepts only "0" or "infinity", as per section 9.8.3.
// - MOVE accepts only "infinity", as per section 9.9.2.
// - LOCK accepts only "0" or "infinity", as per section 9.10.3.
// These constraints are enforced by the handleXxx methods.
func parseDepth(s string) int {
switch s {
case "0":
return 0
case "1":
return 1
case "infinity":
return infiniteDepth
}
return invalidDepth
}
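// exampleParseDepth is an illustrative sketch, not part of the vendored
// upstream file, of the mapping documented above.
func exampleParseDepth() {
	fmt.Println(parseDepth("0"))        // 0
	fmt.Println(parseDepth("1"))        // 1
	fmt.Println(parseDepth("infinity")) // -1 (infiniteDepth)
	fmt.Println(parseDepth("2"))        // -2 (invalidDepth)
}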
// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
const (
StatusMulti = 207
StatusUnprocessableEntity = 422
StatusLocked = 423
StatusFailedDependency = 424
StatusInsufficientStorage = 507
)
func StatusText(code int) string {
switch code {
case StatusMulti:
return "Multi-Status"
case StatusUnprocessableEntity:
return "Unprocessable Entity"
case StatusLocked:
return "Locked"
case StatusFailedDependency:
return "Failed Dependency"
case StatusInsufficientStorage:
return "Insufficient Storage"
}
return http.StatusText(code)
}
var (
errDestinationEqualsSource = errors.New("webdav: destination equals source")
errDirectoryNotEmpty = errors.New("webdav: directory not empty")
errInvalidDepth = errors.New("webdav: invalid depth")
errInvalidDestination = errors.New("webdav: invalid destination")
errInvalidIfHeader = errors.New("webdav: invalid If header")
errInvalidLockInfo = errors.New("webdav: invalid lock info")
errInvalidLockToken = errors.New("webdav: invalid lock token")
errInvalidPropfind = errors.New("webdav: invalid propfind")
errInvalidProppatch = errors.New("webdav: invalid proppatch")
errInvalidResponse = errors.New("webdav: invalid response")
errInvalidTimeout = errors.New("webdav: invalid timeout")
errNoFileSystem = errors.New("webdav: no file system")
errNoLockSystem = errors.New("webdav: no lock system")
errNotADirectory = errors.New("webdav: not a directory")
errPrefixMismatch = errors.New("webdav: prefix mismatch")
errRecursionTooDeep = errors.New("webdav: recursion too deep")
errUnsupportedLockInfo = errors.New("webdav: unsupported lock info")
errUnsupportedMethod = errors.New("webdav: unsupported method")
)

519
vendor/golang.org/x/net/webdav/xml.go generated vendored Normal file

@ -0,0 +1,519 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
// The XML encoding is covered by Section 14.
// http://www.webdav.org/specs/rfc4918.html#xml.element.definitions
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"net/http"
"time"
// As of https://go-review.googlesource.com/#/c/12772/ which was submitted
// in July 2015, this package uses an internal fork of the standard
// library's encoding/xml package, due to changes in the way namespaces
// were encoded. Such changes were introduced in the Go 1.5 cycle, but were
// rolled back in response to https://github.com/golang/go/issues/11841
//
// However, this package's exported API, specifically the Property and
// DeadPropsHolder types, need to refer to the standard library's version
// of the xml.Name type, as code that imports this package cannot refer to
// the internal version.
//
// This file therefore imports both the internal and external versions, as
// ixml and xml, and converts between them.
//
// In the long term, this package should use the standard library's version
// only, and the internal fork deleted, once
// https://github.com/golang/go/issues/13400 is resolved.
ixml "golang.org/x/net/webdav/internal/xml"
)
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_lockinfo
type lockInfo struct {
XMLName ixml.Name `xml:"lockinfo"`
Exclusive *struct{} `xml:"lockscope>exclusive"`
Shared *struct{} `xml:"lockscope>shared"`
Write *struct{} `xml:"locktype>write"`
Owner owner `xml:"owner"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner
type owner struct {
InnerXML string `xml:",innerxml"`
}
func readLockInfo(r io.Reader) (li lockInfo, status int, err error) {
c := &countingReader{r: r}
if err = ixml.NewDecoder(c).Decode(&li); err != nil {
if err == io.EOF {
if c.n == 0 {
// An empty body means to refresh the lock.
// http://www.webdav.org/specs/rfc4918.html#refreshing-locks
return lockInfo{}, 0, nil
}
err = errInvalidLockInfo
}
return lockInfo{}, http.StatusBadRequest, err
}
// We only support exclusive (non-shared) write locks. In practice, these are
// the only types of locks that seem to matter.
if li.Exclusive == nil || li.Shared != nil || li.Write == nil {
return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo
}
return li, 0, nil
}
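// exampleLockRefreshBody is an illustrative sketch, not part of the vendored
// upstream file: an empty LOCK request body decodes to the zero lockInfo,
// which handleLock interprets as a request to refresh an existing lock.
func exampleLockRefreshBody() {
	li, status, err := readLockInfo(bytes.NewReader(nil))
	fmt.Println(li == (lockInfo{}), status, err) // true 0 <nil>
}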
type countingReader struct {
n int
r io.Reader
}
func (c *countingReader) Read(p []byte) (int, error) {
n, err := c.r.Read(p)
c.n += n
return n, err
}
func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) {
depth := "infinity"
if ld.ZeroDepth {
depth = "0"
}
timeout := ld.Duration / time.Second
return fmt.Fprintf(w, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"+
"<D:prop xmlns:D=\"DAV:\"><D:lockdiscovery><D:activelock>\n"+
" <D:locktype><D:write/></D:locktype>\n"+
" <D:lockscope><D:exclusive/></D:lockscope>\n"+
" <D:depth>%s</D:depth>\n"+
" <D:owner>%s</D:owner>\n"+
" <D:timeout>Second-%d</D:timeout>\n"+
" <D:locktoken><D:href>%s</D:href></D:locktoken>\n"+
" <D:lockroot><D:href>%s</D:href></D:lockroot>\n"+
"</D:activelock></D:lockdiscovery></D:prop>",
depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root),
)
}
func escape(s string) string {
for i := 0; i < len(s); i++ {
switch s[i] {
case '"', '&', '\'', '<', '>':
b := bytes.NewBuffer(nil)
ixml.EscapeText(b, []byte(s))
return b.String()
}
}
return s
}
// Next returns the next token, if any, in the XML stream of d.
// RFC 4918 requires that comments, processing instructions
// and directives be ignored.
// http://www.webdav.org/specs/rfc4918.html#property_values
// http://www.webdav.org/specs/rfc4918.html#xml-extensibility
func next(d *ixml.Decoder) (ixml.Token, error) {
for {
t, err := d.Token()
if err != nil {
return t, err
}
switch t.(type) {
case ixml.Comment, ixml.Directive, ixml.ProcInst:
continue
default:
return t, nil
}
}
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind)
type propfindProps []xml.Name
// UnmarshalXML appends the property names enclosed within start to pn.
//
// It returns an error if start does not contain any properties or if
// properties contain values. Character data between properties is ignored.
func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
for {
t, err := next(d)
if err != nil {
return err
}
switch t.(type) {
case ixml.EndElement:
if len(*pn) == 0 {
return fmt.Errorf("%s must not be empty", start.Name.Local)
}
return nil
case ixml.StartElement:
name := t.(ixml.StartElement).Name
t, err = next(d)
if err != nil {
return err
}
if _, ok := t.(ixml.EndElement); !ok {
return fmt.Errorf("unexpected token %T", t)
}
*pn = append(*pn, xml.Name(name))
}
}
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind
type propfind struct {
XMLName ixml.Name `xml:"DAV: propfind"`
Allprop *struct{} `xml:"DAV: allprop"`
Propname *struct{} `xml:"DAV: propname"`
Prop propfindProps `xml:"DAV: prop"`
Include propfindProps `xml:"DAV: include"`
}
func readPropfind(r io.Reader) (pf propfind, status int, err error) {
c := countingReader{r: r}
if err = ixml.NewDecoder(&c).Decode(&pf); err != nil {
if err == io.EOF {
if c.n == 0 {
// An empty body means to propfind allprop.
// http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
return propfind{Allprop: new(struct{})}, 0, nil
}
err = errInvalidPropfind
}
return propfind{}, http.StatusBadRequest, err
}
if pf.Allprop == nil && pf.Include != nil {
return propfind{}, http.StatusBadRequest, errInvalidPropfind
}
if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) {
return propfind{}, http.StatusBadRequest, errInvalidPropfind
}
if pf.Prop != nil && pf.Propname != nil {
return propfind{}, http.StatusBadRequest, errInvalidPropfind
}
if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil {
return propfind{}, http.StatusBadRequest, errInvalidPropfind
}
return pf, 0, nil
}
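// exampleReadPropfind is an illustrative sketch, not part of the vendored
// upstream file: a propname request body yields a propfind value with only
// Propname set, while a completely empty body is treated as allprop.
func exampleReadPropfind() {
	body := bytes.NewReader([]byte(`<?xml version="1.0" encoding="utf-8"?>` +
		`<D:propfind xmlns:D="DAV:"><D:propname/></D:propfind>`))
	pf, status, err := readPropfind(body)
	fmt.Println(pf.Propname != nil, status, err) // true 0 <nil>
}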
// Property represents a single DAV resource property as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties
type Property struct {
// XMLName is the fully qualified name that identifies this property.
XMLName xml.Name
// Lang is an optional xml:lang attribute.
Lang string `xml:"xml:lang,attr,omitempty"`
// InnerXML contains the XML representation of the property value.
// See http://www.webdav.org/specs/rfc4918.html#property_values
//
// Property values of complex type or mixed-content must have fully
// expanded XML namespaces or be self-contained with according
// XML namespace declarations. They must not rely on any XML
// namespace declarations within the scope of the XML document,
// even including the DAV: namespace.
InnerXML []byte `xml:",innerxml"`
}
// ixmlProperty is the same as the Property type except it holds an ixml.Name
// instead of an xml.Name.
type ixmlProperty struct {
XMLName ixml.Name
Lang string `xml:"xml:lang,attr,omitempty"`
InnerXML []byte `xml:",innerxml"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error
// See multistatusWriter for the "D:" namespace prefix.
type xmlError struct {
XMLName ixml.Name `xml:"D:error"`
InnerXML []byte `xml:",innerxml"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
// See multistatusWriter for the "D:" namespace prefix.
type propstat struct {
Prop []Property `xml:"D:prop>_ignored_"`
Status string `xml:"D:status"`
Error *xmlError `xml:"D:error"`
ResponseDescription string `xml:"D:responsedescription,omitempty"`
}
// ixmlPropstat is the same as the propstat type except it holds an ixml.Name
// instead of an xml.Name.
type ixmlPropstat struct {
Prop []ixmlProperty `xml:"D:prop>_ignored_"`
Status string `xml:"D:status"`
Error *xmlError `xml:"D:error"`
ResponseDescription string `xml:"D:responsedescription,omitempty"`
}
// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace
// before encoding. See multistatusWriter.
func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error {
// Convert from a propstat to an ixmlPropstat.
ixmlPs := ixmlPropstat{
Prop: make([]ixmlProperty, len(ps.Prop)),
Status: ps.Status,
Error: ps.Error,
ResponseDescription: ps.ResponseDescription,
}
for k, prop := range ps.Prop {
ixmlPs.Prop[k] = ixmlProperty{
XMLName: ixml.Name(prop.XMLName),
Lang: prop.Lang,
InnerXML: prop.InnerXML,
}
}
for k, prop := range ixmlPs.Prop {
if prop.XMLName.Space == "DAV:" {
prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local}
ixmlPs.Prop[k] = prop
}
}
// Distinct type to avoid infinite recursion of MarshalXML.
type newpropstat ixmlPropstat
return e.EncodeElement(newpropstat(ixmlPs), start)
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response
// See multistatusWriter for the "D:" namespace prefix.
type response struct {
XMLName ixml.Name `xml:"D:response"`
Href []string `xml:"D:href"`
Propstat []propstat `xml:"D:propstat"`
Status string `xml:"D:status,omitempty"`
Error *xmlError `xml:"D:error"`
ResponseDescription string `xml:"D:responsedescription,omitempty"`
}
// MultistatusWriter marshals one or more Responses into an XML
// multistatus response.
// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus
// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as
// "DAV:" on this element, is prepended on the nested response, as well as on all
// its nested elements. All property names in the DAV: namespace are prefixed as
// well. This is because some versions of Mini-Redirector (on windows 7) ignore
// elements with a default namespace (no prefixed namespace). A less intrusive fix
// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177
type multistatusWriter struct {
// ResponseDescription contains the optional responsedescription
// of the multistatus XML element. Only the latest content before
// close will be emitted. Empty response descriptions are not
// written.
responseDescription string
w http.ResponseWriter
enc *ixml.Encoder
}
// Write validates and emits a DAV response as part of a multistatus response
// element.
//
// It sets the HTTP status code of its underlying http.ResponseWriter to 207
// (Multi-Status) and populates the Content-Type header. If r is the
// first valid response to be written, Write prepends the XML representation
// of r with a multistatus tag. Callers must call close after the last response
// has been written.
func (w *multistatusWriter) write(r *response) error {
switch len(r.Href) {
case 0:
return errInvalidResponse
case 1:
if len(r.Propstat) > 0 != (r.Status == "") {
return errInvalidResponse
}
default:
if len(r.Propstat) > 0 || r.Status == "" {
return errInvalidResponse
}
}
err := w.writeHeader()
if err != nil {
return err
}
return w.enc.Encode(r)
}
// writeHeader writes an XML multistatus start element on w's underlying
// http.ResponseWriter and returns the result of the write operation.
// After the first write attempt, writeHeader becomes a no-op.
func (w *multistatusWriter) writeHeader() error {
if w.enc != nil {
return nil
}
w.w.Header().Add("Content-Type", "text/xml; charset=utf-8")
w.w.WriteHeader(StatusMulti)
_, err := fmt.Fprintf(w.w, `<?xml version="1.0" encoding="UTF-8"?>`)
if err != nil {
return err
}
w.enc = ixml.NewEncoder(w.w)
return w.enc.EncodeToken(ixml.StartElement{
Name: ixml.Name{
Space: "DAV:",
Local: "multistatus",
},
Attr: []ixml.Attr{{
Name: ixml.Name{Space: "xmlns", Local: "D"},
Value: "DAV:",
}},
})
}
// Close completes the marshalling of the multistatus response. It returns
// an error if the multistatus response could not be completed. If both the
// return value and field enc of w are nil, then no multistatus response has
// been written.
func (w *multistatusWriter) close() error {
if w.enc == nil {
return nil
}
var end []ixml.Token
if w.responseDescription != "" {
name := ixml.Name{Space: "DAV:", Local: "responsedescription"}
end = append(end,
ixml.StartElement{Name: name},
ixml.CharData(w.responseDescription),
ixml.EndElement{Name: name},
)
}
end = append(end, ixml.EndElement{
Name: ixml.Name{Space: "DAV:", Local: "multistatus"},
})
for _, t := range end {
err := w.enc.EncodeToken(t)
if err != nil {
return err
}
}
return w.enc.Flush()
}
var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"}
func xmlLang(s ixml.StartElement, d string) string {
for _, attr := range s.Attr {
if attr.Name == xmlLangName {
return attr.Value
}
}
return d
}
type xmlValue []byte
func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
// The XML value of a property can be arbitrary, mixed-content XML.
// To make sure that the unmarshalled value contains all required
// namespaces, we encode all the property value XML tokens into a
// buffer. This forces the encoder to redeclare any used namespaces.
var b bytes.Buffer
e := ixml.NewEncoder(&b)
for {
t, err := next(d)
if err != nil {
return err
}
if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name {
break
}
if err = e.EncodeToken(t); err != nil {
return err
}
}
err := e.Flush()
if err != nil {
return err
}
*v = b.Bytes()
return nil
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch)
type proppatchProps []Property
// UnmarshalXML appends the property names and values enclosed within start
// to ps.
//
// An xml:lang attribute that is defined either on the DAV:prop or property
// name XML element is propagated to the property's Lang field.
//
// UnmarshalXML returns an error if start does not contain any properties or if
// property values contain syntactically incorrect XML.
func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
lang := xmlLang(start, "")
for {
t, err := next(d)
if err != nil {
return err
}
switch elem := t.(type) {
case ixml.EndElement:
if len(*ps) == 0 {
return fmt.Errorf("%s must not be empty", start.Name.Local)
}
return nil
case ixml.StartElement:
p := Property{
XMLName: xml.Name(t.(ixml.StartElement).Name),
Lang: xmlLang(t.(ixml.StartElement), lang),
}
err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem)
if err != nil {
return err
}
*ps = append(*ps, p)
}
}
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove
type setRemove struct {
XMLName ixml.Name
Lang string `xml:"xml:lang,attr,omitempty"`
Prop proppatchProps `xml:"DAV: prop"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate
type propertyupdate struct {
XMLName ixml.Name `xml:"DAV: propertyupdate"`
Lang string `xml:"xml:lang,attr,omitempty"`
SetRemove []setRemove `xml:",any"`
}
func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) {
var pu propertyupdate
if err = ixml.NewDecoder(r).Decode(&pu); err != nil {
return nil, http.StatusBadRequest, err
}
for _, op := range pu.SetRemove {
remove := false
switch op.XMLName {
case ixml.Name{Space: "DAV:", Local: "set"}:
// No-op.
case ixml.Name{Space: "DAV:", Local: "remove"}:
for _, p := range op.Prop {
if len(p.InnerXML) > 0 {
return nil, http.StatusBadRequest, errInvalidProppatch
}
}
remove = true
default:
return nil, http.StatusBadRequest, errInvalidProppatch
}
patches = append(patches, Proppatch{Remove: remove, Props: op.Prop})
}
return patches, 0, nil
}