Mirror of https://github.com/Luzifer/badge-gen.git, synced 2024-11-09 13:50:03 +00:00

Commit 1fe856f72c (parent 67a7786502)
Deps: Update vendors
Signed-off-by: Knut Ahlers <knut@ahlers.me>

76 changed files with 3160 additions and 6974 deletions
Gopkg.lock | 105 (generated)
|
@ -2,109 +2,146 @@
|
|||
|
||||
|
||||
[[projects]]
|
||||
digest = "1:38fd40b35c4613c0793c6796528435a5358d95a77363d6c1fabe4e8d476b2896"
|
||||
name = "github.com/Luzifer/go_helpers"
|
||||
packages = [
|
||||
"accessLogger",
|
||||
"str"
|
||||
"str",
|
||||
]
|
||||
revision = "94b91ff63a5db8e22c4d121e6c5c17b44135be4d"
|
||||
version = "v2.5.0"
|
||||
pruneopts = "NUT"
|
||||
revision = "c3bea85c97943065c31d13b2193a9ef8c8488fb2"
|
||||
version = "v2.8.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6503b55a93848f2365ab2715ed64ec47ff245026668a0c5409807a347e0f6568"
|
||||
name = "github.com/Luzifer/rconfig"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "7aef1d393c1e2d0758901853b59981c7adc67c7e"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd"
|
||||
name = "github.com/beorn7/perks"
|
||||
packages = ["quantile"]
|
||||
pruneopts = "NUT"
|
||||
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:62c57507df491b657e9ae2645f30958d9964c8eeb380600469eedc951ebb3a0e"
|
||||
name = "github.com/golang/freetype"
|
||||
packages = [
|
||||
"raster",
|
||||
"truetype"
|
||||
"truetype",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "e2365dfdc4a05e4b8299a783240d4a7d5a65d4e4"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:97df918963298c287643883209a2c3f642e6593379f97ab400c2a2e219ab647d"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto"]
|
||||
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
|
||||
version = "v1.1.0"
|
||||
pruneopts = "NUT"
|
||||
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:c01767916c59f084bb7c41a7d5877c0f3099b1595cfa066e84ec6ad6b084dd89"
|
||||
name = "github.com/gorilla/context"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:bf5cf1d53d703332e9bd8984c69784645b73a938317bf5ace9aadf20ac49379a"
|
||||
name = "github.com/gorilla/mux"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf"
|
||||
version = "v1.6.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
|
||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||
packages = ["pbutil"]
|
||||
pruneopts = "NUT"
|
||||
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5cf3f025cbee5951a4ee961de067c8a89fc95a5adabead774f82822efabab121"
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:3e5fd795ebf6a9e13e67d644da76130af7a6003286531f9573f8074c228b66a3"
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = ["prometheus"]
|
||||
pruneopts = "NUT"
|
||||
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
|
||||
name = "github.com/prometheus/client_model"
|
||||
packages = ["go"]
|
||||
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
|
||||
pruneopts = "NUT"
|
||||
revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:fad5a35eea6a1a33d6c8f949fbc146f24275ca809ece854248187683f52cc30b"
|
||||
name = "github.com/prometheus/common"
|
||||
packages = [
|
||||
"expfmt",
|
||||
"internal/bitbucket.org/ww/goautoneg",
|
||||
"model"
|
||||
"model",
|
||||
]
|
||||
revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
|
||||
pruneopts = "NUT"
|
||||
revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:102dea0c03a915acfc634b7c67f2662012b5483b56d9025e33f5188e112759b6"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [
|
||||
".",
|
||||
"internal/util",
|
||||
"nfs",
|
||||
"xfs"
|
||||
"xfs",
|
||||
]
|
||||
revision = "94663424ae5ae9856b40a9f170762b4197024661"
|
||||
pruneopts = "NUT"
|
||||
revision = "185b4288413d2a0dd0806f78c90dde719829e5ae"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
|
||||
version = "v1.0.1"
|
||||
pruneopts = "NUT"
|
||||
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
|
||||
version = "v1.0.3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:37c42f4f9fe651e3ed41d41d1ed2c98b0b9b5e10f86bb66bd57358023ab7ca99"
|
||||
name = "github.com/tdewolff/minify"
|
||||
packages = [
|
||||
".",
|
||||
"css",
|
||||
"svg"
|
||||
"svg",
|
||||
]
|
||||
revision = "222672169d634c440a73abc47685074e1a9daa60"
|
||||
version = "v2.3.4"
|
||||
pruneopts = "NUT"
|
||||
revision = "8d72a4127ae33b755e95bffede9b92e396267ce2"
|
||||
version = "v2.3.5"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:44aa13c140f0c337785468c9543a6dcf028d72d3ce678d1ce3309d041c3c9f39"
|
||||
name = "github.com/tdewolff/parse"
|
||||
packages = [
|
||||
".",
|
||||
|
@ -112,41 +149,63 @@
|
|||
"css",
|
||||
"strconv",
|
||||
"svg",
|
||||
"xml"
|
||||
"xml",
|
||||
]
|
||||
revision = "639f6272aec6b52094db77b9ec488214b0b4b1a1"
|
||||
version = "v2.3.2"
|
||||
pruneopts = "NUT"
|
||||
revision = "d739d6fccb0971177e06352fea02d3552625efb1"
|
||||
version = "v2.3.3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:6d674a768fd27ad126eb8357ebbc14cb5b000badd62d4b7cbe403abb231e099e"
|
||||
name = "golang.org/x/image"
|
||||
packages = [
|
||||
"font",
|
||||
"math/fixed"
|
||||
"math/fixed",
|
||||
]
|
||||
revision = "af66defab954cb421ca110193eed9477c8541e2a"
|
||||
pruneopts = "NUT"
|
||||
revision = "991ec62608f3c0da01d400756917825d1e2fd528"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:76ee51c3f468493aff39dbacc401e8831fbb765104cbf613b89bef01cf4bad70"
|
||||
name = "golang.org/x/net"
|
||||
packages = ["context"]
|
||||
revision = "1e491301e022f8f977054da4c2d852decd59571f"
|
||||
pruneopts = "NUT"
|
||||
revision = "146acd28ed5894421fb5aac80ca93bc1b1f46f87"
|
||||
|
||||
[[projects]]
|
||||
branch = "v2"
|
||||
digest = "1:1ab6db2d2bd353449c5d1e976ba7a92a0ece6e83aaab3e6674f8f2f1faebb85a"
|
||||
name = "gopkg.in/validator.v2"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "135c24b11c19e52befcae2ec3fca5d9b78c4e98e"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
|
||||
version = "v2.2.1"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "e6aef6c9173fa0d080b8bc11f91175f7ec7ec623e016e9581947dce7ec50a1e7"
|
||||
input-imports = [
|
||||
"github.com/Luzifer/go_helpers/accessLogger",
|
||||
"github.com/Luzifer/go_helpers/str",
|
||||
"github.com/Luzifer/rconfig",
|
||||
"github.com/golang/freetype/truetype",
|
||||
"github.com/gorilla/mux",
|
||||
"github.com/pkg/errors",
|
||||
"github.com/prometheus/client_golang/prometheus",
|
||||
"github.com/tdewolff/minify",
|
||||
"github.com/tdewolff/minify/svg",
|
||||
"golang.org/x/image/math/fixed",
|
||||
"golang.org/x/net/context",
|
||||
"gopkg.in/yaml.v2",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
|
|
@ -41,6 +41,10 @@
|
|||
name = "github.com/gorilla/mux"
|
||||
version = "1.6.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/pkg/errors"
|
||||
version = "0.8.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
version = "0.8.0"
|
||||
|
@ -62,5 +66,6 @@
|
|||
version = "2.2.1"
|
||||
|
||||
[prune]
|
||||
non-go = true
|
||||
go-tests = true
|
||||
unused-packages = true
|
||||
|
|
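The Gopkg.toml additions pin gorilla/mux, pkg/errors and prometheus/client_golang, and the [prune] block mirrors the pruneopts = "NUT" flags now written into the lock file. As a rough, hypothetical sketch of how these pinned packages are typically wired together (the metric name, route and port are illustrative and not taken from badge-gen):

```go
// Hypothetical sketch only: wiring the pinned dependencies together.
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
)

// requests counts served badge requests per path (illustrative metric name).
var requests = prometheus.NewCounterVec(
	prometheus.CounterOpts{Name: "badge_requests_total", Help: "Badge requests served"},
	[]string{"path"},
)

func main() {
	prometheus.MustRegister(requests)

	r := mux.NewRouter()
	r.HandleFunc("/v1/badge", func(w http.ResponseWriter, req *http.Request) {
		requests.WithLabelValues(req.URL.Path).Inc()
		w.Write([]byte("ok"))
	})

	if err := http.ListenAndServe(":3000", r); err != nil {
		log.Fatal(errors.Wrap(err, "serving HTTP"))
	}
}
```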
vendor/github.com/Luzifer/go_helpers/LICENSE | 202 (generated, vendored, new file)
|
@ -0,0 +1,202 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2016- Knut Ahlers <knut@ahlers.me>
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
vendor/github.com/Luzifer/rconfig/.travis.yml | 8 (generated, vendored)
|
@ -1,8 +0,0 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.6
|
||||
- 1.7
|
||||
- tip
|
||||
|
||||
script: go test -v -race -cover ./...
|
vendor/github.com/Luzifer/rconfig/History.md | 9 (generated, vendored)
|
@ -1,9 +0,0 @@
|
|||
# 1.2.0 / 2017-06-19
|
||||
|
||||
* Add ParseAndValidate method
|
||||
|
||||
# 1.1.0 / 2016-06-28
|
||||
|
||||
* Support time.Duration config parameters
|
||||
* Added goreportcard badge
|
||||
* Added testcase for using bool with ENV and default
|
vendor/github.com/Luzifer/rconfig/README.md | 87 (generated, vendored)
|
@ -1,87 +0,0 @@
|
|||
[![Build Status](https://travis-ci.org/Luzifer/rconfig.svg?branch=master)](https://travis-ci.org/Luzifer/rconfig)
|
||||
[![License: Apache v2.0](https://badge.luzifer.io/v1/badge?color=5d79b5&title=license&text=Apache+v2.0)](http://www.apache.org/licenses/LICENSE-2.0)
|
||||
[![Documentation](https://badge.luzifer.io/v1/badge?title=godoc&text=reference)](https://godoc.org/github.com/Luzifer/rconfig)
|
||||
[![Go Report](http://goreportcard.com/badge/Luzifer/rconfig)](http://goreportcard.com/report/Luzifer/rconfig)
|
||||
|
||||
## Description
|
||||
|
||||
> Package rconfig implements a CLI configuration reader with struct-embedded defaults, environment variables and posix compatible flag parsing using the [pflag](https://github.com/spf13/pflag) library.
|
||||
|
||||
## Installation
|
||||
|
||||
Install by running:
|
||||
|
||||
```
|
||||
go get -u github.com/Luzifer/rconfig
|
||||
```
|
||||
|
||||
OR fetch a specific version:
|
||||
|
||||
```
|
||||
go get -u gopkg.in/luzifer/rconfig.v1
|
||||
```
|
||||
|
||||
Run tests by running:
|
||||
|
||||
```
|
||||
go test -v -race -cover github.com/Luzifer/rconfig
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
A very simple usecase is to just configure a struct inside the vars section of your `main.go` and to parse the commandline flags from the `main()` function:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/Luzifer/rconfig"
|
||||
)
|
||||
|
||||
var (
|
||||
cfg = struct {
|
||||
Username string `default:"unknown" flag:"user" description:"Your name"`
|
||||
Details struct {
|
||||
Age int `default:"25" flag:"age" env:"age" description:"Your age"`
|
||||
}
|
||||
}{}
|
||||
)
|
||||
|
||||
func main() {
|
||||
rconfig.Parse(&cfg)
|
||||
|
||||
fmt.Printf("Hello %s, happy birthday for your %dth birthday.",
|
||||
cfg.Username,
|
||||
cfg.Details.Age)
|
||||
}
|
||||
```
|
||||
|
||||
### Provide variable defaults by using a file
|
||||
|
||||
Given you have a file `~/.myapp.yml` containing some secrets or usernames (for the example below username is assumed to be "luzifer") as a default configuration for your application you can use this source code to load the defaults from that file using the `vardefault` tag in your configuration struct.
|
||||
|
||||
The order of the directives (lower number = higher precedence):
|
||||
|
||||
1. Flags provided in command line
|
||||
1. Environment variables
|
||||
1. Variable defaults (`vardefault` tag in the struct)
|
||||
1. `default` tag in the struct
|
||||
|
||||
```go
|
||||
var cfg = struct {
|
||||
Username string `vardefault:"username" flag:"username" description:"Your username"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
rconfig.SetVariableDefaults(rconfig.VarDefaultsFromYAMLFile("~/.myapp.yml"))
|
||||
rconfig.Parse(&cfg)
|
||||
|
||||
fmt.Printf("Username = %s", cfg.Username)
|
||||
// Output: Username = luzifer
|
||||
}
|
||||
```
|
||||
|
||||
## More info
|
||||
|
||||
You can see the full reference documentation of the rconfig package [at godoc.org](https://godoc.org/github.com/Luzifer/rconfig), or through go's standard documentation system by running `godoc -http=:6060` and browsing to [http://localhost:6060/pkg/github.com/Luzifer/rconfig](http://localhost:6060/pkg/github.com/Luzifer/rconfig) after installation.
|
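Although this vendored README is removed by the commit, the precedence it documents (command-line flags over environment variables over vardefault over struct defaults) is easy to demonstrate; the following is an illustrative sketch based only on the API shown in the README above, not code from the repository:

```go
// Illustrative only: the documented precedence (flag > env > struct default).
package main

import (
	"fmt"
	"os"

	"github.com/Luzifer/rconfig"
)

var cfg = struct {
	Age int `default:"25" flag:"age" env:"age" description:"Your age"`
}{}

func main() {
	os.Setenv("age", "30") // the env value overrides the struct default...
	rconfig.Parse(&cfg)    // ...unless --age is passed on the command line
	fmt.Println(cfg.Age)   // prints 30 when no --age flag is given
}
```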
vendor/github.com/beorn7/perks/quantile/exampledata.txt | 2388 (generated, vendored)
File diff suppressed because it is too large
vendor/github.com/golang/protobuf/LICENSE | 3 (generated, vendored)
|
@ -1,7 +1,4 @@
|
|||
Go support for Protocol Buffers - Google's data interchange format
|
||||
|
||||
Copyright 2010 The Go Authors. All rights reserved.
|
||||
https://github.com/golang/protobuf
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
|
|
vendor/github.com/golang/protobuf/proto/encode.go | 18 (generated, vendored)
|
@ -37,27 +37,9 @@ package proto
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// RequiredNotSetError is the error returned if Marshal is called with
|
||||
// a protocol buffer struct whose required fields have not
|
||||
// all been initialized. It is also the error returned if Unmarshal is
|
||||
// called with an encoded protocol buffer that does not include all the
|
||||
// required fields.
|
||||
//
|
||||
// When printed, RequiredNotSetError reports the first unset required field in a
|
||||
// message. If the field cannot be precisely determined, it is reported as
|
||||
// "{Unknown}".
|
||||
type RequiredNotSetError struct {
|
||||
field string
|
||||
}
|
||||
|
||||
func (e *RequiredNotSetError) Error() string {
|
||||
return fmt.Sprintf("proto: required field %q not set", e.field)
|
||||
}
|
||||
|
||||
var (
|
||||
// errRepeatedHasNil is the error returned if Marshal is called with
|
||||
// a struct with a repeated field containing a nil element.
|
||||
|
|
vendor/github.com/golang/protobuf/proto/lib.go | 62 (generated, vendored)
|
@ -265,7 +265,6 @@ package proto
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
|
@ -274,7 +273,66 @@ import (
|
|||
"sync"
|
||||
)
|
||||
|
||||
var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string")
|
||||
// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
|
||||
// Marshal reports this when a required field is not initialized.
|
||||
// Unmarshal reports this when a required field is missing from the wire data.
|
||||
type RequiredNotSetError struct{ field string }
|
||||
|
||||
func (e *RequiredNotSetError) Error() string {
|
||||
if e.field == "" {
|
||||
return fmt.Sprintf("proto: required field not set")
|
||||
}
|
||||
return fmt.Sprintf("proto: required field %q not set", e.field)
|
||||
}
|
||||
func (e *RequiredNotSetError) RequiredNotSet() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type invalidUTF8Error struct{ field string }
|
||||
|
||||
func (e *invalidUTF8Error) Error() string {
|
||||
if e.field == "" {
|
||||
return "proto: invalid UTF-8 detected"
|
||||
}
|
||||
return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
|
||||
}
|
||||
func (e *invalidUTF8Error) InvalidUTF8() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
|
||||
// This error should not be exposed to the external API as such errors should
|
||||
// be recreated with the field information.
|
||||
var errInvalidUTF8 = &invalidUTF8Error{}
|
||||
|
||||
// isNonFatal reports whether the error is either a RequiredNotSet error
|
||||
// or a InvalidUTF8 error.
|
||||
func isNonFatal(err error) bool {
|
||||
if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
|
||||
return true
|
||||
}
|
||||
if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type nonFatal struct{ E error }
|
||||
|
||||
// Merge merges err into nf and reports whether it was successful.
|
||||
// Otherwise it returns false for any fatal non-nil errors.
|
||||
func (nf *nonFatal) Merge(err error) (ok bool) {
|
||||
if err == nil {
|
||||
return true // not an error
|
||||
}
|
||||
if !isNonFatal(err) {
|
||||
return false // fatal error
|
||||
}
|
||||
if nf.E == nil {
|
||||
nf.E = err // store first instance of non-fatal error
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Message is implemented by generated protocol buffer messages.
|
||||
type Message interface {
|
||||
|
|
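The lib.go hunk above replaces the plain errInvalidUTF8 sentinel with behaviour-based error types and a small nonFatal accumulator: required-field and invalid-UTF-8 problems are recorded once and marshaling continues, while every other error stays fatal. A self-contained sketch of the same pattern, using generic names rather than the proto package's internal types:

```go
// Standalone sketch of the non-fatal error accumulation the diff introduces
// (generic names; not the proto package's actual implementation).
package main

import (
	"errors"
	"fmt"
)

type requiredNotSet struct{ field string }

func (e *requiredNotSet) Error() string        { return "required field not set: " + e.field }
func (e *requiredNotSet) RequiredNotSet() bool { return true }

// isNonFatal mirrors the behaviour-based check: any error exposing
// RequiredNotSet() or InvalidUTF8() is recorded but does not abort marshaling.
func isNonFatal(err error) bool {
	if e, ok := err.(interface{ RequiredNotSet() bool }); ok && e.RequiredNotSet() {
		return true
	}
	if e, ok := err.(interface{ InvalidUTF8() bool }); ok && e.InvalidUTF8() {
		return true
	}
	return false
}

type nonFatal struct{ E error }

// Merge keeps the first non-fatal error and reports whether processing may continue.
func (nf *nonFatal) Merge(err error) bool {
	if err == nil {
		return true
	}
	if !isNonFatal(err) {
		return false // fatal: the caller should return err immediately
	}
	if nf.E == nil {
		nf.E = err
	}
	return true
}

func main() {
	var nf nonFatal
	fmt.Println(nf.Merge(&requiredNotSet{"name"})) // true: recorded, keep going
	fmt.Println(nf.Merge(errors.New("io failure"))) // false: fatal
	fmt.Println(nf.E)                               // required field not set: name
}
```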
vendor/github.com/golang/protobuf/proto/properties.go | 14 (generated, vendored)
|
@ -139,7 +139,7 @@ type Properties struct {
|
|||
Repeated bool
|
||||
Packed bool // relevant for repeated primitives only
|
||||
Enum string // set for enum types only
|
||||
proto3 bool // whether this is known to be a proto3 field; set for []byte only
|
||||
proto3 bool // whether this is known to be a proto3 field
|
||||
oneof bool // whether this is a oneof field
|
||||
|
||||
Default string // default value
|
||||
|
@ -149,8 +149,8 @@ type Properties struct {
|
|||
sprop *StructProperties // set for struct types only
|
||||
|
||||
mtype reflect.Type // set for map types only
|
||||
mkeyprop *Properties // set for map types only
|
||||
mvalprop *Properties // set for map types only
|
||||
MapKeyProp *Properties // set for map types only
|
||||
MapValProp *Properties // set for map types only
|
||||
}
|
||||
|
||||
// String formats the properties in the protobuf struct field tag style.
|
||||
|
@ -275,16 +275,16 @@ func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, loc
|
|||
|
||||
case reflect.Map:
|
||||
p.mtype = t1
|
||||
p.mkeyprop = &Properties{}
|
||||
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
|
||||
p.mvalprop = &Properties{}
|
||||
p.MapKeyProp = &Properties{}
|
||||
p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
|
||||
p.MapValProp = &Properties{}
|
||||
vtype := p.mtype.Elem()
|
||||
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
|
||||
// The value type is not a message (*T) or bytes ([]byte),
|
||||
// so we need encoders for the pointer to this type.
|
||||
vtype = reflect.PtrTo(vtype)
|
||||
}
|
||||
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
||||
p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
||||
}
|
||||
|
||||
if p.stype != nil {
|
||||
|
|
vendor/github.com/golang/protobuf/proto/table_marshal.go | 190 (generated, vendored)
|
@ -231,7 +231,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
|
|||
return b, err
|
||||
}
|
||||
|
||||
var err, errreq error
|
||||
var err, errLater error
|
||||
// The old marshaler encodes extensions at beginning.
|
||||
if u.extensions.IsValid() {
|
||||
e := ptr.offset(u.extensions).toExtensions()
|
||||
|
@ -252,11 +252,13 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
|
|||
}
|
||||
}
|
||||
for _, f := range u.fields {
|
||||
if f.required && errreq == nil {
|
||||
if f.required {
|
||||
if ptr.offset(f.field).getPointer().isNil() {
|
||||
// Required field is not set.
|
||||
// We record the error but keep going, to give a complete marshaling.
|
||||
errreq = &RequiredNotSetError{f.name}
|
||||
if errLater == nil {
|
||||
errLater = &RequiredNotSetError{f.name}
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
@ -269,14 +271,21 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
|
|||
if err1, ok := err.(*RequiredNotSetError); ok {
|
||||
// Required field in submessage is not set.
|
||||
// We record the error but keep going, to give a complete marshaling.
|
||||
if errreq == nil {
|
||||
errreq = &RequiredNotSetError{f.name + "." + err1.field}
|
||||
if errLater == nil {
|
||||
errLater = &RequiredNotSetError{f.name + "." + err1.field}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err == errRepeatedHasNil {
|
||||
err = errors.New("proto: repeated field " + f.name + " has nil element")
|
||||
}
|
||||
if err == errInvalidUTF8 {
|
||||
if errLater == nil {
|
||||
fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
|
||||
errLater = &invalidUTF8Error{fullName}
|
||||
}
|
||||
continue
|
||||
}
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
|
@ -284,7 +293,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
|
|||
s := *ptr.offset(u.unrecognized).toBytes()
|
||||
b = append(b, s...)
|
||||
}
|
||||
return b, errreq
|
||||
return b, errLater
|
||||
}
|
||||
|
||||
// computeMarshalInfo initializes the marshal info.
|
||||
|
@ -530,6 +539,7 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
|
|||
|
||||
packed := false
|
||||
proto3 := false
|
||||
validateUTF8 := true
|
||||
for i := 2; i < len(tags); i++ {
|
||||
if tags[i] == "packed" {
|
||||
packed = true
|
||||
|
@ -538,6 +548,7 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
|
|||
proto3 = true
|
||||
}
|
||||
}
|
||||
validateUTF8 = validateUTF8 && proto3
|
||||
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
|
@ -735,6 +746,18 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
|
|||
}
|
||||
return sizeFloat64Value, appendFloat64Value
|
||||
case reflect.String:
|
||||
if validateUTF8 {
|
||||
if pointer {
|
||||
return sizeStringPtr, appendUTF8StringPtr
|
||||
}
|
||||
if slice {
|
||||
return sizeStringSlice, appendUTF8StringSlice
|
||||
}
|
||||
if nozero {
|
||||
return sizeStringValueNoZero, appendUTF8StringValueNoZero
|
||||
}
|
||||
return sizeStringValue, appendUTF8StringValue
|
||||
}
|
||||
if pointer {
|
||||
return sizeStringPtr, appendStringPtr
|
||||
}
|
||||
|
@ -1984,9 +2007,6 @@ func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byt
|
|||
}
|
||||
func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
|
||||
v := *ptr.toString()
|
||||
if !utf8.ValidString(v) {
|
||||
return nil, errInvalidUTF8
|
||||
}
|
||||
b = appendVarint(b, wiretag)
|
||||
b = appendVarint(b, uint64(len(v)))
|
||||
b = append(b, v...)
|
||||
|
@ -1997,9 +2017,6 @@ func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]b
|
|||
if v == "" {
|
||||
return b, nil
|
||||
}
|
||||
if !utf8.ValidString(v) {
|
||||
return nil, errInvalidUTF8
|
||||
}
|
||||
b = appendVarint(b, wiretag)
|
||||
b = appendVarint(b, uint64(len(v)))
|
||||
b = append(b, v...)
|
||||
|
@ -2011,9 +2028,6 @@ func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, err
|
|||
return b, nil
|
||||
}
|
||||
v := *p
|
||||
if !utf8.ValidString(v) {
|
||||
return nil, errInvalidUTF8
|
||||
}
|
||||
b = appendVarint(b, wiretag)
|
||||
b = appendVarint(b, uint64(len(v)))
|
||||
b = append(b, v...)
|
||||
|
@ -2022,12 +2036,74 @@ func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, err
|
|||
func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
|
||||
s := *ptr.toStringSlice()
|
||||
for _, v := range s {
|
||||
b = appendVarint(b, wiretag)
|
||||
b = appendVarint(b, uint64(len(v)))
|
||||
b = append(b, v...)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
|
||||
var invalidUTF8 bool
|
||||
v := *ptr.toString()
|
||||
if !utf8.ValidString(v) {
|
||||
return nil, errInvalidUTF8
|
||||
invalidUTF8 = true
|
||||
}
|
||||
b = appendVarint(b, wiretag)
|
||||
b = appendVarint(b, uint64(len(v)))
|
||||
b = append(b, v...)
|
||||
if invalidUTF8 {
|
||||
return b, errInvalidUTF8
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
|
||||
var invalidUTF8 bool
|
||||
v := *ptr.toString()
|
||||
if v == "" {
|
||||
return b, nil
|
||||
}
|
||||
if !utf8.ValidString(v) {
|
||||
invalidUTF8 = true
|
||||
}
|
||||
b = appendVarint(b, wiretag)
|
||||
b = appendVarint(b, uint64(len(v)))
|
||||
b = append(b, v...)
|
||||
if invalidUTF8 {
|
||||
return b, errInvalidUTF8
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
|
||||
var invalidUTF8 bool
|
||||
p := *ptr.toStringPtr()
|
||||
if p == nil {
|
||||
return b, nil
|
||||
}
|
||||
v := *p
|
||||
if !utf8.ValidString(v) {
|
||||
invalidUTF8 = true
|
||||
}
|
||||
b = appendVarint(b, wiretag)
|
||||
b = appendVarint(b, uint64(len(v)))
|
||||
b = append(b, v...)
|
||||
if invalidUTF8 {
|
||||
return b, errInvalidUTF8
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
|
||||
var invalidUTF8 bool
|
||||
s := *ptr.toStringSlice()
|
||||
for _, v := range s {
|
||||
if !utf8.ValidString(v) {
|
||||
invalidUTF8 = true
|
||||
}
|
||||
b = appendVarint(b, wiretag)
|
||||
b = appendVarint(b, uint64(len(v)))
|
||||
b = append(b, v...)
|
||||
}
|
||||
if invalidUTF8 {
|
||||
return b, errInvalidUTF8
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
@ -2107,7 +2183,8 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
|
|||
},
|
||||
func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
|
||||
s := ptr.getPointerSlice()
|
||||
var err, errreq error
|
||||
var err error
|
||||
var nerr nonFatal
|
||||
for _, v := range s {
|
||||
if v.isNil() {
|
||||
return b, errRepeatedHasNil
|
||||
|
@ -2115,22 +2192,14 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
|
|||
b = appendVarint(b, wiretag) // start group
|
||||
b, err = u.marshal(b, v, deterministic)
|
||||
b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
|
||||
if err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); ok {
|
||||
// Required field in submessage is not set.
|
||||
// We record the error but keep going, to give a complete marshaling.
|
||||
if errreq == nil {
|
||||
errreq = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if !nerr.Merge(err) {
|
||||
if err == ErrNil {
|
||||
err = errRepeatedHasNil
|
||||
}
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return b, errreq
|
||||
return b, nerr.E
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2174,7 +2243,8 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
|
|||
},
|
||||
func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
|
||||
s := ptr.getPointerSlice()
|
||||
var err, errreq error
|
||||
var err error
|
||||
var nerr nonFatal
|
||||
for _, v := range s {
|
||||
if v.isNil() {
|
||||
return b, errRepeatedHasNil
|
||||
|
@ -2184,22 +2254,14 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
|
|||
b = appendVarint(b, uint64(siz))
|
||||
b, err = u.marshal(b, v, deterministic)
|
||||
|
||||
if err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); ok {
|
||||
// Required field in submessage is not set.
|
||||
// We record the error but keep going, to give a complete marshaling.
|
||||
if errreq == nil {
|
||||
errreq = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if !nerr.Merge(err) {
|
||||
if err == ErrNil {
|
||||
err = errRepeatedHasNil
|
||||
}
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return b, errreq
|
||||
return b, nerr.E
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2223,6 +2285,25 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
|
|||
// value.
|
||||
// Key cannot be pointer-typed.
|
||||
valIsPtr := valType.Kind() == reflect.Ptr
|
||||
|
||||
// If value is a message with nested maps, calling
|
||||
// valSizer in marshal may be quadratic. We should use
|
||||
// cached version in marshal (but not in size).
|
||||
// If value is not message type, we don't have size cache,
|
||||
// but it cannot be nested either. Just use valSizer.
|
||||
valCachedSizer := valSizer
|
||||
if valIsPtr && valType.Elem().Kind() == reflect.Struct {
|
||||
u := getMarshalInfo(valType.Elem())
|
||||
valCachedSizer = func(ptr pointer, tagsize int) int {
|
||||
// Same as message sizer, but use cache.
|
||||
p := ptr.getPointer()
|
||||
if p.isNil() {
|
||||
return 0
|
||||
}
|
||||
siz := u.cachedsize(p)
|
||||
return siz + SizeVarint(uint64(siz)) + tagsize
|
||||
}
|
||||
}
|
||||
return func(ptr pointer, tagsize int) int {
|
||||
m := ptr.asPointerTo(t).Elem() // the map
|
||||
n := 0
|
||||
|
@ -2243,24 +2324,26 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
|
|||
if len(keys) > 1 && deterministic {
|
||||
sort.Sort(mapKeys(keys))
|
||||
}
|
||||
|
||||
var nerr nonFatal
|
||||
for _, k := range keys {
|
||||
ki := k.Interface()
|
||||
vi := m.MapIndex(k).Interface()
|
||||
kaddr := toAddrPointer(&ki, false) // pointer to key
|
||||
vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
|
||||
b = appendVarint(b, tag)
|
||||
siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
|
||||
siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
|
||||
b = appendVarint(b, uint64(siz))
|
||||
b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
|
||||
if err != nil {
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
}
|
||||
b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
|
||||
if err != nil && err != ErrNil { // allow nil value in map
|
||||
if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
return b, nerr.E
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2333,6 +2416,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
|
|||
defer mu.Unlock()
|
||||
|
||||
var err error
|
||||
var nerr nonFatal
|
||||
|
||||
// Fast-path for common cases: zero or one extensions.
|
||||
// Don't bother sorting the keys.
|
||||
|
@ -2352,11 +2436,11 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
|
|||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
|
||||
if err != nil {
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
return b, nerr.E
|
||||
}
|
||||
|
||||
// Sort the keys to provide a deterministic encoding.
|
||||
|
@ -2383,11 +2467,11 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
|
|||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
|
||||
if err != nil {
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
return b, nerr.E
|
||||
}
|
||||
|
||||
// message set format is:
|
||||
|
@ -2444,6 +2528,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
|
|||
defer mu.Unlock()
|
||||
|
||||
var err error
|
||||
var nerr nonFatal
|
||||
|
||||
// Fast-path for common cases: zero or one extensions.
|
||||
// Don't bother sorting the keys.
|
||||
|
@ -2470,12 +2555,12 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
|
|||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
|
||||
if err != nil {
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
}
|
||||
b = append(b, 1<<3|WireEndGroup)
|
||||
}
|
||||
return b, nil
|
||||
return b, nerr.E
|
||||
}
|
||||
|
||||
// Sort the keys to provide a deterministic encoding.
|
||||
|
@ -2509,11 +2594,11 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
|
|||
p := toAddrPointer(&v, ei.isptr)
|
||||
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
|
||||
b = append(b, 1<<3|WireEndGroup)
|
||||
if err != nil {
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
return b, nerr.E
|
||||
}
|
||||
|
||||
// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
|
||||
|
@ -2556,6 +2641,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
|
|||
sort.Ints(keys)
|
||||
|
||||
var err error
|
||||
var nerr nonFatal
|
||||
for _, k := range keys {
|
||||
e := m[int32(k)]
|
||||
if e.value == nil || e.desc == nil {
|
||||
|
@ -2572,11 +2658,11 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
|
|||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
|
||||
if err != nil {
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
return b, nerr.E
|
||||
}
|
||||
|
||||
// newMarshaler is the interface representing objects that can marshal themselves.
|
||||
|
|
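A recurring change in table_marshal.go is that the new appendUTF8String* helpers no longer abort on invalid UTF-8: they append the bytes anyway so the encoded output stays complete, and only afterwards return errInvalidUTF8 so it can be merged as a non-fatal error. A simplified, standalone sketch of that "write anyway, flag afterwards" shape (the single-byte length prefix is a stand-in, not the real varint encoding):

```go
// Sketch of the "write anyway, flag afterwards" UTF-8 handling shown in the
// appendUTF8String* helpers above (simplified; not the real marshaler).
package main

import (
	"errors"
	"fmt"
	"unicode/utf8"
)

var errInvalidUTF8 = errors.New("invalid UTF-8")

// appendString always appends the string so the output stays complete,
// and only afterwards reports invalid UTF-8 as a non-fatal error.
func appendString(b []byte, v string) ([]byte, error) {
	invalid := !utf8.ValidString(v)
	b = append(b, byte(len(v))) // stand-in for the real varint length prefix
	b = append(b, v...)
	if invalid {
		return b, errInvalidUTF8
	}
	return b, nil
}

func main() {
	b, err := appendString(nil, string([]byte{0xff, 0xfe}))
	fmt.Println(len(b), err) // bytes are still written, error is still reported
}
```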
vendor/github.com/golang/protobuf/proto/table_unmarshal.go | 144 (generated, vendored)
|
@ -97,6 +97,8 @@ type unmarshalFieldInfo struct {
|
|||
|
||||
// if a required field, contains a single set bit at this field's index in the required field list.
|
||||
reqMask uint64
|
||||
|
||||
name string // name of the field, for error reporting
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -137,7 +139,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
|
|||
return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
|
||||
}
|
||||
var reqMask uint64 // bitmask of required fields we've seen.
|
||||
var rnse *RequiredNotSetError // an instance of a RequiredNotSetError returned by a submessage.
|
||||
var errLater error
|
||||
for len(b) > 0 {
|
||||
// Read tag and wire type.
|
||||
// Special case 1 and 2 byte varints.
|
||||
|
@ -176,11 +178,20 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
|
|||
if r, ok := err.(*RequiredNotSetError); ok {
|
||||
// Remember this error, but keep parsing. We need to produce
|
||||
// a full parse even if a required field is missing.
|
||||
rnse = r
|
||||
if errLater == nil {
|
||||
errLater = r
|
||||
}
|
||||
reqMask |= f.reqMask
|
||||
continue
|
||||
}
|
||||
if err != errInternalBadWireType {
|
||||
if err == errInvalidUTF8 {
|
||||
if errLater == nil {
|
||||
fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
|
||||
errLater = &invalidUTF8Error{fullName}
|
||||
}
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
// Fragments with bad wire type are treated as unknown fields.
|
||||
|
@ -239,20 +250,16 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
|
|||
emap[int32(tag)] = e
|
||||
}
|
||||
}
|
||||
if rnse != nil {
|
||||
// A required field of a submessage/group is missing. Return that error.
|
||||
return rnse
|
||||
}
|
||||
if reqMask != u.reqMask {
|
||||
if reqMask != u.reqMask && errLater == nil {
|
||||
// A required field of this message is missing.
|
||||
for _, n := range u.reqFields {
|
||||
if reqMask&1 == 0 {
|
||||
return &RequiredNotSetError{n}
|
||||
errLater = &RequiredNotSetError{n}
|
||||
}
|
||||
reqMask >>= 1
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return errLater
|
||||
}
|
||||
|
||||
// computeUnmarshalInfo fills in u with information for use
|
||||
|
@ -351,7 +358,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
|
|||
}
|
||||
|
||||
// Store the info in the correct slot in the message.
|
||||
u.setTag(tag, toField(&f), unmarshal, reqMask)
|
||||
u.setTag(tag, toField(&f), unmarshal, reqMask, name)
|
||||
}
|
||||
|
||||
// Find any types associated with oneof fields.
|
||||
|
@ -366,10 +373,17 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
|
|||
|
||||
f := typ.Field(0) // oneof implementers have one field
|
||||
baseUnmarshal := fieldUnmarshaler(&f)
|
||||
tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1]
|
||||
tag, err := strconv.Atoi(tagstr)
|
||||
tags := strings.Split(f.Tag.Get("protobuf"), ",")
|
||||
fieldNum, err := strconv.Atoi(tags[1])
|
||||
if err != nil {
|
||||
panic("protobuf tag field not an integer: " + tagstr)
|
||||
panic("protobuf tag field not an integer: " + tags[1])
|
||||
}
|
||||
var name string
|
||||
for _, tag := range tags {
|
||||
if strings.HasPrefix(tag, "name=") {
|
||||
name = strings.TrimPrefix(tag, "name=")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Find the oneof field that this struct implements.
|
||||
|
@ -380,7 +394,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
|
|||
// That lets us know where this struct should be stored
|
||||
// when we encounter it during unmarshaling.
|
||||
unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
|
||||
u.setTag(tag, of.field, unmarshal, 0)
|
||||
u.setTag(fieldNum, of.field, unmarshal, 0, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -401,7 +415,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
|
|||
// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
|
||||
u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
|
||||
return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
|
||||
}, 0)
|
||||
}, 0, "")
|
||||
|
||||
// Set mask for required field check.
|
||||
u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
|
||||
|
@ -413,8 +427,9 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
|
|||
// tag = tag # for field
|
||||
// field/unmarshal = unmarshal info for that field.
|
||||
// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
|
||||
func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64) {
|
||||
i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask}
|
||||
// name = short name of the field.
|
||||
func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
|
||||
i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
|
||||
n := u.typ.NumField()
|
||||
if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
|
||||
for len(u.dense) <= tag {
|
||||
|
@ -442,11 +457,17 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
|
|||
tagArray := strings.Split(tags, ",")
|
||||
encoding := tagArray[0]
|
||||
name := "unknown"
|
||||
proto3 := false
|
||||
validateUTF8 := true
|
||||
for _, tag := range tagArray[3:] {
|
||||
if strings.HasPrefix(tag, "name=") {
|
||||
name = tag[5:]
|
||||
}
|
||||
if tag == "proto3" {
|
||||
proto3 = true
|
||||
}
|
||||
}
|
||||
validateUTF8 = validateUTF8 && proto3
|
||||
|
||||
// Figure out packaging (pointer, slice, or both)
|
||||
slice := false
|
||||
|
@ -594,6 +615,15 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
|
|||
}
|
||||
return unmarshalBytesValue
|
||||
case reflect.String:
|
||||
if validateUTF8 {
|
||||
if pointer {
|
||||
return unmarshalUTF8StringPtr
|
||||
}
|
||||
if slice {
|
||||
return unmarshalUTF8StringSlice
|
||||
}
|
||||
return unmarshalUTF8StringValue
|
||||
}
|
||||
if pointer {
|
||||
return unmarshalStringPtr
|
||||
}
|
||||
|
@ -1448,9 +1478,6 @@ func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
|
|||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
v := string(b[:x])
|
||||
if !utf8.ValidString(v) {
|
||||
return nil, errInvalidUTF8
|
||||
}
|
||||
*f.toString() = v
|
||||
return b[x:], nil
|
||||
}
|
||||
|
@ -1468,9 +1495,6 @@ func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
|
|||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
v := string(b[:x])
|
||||
if !utf8.ValidString(v) {
|
||||
return nil, errInvalidUTF8
|
||||
}
|
||||
*f.toStringPtr() = &v
|
||||
return b[x:], nil
|
||||
}
|
||||
|
@ -1488,14 +1512,72 @@ func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
|
|||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
v := string(b[:x])
|
||||
if !utf8.ValidString(v) {
|
||||
return nil, errInvalidUTF8
|
||||
}
|
||||
s := f.toStringSlice()
|
||||
*s = append(*s, v)
|
||||
return b[x:], nil
|
||||
}
|
||||
|
||||
func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
|
||||
if w != WireBytes {
|
||||
return b, errInternalBadWireType
|
||||
}
|
||||
x, n := decodeVarint(b)
|
||||
if n == 0 {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
b = b[n:]
|
||||
if x > uint64(len(b)) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
v := string(b[:x])
|
||||
*f.toString() = v
|
||||
if !utf8.ValidString(v) {
|
||||
return b[x:], errInvalidUTF8
|
||||
}
|
||||
return b[x:], nil
|
||||
}
|
||||
|
||||
func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
|
||||
if w != WireBytes {
|
||||
return b, errInternalBadWireType
|
||||
}
|
||||
x, n := decodeVarint(b)
|
||||
if n == 0 {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
b = b[n:]
|
||||
if x > uint64(len(b)) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
v := string(b[:x])
|
||||
*f.toStringPtr() = &v
|
||||
if !utf8.ValidString(v) {
|
||||
return b[x:], errInvalidUTF8
|
||||
}
|
||||
return b[x:], nil
|
||||
}
|
||||
|
||||
func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
|
||||
if w != WireBytes {
|
||||
return b, errInternalBadWireType
|
||||
}
|
||||
x, n := decodeVarint(b)
|
||||
if n == 0 {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
b = b[n:]
|
||||
if x > uint64(len(b)) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
v := string(b[:x])
|
||||
s := f.toStringSlice()
|
||||
*s = append(*s, v)
|
||||
if !utf8.ValidString(v) {
|
||||
return b[x:], errInvalidUTF8
|
||||
}
|
||||
return b[x:], nil
|
||||
}
|
||||
|
||||
var emptyBuf [0]byte
|
||||
|
||||
func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
|
||||
|
@ -1674,6 +1756,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
|
|||
// Maps will be somewhat slow. Oh well.
|
||||
|
||||
// Read key and value from data.
|
||||
var nerr nonFatal
|
||||
k := reflect.New(kt)
|
||||
v := reflect.New(vt)
|
||||
for len(b) > 0 {
|
||||
|
@ -1694,7 +1777,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
|
|||
err = errInternalBadWireType // skip unknown tag
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if nerr.Merge(err) {
|
||||
continue
|
||||
}
|
||||
if err != errInternalBadWireType {
|
||||
|
@ -1717,7 +1800,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
|
|||
// Insert into map.
|
||||
m.SetMapIndex(k.Elem(), v.Elem())
|
||||
|
||||
return r, nil
|
||||
return r, nerr.E
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1743,15 +1826,16 @@ func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshal
|
|||
// Unmarshal data into holder.
|
||||
// We unmarshal into the first field of the holder object.
|
||||
var err error
|
||||
var nerr nonFatal
|
||||
b, err = unmarshal(b, valToPointer(v).offset(field0), w)
|
||||
if err != nil {
|
||||
if !nerr.Merge(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Write pointer to holder into target field.
|
||||
f.asPointerTo(ityp).Elem().Set(v)
|
||||
|
||||
return b, nil
|
||||
return b, nerr.E
|
||||
}
|
||||
}
|
||||
|
||||
|
|
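The unmarshal table now also records each field's short name for error reporting, taken from the name= element of the protobuf struct tag alongside the numeric field tag. A standalone sketch of that tag parsing, with an illustrative sample tag string:

```go
// Sketch of the struct-tag parsing the unmarshal change performs: the field
// number comes from the second comma-separated element and the short name
// from the "name=" element (standalone illustration, not the proto code).
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func parseProtoTag(tag string) (fieldNum int, name string, err error) {
	parts := strings.Split(tag, ",")
	if len(parts) < 2 {
		return 0, "", fmt.Errorf("short protobuf tag: %q", tag)
	}
	fieldNum, err = strconv.Atoi(parts[1])
	if err != nil {
		return 0, "", fmt.Errorf("protobuf tag field not an integer: %s", parts[1])
	}
	for _, p := range parts {
		if strings.HasPrefix(p, "name=") {
			name = strings.TrimPrefix(p, "name=")
			break
		}
	}
	return fieldNum, name, nil
}

func main() {
	n, name, _ := parseProtoTag("bytes,1,opt,name=value,proto3") // illustrative tag
	fmt.Println(n, name)                                         // 1 value
}
```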
vendor/github.com/golang/protobuf/proto/text.go | 4 (generated, vendored)
|
@ -353,7 +353,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
|
||||
if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
|
@ -370,7 +370,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, val, props.mvalprop); err != nil {
|
||||
if err := tm.writeAny(w, val, props.MapValProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
|
|
6 vendor/github.com/golang/protobuf/proto/text_parser.go (generated, vendored)

@ -630,17 +630,17 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(key, props.mkeyprop); err != nil {
if err := p.readAny(key, props.MapKeyProp); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
case "value":
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
return err
}
if err := p.readAny(val, props.mvalprop); err != nil {
if err := p.readAny(val, props.MapValProp); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
19 vendor/github.com/gorilla/context/.travis.yml (generated, vendored)

@ -1,19 +0,0 @@
language: go
sudo: false

matrix:
include:
- go: 1.3
- go: 1.4
- go: 1.5
- go: 1.6
- go: 1.7
- go: tip
allow_failures:
- go: tip

script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d .)
- go vet $(go list ./... | grep -v /vendor/)
- go test -v -race ./...
10 vendor/github.com/gorilla/context/README.md (generated, vendored)

@ -1,10 +0,0 @@
context
=======
[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)

gorilla/context is a general purpose registry for global request variables.

> Note: gorilla/context, having been born well before `context.Context` existed, does not play well
> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`.

Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
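The removed README above points users away from gorilla/context and toward the standard library's request-scoped context. A minimal sketch of the `http.Request.Context()` approach it recommends, using only net/http and context; the middleware, key, and handler names here are illustrative, not taken from this repository:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"
)

// ctxKey is an unexported type so this context key cannot collide with
// keys defined by other packages.
type ctxKey string

const userKey ctxKey = "user"

// withUser attaches a request-scoped value via the standard context API,
// covering the role gorilla/context's Set/Get used to play.
func withUser(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := context.WithValue(r.Context(), userKey, "alice")
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

func hello(w http.ResponseWriter, r *http.Request) {
	user, _ := r.Context().Value(userKey).(string)
	fmt.Fprintf(w, "hello, %s\n", user)
}

func main() {
	http.Handle("/", withUser(http.HandlerFunc(hello)))
	log.Fatal(http.ListenAndServe(":8000", nil))
}
```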
23 vendor/github.com/gorilla/mux/.travis.yml (generated, vendored)

@ -1,23 +0,0 @@
language: go
sudo: false

matrix:
include:
- go: 1.5.x
- go: 1.6.x
- go: 1.7.x
- go: 1.8.x
- go: 1.9.x
- go: 1.10.x
- go: tip
allow_failures:
- go: tip

install:
- # Skip

script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d .)
- go tool vet .
- go test -v -race ./...
11 vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md (generated, vendored)

@ -1,11 +0,0 @@
**What version of Go are you running?** (Paste the output of `go version`)


**What version of gorilla/mux are you at?** (Paste the output of `git rev-parse HEAD` inside `$GOPATH/src/github.com/gorilla/mux`)


**Describe your problem** (and what you have tried so far)


**Paste a minimal, runnable, reproduction of your issue below** (use backticks to format it)
649 vendor/github.com/gorilla/mux/README.md (generated, vendored)
@ -1,649 +0,0 @@
|
|||
# gorilla/mux
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
|
||||
[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
|
||||
[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
|
||||
|
||||
![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
|
||||
|
||||
http://www.gorillatoolkit.org/pkg/mux
|
||||
|
||||
Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
|
||||
their respective handler.
|
||||
|
||||
The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
|
||||
|
||||
* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
|
||||
* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
|
||||
* URL hosts, paths and query values can have variables with an optional regular expression.
|
||||
* Registered URLs can be built, or "reversed", which helps maintaining references to resources.
|
||||
* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
|
||||
|
||||
---
|
||||
|
||||
* [Install](#install)
|
||||
* [Examples](#examples)
|
||||
* [Matching Routes](#matching-routes)
|
||||
* [Static Files](#static-files)
|
||||
* [Registered URLs](#registered-urls)
|
||||
* [Walking Routes](#walking-routes)
|
||||
* [Graceful Shutdown](#graceful-shutdown)
|
||||
* [Middleware](#middleware)
|
||||
* [Testing Handlers](#testing-handlers)
|
||||
* [Full Example](#full-example)
|
||||
|
||||
---
|
||||
|
||||
## Install
|
||||
|
||||
With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
|
||||
|
||||
```sh
|
||||
go get -u github.com/gorilla/mux
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
Let's start registering a couple of URL paths and handlers:
|
||||
|
||||
```go
|
||||
func main() {
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/", HomeHandler)
|
||||
r.HandleFunc("/products", ProductsHandler)
|
||||
r.HandleFunc("/articles", ArticlesHandler)
|
||||
http.Handle("/", r)
|
||||
}
|
||||
```
|
||||
|
||||
Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
|
||||
|
||||
Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
|
||||
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/products/{key}", ProductHandler)
|
||||
r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
|
||||
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
|
||||
```
|
||||
|
||||
The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
|
||||
|
||||
```go
|
||||
func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
|
||||
vars := mux.Vars(r)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprintf(w, "Category: %v\n", vars["category"])
|
||||
}
|
||||
```
|
||||
|
||||
And this is all you need to know about the basic usage. More advanced options are explained below.
|
||||
|
||||
### Matching Routes
|
||||
|
||||
Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
|
||||
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
// Only matches if domain is "www.example.com".
|
||||
r.Host("www.example.com")
|
||||
// Matches a dynamic subdomain.
|
||||
r.Host("{subdomain:[a-z]+}.domain.com")
|
||||
```
|
||||
|
||||
There are several other matchers that can be added. To match path prefixes:
|
||||
|
||||
```go
|
||||
r.PathPrefix("/products/")
|
||||
```
|
||||
|
||||
...or HTTP methods:
|
||||
|
||||
```go
|
||||
r.Methods("GET", "POST")
|
||||
```
|
||||
|
||||
...or URL schemes:
|
||||
|
||||
```go
|
||||
r.Schemes("https")
|
||||
```
|
||||
|
||||
...or header values:
|
||||
|
||||
```go
|
||||
r.Headers("X-Requested-With", "XMLHttpRequest")
|
||||
```
|
||||
|
||||
...or query values:
|
||||
|
||||
```go
|
||||
r.Queries("key", "value")
|
||||
```
|
||||
|
||||
...or to use a custom matcher function:
|
||||
|
||||
```go
|
||||
r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
|
||||
return r.ProtoMajor == 0
|
||||
})
|
||||
```
|
||||
|
||||
...and finally, it is possible to combine several matchers in a single route:
|
||||
|
||||
```go
|
||||
r.HandleFunc("/products", ProductsHandler).
|
||||
Host("www.example.com").
|
||||
Methods("GET").
|
||||
Schemes("http")
|
||||
```
|
||||
|
||||
Routes are tested in the order they were added to the router. If two routes match, the first one wins:
|
||||
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/specific", specificHandler)
|
||||
r.PathPrefix("/").Handler(catchAllHandler)
|
||||
```
|
||||
|
||||
Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
|
||||
|
||||
For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
|
||||
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
s := r.Host("www.example.com").Subrouter()
|
||||
```
|
||||
|
||||
Then register routes in the subrouter:
|
||||
|
||||
```go
|
||||
s.HandleFunc("/products/", ProductsHandler)
|
||||
s.HandleFunc("/products/{key}", ProductHandler)
|
||||
s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
|
||||
```
|
||||
|
||||
The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
|
||||
|
||||
Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter.
|
||||
|
||||
There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths:
|
||||
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
s := r.PathPrefix("/products").Subrouter()
|
||||
// "/products/"
|
||||
s.HandleFunc("/", ProductsHandler)
|
||||
// "/products/{key}/"
|
||||
s.HandleFunc("/{key}/", ProductHandler)
|
||||
// "/products/{key}/details"
|
||||
s.HandleFunc("/{key}/details", ProductDetailsHandler)
|
||||
```
|
||||
|
||||
|
||||
### Static Files
|
||||
|
||||
Note that the path provided to `PathPrefix()` represents a "wildcard": calling
|
||||
`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
|
||||
request that matches "/static/\*". This makes it easy to serve static files with mux:
|
||||
|
||||
```go
|
||||
func main() {
|
||||
var dir string
|
||||
|
||||
flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
|
||||
flag.Parse()
|
||||
r := mux.NewRouter()
|
||||
|
||||
// This will serve files under http://localhost:8000/static/<filename>
|
||||
r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
|
||||
|
||||
srv := &http.Server{
|
||||
Handler: r,
|
||||
Addr: "127.0.0.1:8000",
|
||||
// Good practice: enforce timeouts for servers you create!
|
||||
WriteTimeout: 15 * time.Second,
|
||||
ReadTimeout: 15 * time.Second,
|
||||
}
|
||||
|
||||
log.Fatal(srv.ListenAndServe())
|
||||
}
|
||||
```
|
||||
|
||||
### Registered URLs
|
||||
|
||||
Now let's see how to build registered URLs.
|
||||
|
||||
Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
|
||||
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
|
||||
Name("article")
|
||||
```
|
||||
|
||||
To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
|
||||
|
||||
```go
|
||||
url, err := r.Get("article").URL("category", "technology", "id", "42")
|
||||
```
|
||||
|
||||
...and the result will be a `url.URL` with the following path:
|
||||
|
||||
```
|
||||
"/articles/technology/42"
|
||||
```
|
||||
|
||||
This also works for host and query value variables:
|
||||
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
r.Host("{subdomain}.domain.com").
|
||||
Path("/articles/{category}/{id:[0-9]+}").
|
||||
Queries("filter", "{filter}").
|
||||
HandlerFunc(ArticleHandler).
|
||||
Name("article")
|
||||
|
||||
// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
|
||||
url, err := r.Get("article").URL("subdomain", "news",
|
||||
"category", "technology",
|
||||
"id", "42",
|
||||
"filter", "gorilla")
|
||||
```
|
||||
|
||||
All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
|
||||
|
||||
Regex support also exists for matching Headers within a route. For example, we could do:
|
||||
|
||||
```go
|
||||
r.HeadersRegexp("Content-Type", "application/(text|json)")
|
||||
```
|
||||
|
||||
...and the route will match both requests with a Content-Type of `application/json` as well as `application/text`
|
||||
|
||||
There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
|
||||
|
||||
```go
|
||||
// "http://news.domain.com/"
|
||||
host, err := r.Get("article").URLHost("subdomain", "news")
|
||||
|
||||
// "/articles/technology/42"
|
||||
path, err := r.Get("article").URLPath("category", "technology", "id", "42")
|
||||
```
|
||||
|
||||
And if you use subrouters, host and path defined separately can be built as well:
|
||||
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
s := r.Host("{subdomain}.domain.com").Subrouter()
|
||||
s.Path("/articles/{category}/{id:[0-9]+}").
|
||||
HandlerFunc(ArticleHandler).
|
||||
Name("article")
|
||||
|
||||
// "http://news.domain.com/articles/technology/42"
|
||||
url, err := r.Get("article").URL("subdomain", "news",
|
||||
"category", "technology",
|
||||
"id", "42")
|
||||
```
|
||||
|
||||
### Walking Routes
|
||||
|
||||
The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example,
|
||||
the following prints all of the registered routes:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
func handler(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
func main() {
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/", handler)
|
||||
r.HandleFunc("/products", handler).Methods("POST")
|
||||
r.HandleFunc("/articles", handler).Methods("GET")
|
||||
r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
|
||||
r.HandleFunc("/authors", handler).Queries("surname", "{surname}")
|
||||
err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
|
||||
pathTemplate, err := route.GetPathTemplate()
|
||||
if err == nil {
|
||||
fmt.Println("ROUTE:", pathTemplate)
|
||||
}
|
||||
pathRegexp, err := route.GetPathRegexp()
|
||||
if err == nil {
|
||||
fmt.Println("Path regexp:", pathRegexp)
|
||||
}
|
||||
queriesTemplates, err := route.GetQueriesTemplates()
|
||||
if err == nil {
|
||||
fmt.Println("Queries templates:", strings.Join(queriesTemplates, ","))
|
||||
}
|
||||
queriesRegexps, err := route.GetQueriesRegexp()
|
||||
if err == nil {
|
||||
fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ","))
|
||||
}
|
||||
methods, err := route.GetMethods()
|
||||
if err == nil {
|
||||
fmt.Println("Methods:", strings.Join(methods, ","))
|
||||
}
|
||||
fmt.Println()
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
|
||||
http.Handle("/", r)
|
||||
}
|
||||
```
|
||||
|
||||
### Graceful Shutdown
|
||||
|
||||
Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var wait time.Duration
|
||||
flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m")
|
||||
flag.Parse()
|
||||
|
||||
r := mux.NewRouter()
|
||||
// Add your routes as needed
|
||||
|
||||
srv := &http.Server{
|
||||
Addr: "0.0.0.0:8080",
|
||||
// Good practice to set timeouts to avoid Slowloris attacks.
|
||||
WriteTimeout: time.Second * 15,
|
||||
ReadTimeout: time.Second * 15,
|
||||
IdleTimeout: time.Second * 60,
|
||||
Handler: r, // Pass our instance of gorilla/mux in.
|
||||
}
|
||||
|
||||
// Run our server in a goroutine so that it doesn't block.
|
||||
go func() {
|
||||
if err := srv.ListenAndServe(); err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}()
|
||||
|
||||
c := make(chan os.Signal, 1)
|
||||
// We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C)
|
||||
// SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught.
|
||||
signal.Notify(c, os.Interrupt)
|
||||
|
||||
// Block until we receive our signal.
|
||||
<-c
|
||||
|
||||
// Create a deadline to wait for.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), wait)
|
||||
defer cancel()
|
||||
// Doesn't block if no connections, but will otherwise wait
|
||||
// until the timeout deadline.
|
||||
srv.Shutdown(ctx)
|
||||
// Optionally, you could run srv.Shutdown in a goroutine and block on
|
||||
// <-ctx.Done() if your application should wait for other services
|
||||
// to finalize based on context cancellation.
|
||||
log.Println("shutting down")
|
||||
os.Exit(0)
|
||||
}
|
||||
```
|
||||
|
||||
### Middleware
|
||||
|
||||
Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters.
|
||||
Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking.
|
||||
|
||||
Mux middlewares are defined using the de facto standard type:
|
||||
|
||||
```go
|
||||
type MiddlewareFunc func(http.Handler) http.Handler
|
||||
```
|
||||
|
||||
Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able access variables from the context where they are created, while retaining the signature enforced by the receivers.
|
||||
|
||||
A very basic middleware which logs the URI of the request being handled could be written as:
|
||||
|
||||
```go
|
||||
func loggingMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Do stuff here
|
||||
log.Println(r.RequestURI)
|
||||
// Call the next handler, which can be another middleware in the chain, or the final handler.
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
Middlewares can be added to a router using `Router.Use()`:
|
||||
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/", handler)
|
||||
r.Use(loggingMiddleware)
|
||||
```
|
||||
|
||||
A more complex authentication middleware, which maps session token to users, could be written as:
|
||||
|
||||
```go
|
||||
// Define our struct
|
||||
type authenticationMiddleware struct {
|
||||
tokenUsers map[string]string
|
||||
}
|
||||
|
||||
// Initialize it somewhere
|
||||
func (amw *authenticationMiddleware) Populate() {
|
||||
amw.tokenUsers["00000000"] = "user0"
|
||||
amw.tokenUsers["aaaaaaaa"] = "userA"
|
||||
amw.tokenUsers["05f717e5"] = "randomUser"
|
||||
amw.tokenUsers["deadbeef"] = "user0"
|
||||
}
|
||||
|
||||
// Middleware function, which will be called for each request
|
||||
func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
token := r.Header.Get("X-Session-Token")
|
||||
|
||||
if user, found := amw.tokenUsers[token]; found {
|
||||
// We found the token in our map
|
||||
log.Printf("Authenticated user %s\n", user)
|
||||
// Pass down the request to the next middleware (or final handler)
|
||||
next.ServeHTTP(w, r)
|
||||
} else {
|
||||
// Write an error and stop the handler chain
|
||||
http.Error(w, "Forbidden", http.StatusForbidden)
|
||||
}
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
```go
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/", handler)
|
||||
|
||||
amw := authenticationMiddleware{}
|
||||
amw.Populate()
|
||||
|
||||
r.Use(amw.Middleware)
|
||||
```
|
||||
|
||||
Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it.
|
||||
|
||||
### Testing Handlers
|
||||
|
||||
Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_.
|
||||
|
||||
First, our simple HTTP handler:
|
||||
|
||||
```go
|
||||
// endpoints.go
|
||||
package main
|
||||
|
||||
func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// A very simple health check.
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
// In the future we could report back on the status of our DB, or our cache
|
||||
// (e.g. Redis) by performing a simple PING, and include them in the response.
|
||||
io.WriteString(w, `{"alive": true}`)
|
||||
}
|
||||
|
||||
func main() {
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/health", HealthCheckHandler)
|
||||
|
||||
log.Fatal(http.ListenAndServe("localhost:8080", r))
|
||||
}
|
||||
```
|
||||
|
||||
Our test code:
|
||||
|
||||
```go
|
||||
// endpoints_test.go
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestHealthCheckHandler(t *testing.T) {
|
||||
// Create a request to pass to our handler. We don't have any query parameters for now, so we'll
|
||||
// pass 'nil' as the third parameter.
|
||||
req, err := http.NewRequest("GET", "/health", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
|
||||
rr := httptest.NewRecorder()
|
||||
handler := http.HandlerFunc(HealthCheckHandler)
|
||||
|
||||
// Our handlers satisfy http.Handler, so we can call their ServeHTTP method
|
||||
// directly and pass in our Request and ResponseRecorder.
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
// Check the status code is what we expect.
|
||||
if status := rr.Code; status != http.StatusOK {
|
||||
t.Errorf("handler returned wrong status code: got %v want %v",
|
||||
status, http.StatusOK)
|
||||
}
|
||||
|
||||
// Check the response body is what we expect.
|
||||
expected := `{"alive": true}`
|
||||
if rr.Body.String() != expected {
|
||||
t.Errorf("handler returned unexpected body: got %v want %v",
|
||||
rr.Body.String(), expected)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
In the case that our routes have [variables](#examples), we can pass those in the request. We could write
|
||||
[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple
|
||||
possible route variables as needed.
|
||||
|
||||
```go
|
||||
// endpoints.go
|
||||
func main() {
|
||||
r := mux.NewRouter()
|
||||
// A route with a route variable:
|
||||
r.HandleFunc("/metrics/{type}", MetricsHandler)
|
||||
|
||||
log.Fatal(http.ListenAndServe("localhost:8080", r))
|
||||
}
|
||||
```
|
||||
|
||||
Our test file, with a table-driven test of `routeVariables`:
|
||||
|
||||
```go
|
||||
// endpoints_test.go
|
||||
func TestMetricsHandler(t *testing.T) {
|
||||
tt := []struct{
|
||||
routeVariable string
|
||||
shouldPass bool
|
||||
}{
|
||||
{"goroutines", true},
|
||||
{"heap", true},
|
||||
{"counters", true},
|
||||
{"queries", true},
|
||||
{"adhadaeqm3k", false},
|
||||
}
|
||||
|
||||
for _, tc := range tt {
|
||||
path := fmt.Sprintf("/metrics/%s", tc.routeVariable)
|
||||
req, err := http.NewRequest("GET", path, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
// Need to create a router that we can pass the request through so that the vars will be added to the context
|
||||
router := mux.NewRouter()
|
||||
router.HandleFunc("/metrics/{type}", MetricsHandler)
|
||||
router.ServeHTTP(rr, req)
|
||||
|
||||
// In this case, our MetricsHandler returns a non-200 response
|
||||
// for a route variable it doesn't know about.
|
||||
if rr.Code == http.StatusOK && !tc.shouldPass {
|
||||
t.Errorf("handler should have failed on routeVariable %s: got %v want %v",
|
||||
tc.routeVariable, rr.Code, http.StatusOK)
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Full Example
|
||||
|
||||
Here's a complete, runnable example of a small `mux` based server:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"log"
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
func YourHandler(w http.ResponseWriter, r *http.Request) {
|
||||
w.Write([]byte("Gorilla!\n"))
|
||||
}
|
||||
|
||||
func main() {
|
||||
r := mux.NewRouter()
|
||||
// Routes consist of a path and a handler function.
|
||||
r.HandleFunc("/", YourHandler)
|
||||
|
||||
// Bind to a port and pass our router in
|
||||
log.Fatal(http.ListenAndServe(":8000", r))
|
||||
}
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
BSD licensed. See the LICENSE file for details.
|
1 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore (generated, vendored)

@ -1 +0,0 @@
cover.dat
7 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile (generated, vendored)

@ -1,7 +0,0 @@
all:

cover:
	go test -cover -v -coverprofile=cover.dat ./...
	go tool cover -func cover.dat

.PHONY: cover
23 vendor/github.com/pkg/errors/LICENSE (generated, vendored, Normal file)

@ -0,0 +1,23 @@
Copyright (c) 2015, Dave Cheney <dave@cheney.net>
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
269 vendor/github.com/pkg/errors/errors.go (generated, vendored, Normal file)
@ -0,0 +1,269 @@
|
|||
// Package errors provides simple error handling primitives.
|
||||
//
|
||||
// The traditional error handling idiom in Go is roughly akin to
|
||||
//
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// which applied recursively up the call stack results in error reports
|
||||
// without context or debugging information. The errors package allows
|
||||
// programmers to add context to the failure path in their code in a way
|
||||
// that does not destroy the original value of the error.
|
||||
//
|
||||
// Adding context to an error
|
||||
//
|
||||
// The errors.Wrap function returns a new error that adds context to the
|
||||
// original error by recording a stack trace at the point Wrap is called,
|
||||
// and the supplied message. For example
|
||||
//
|
||||
// _, err := ioutil.ReadAll(r)
|
||||
// if err != nil {
|
||||
// return errors.Wrap(err, "read failed")
|
||||
// }
|
||||
//
|
||||
// If additional control is required the errors.WithStack and errors.WithMessage
|
||||
// functions destructure errors.Wrap into its component operations of annotating
|
||||
// an error with a stack trace and an a message, respectively.
|
||||
//
|
||||
// Retrieving the cause of an error
|
||||
//
|
||||
// Using errors.Wrap constructs a stack of errors, adding context to the
|
||||
// preceding error. Depending on the nature of the error it may be necessary
|
||||
// to reverse the operation of errors.Wrap to retrieve the original error
|
||||
// for inspection. Any error value which implements this interface
|
||||
//
|
||||
// type causer interface {
|
||||
// Cause() error
|
||||
// }
|
||||
//
|
||||
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
|
||||
// the topmost error which does not implement causer, which is assumed to be
|
||||
// the original cause. For example:
|
||||
//
|
||||
// switch err := errors.Cause(err).(type) {
|
||||
// case *MyError:
|
||||
// // handle specifically
|
||||
// default:
|
||||
// // unknown error
|
||||
// }
|
||||
//
|
||||
// causer interface is not exported by this package, but is considered a part
|
||||
// of stable public API.
|
||||
//
|
||||
// Formatted printing of errors
|
||||
//
|
||||
// All error values returned from this package implement fmt.Formatter and can
|
||||
// be formatted by the fmt package. The following verbs are supported
|
||||
//
|
||||
// %s print the error. If the error has a Cause it will be
|
||||
// printed recursively
|
||||
// %v see %s
|
||||
// %+v extended format. Each Frame of the error's StackTrace will
|
||||
// be printed in detail.
|
||||
//
|
||||
// Retrieving the stack trace of an error or wrapper
|
||||
//
|
||||
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
|
||||
// invoked. This information can be retrieved with the following interface.
|
||||
//
|
||||
// type stackTracer interface {
|
||||
// StackTrace() errors.StackTrace
|
||||
// }
|
||||
//
|
||||
// Where errors.StackTrace is defined as
|
||||
//
|
||||
// type StackTrace []Frame
|
||||
//
|
||||
// The Frame type represents a call site in the stack trace. Frame supports
|
||||
// the fmt.Formatter interface that can be used for printing information about
|
||||
// the stack trace of this error. For example:
|
||||
//
|
||||
// if err, ok := err.(stackTracer); ok {
|
||||
// for _, f := range err.StackTrace() {
|
||||
// fmt.Printf("%+s:%d", f)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// stackTracer interface is not exported by this package, but is considered a part
|
||||
// of stable public API.
|
||||
//
|
||||
// See the documentation for Frame.Format for more details.
|
||||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// New returns an error with the supplied message.
|
||||
// New also records the stack trace at the point it was called.
|
||||
func New(message string) error {
|
||||
return &fundamental{
|
||||
msg: message,
|
||||
stack: callers(),
|
||||
}
|
||||
}
|
||||
|
||||
// Errorf formats according to a format specifier and returns the string
|
||||
// as a value that satisfies error.
|
||||
// Errorf also records the stack trace at the point it was called.
|
||||
func Errorf(format string, args ...interface{}) error {
|
||||
return &fundamental{
|
||||
msg: fmt.Sprintf(format, args...),
|
||||
stack: callers(),
|
||||
}
|
||||
}
|
||||
|
||||
// fundamental is an error that has a message and a stack, but no caller.
|
||||
type fundamental struct {
|
||||
msg string
|
||||
*stack
|
||||
}
|
||||
|
||||
func (f *fundamental) Error() string { return f.msg }
|
||||
|
||||
func (f *fundamental) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 'v':
|
||||
if s.Flag('+') {
|
||||
io.WriteString(s, f.msg)
|
||||
f.stack.Format(s, verb)
|
||||
return
|
||||
}
|
||||
fallthrough
|
||||
case 's':
|
||||
io.WriteString(s, f.msg)
|
||||
case 'q':
|
||||
fmt.Fprintf(s, "%q", f.msg)
|
||||
}
|
||||
}
|
||||
|
||||
// WithStack annotates err with a stack trace at the point WithStack was called.
|
||||
// If err is nil, WithStack returns nil.
|
||||
func WithStack(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return &withStack{
|
||||
err,
|
||||
callers(),
|
||||
}
|
||||
}
|
||||
|
||||
type withStack struct {
|
||||
error
|
||||
*stack
|
||||
}
|
||||
|
||||
func (w *withStack) Cause() error { return w.error }
|
||||
|
||||
func (w *withStack) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 'v':
|
||||
if s.Flag('+') {
|
||||
fmt.Fprintf(s, "%+v", w.Cause())
|
||||
w.stack.Format(s, verb)
|
||||
return
|
||||
}
|
||||
fallthrough
|
||||
case 's':
|
||||
io.WriteString(s, w.Error())
|
||||
case 'q':
|
||||
fmt.Fprintf(s, "%q", w.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Wrap returns an error annotating err with a stack trace
|
||||
// at the point Wrap is called, and the supplied message.
|
||||
// If err is nil, Wrap returns nil.
|
||||
func Wrap(err error, message string) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
err = &withMessage{
|
||||
cause: err,
|
||||
msg: message,
|
||||
}
|
||||
return &withStack{
|
||||
err,
|
||||
callers(),
|
||||
}
|
||||
}
|
||||
|
||||
// Wrapf returns an error annotating err with a stack trace
|
||||
// at the point Wrapf is call, and the format specifier.
|
||||
// If err is nil, Wrapf returns nil.
|
||||
func Wrapf(err error, format string, args ...interface{}) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
err = &withMessage{
|
||||
cause: err,
|
||||
msg: fmt.Sprintf(format, args...),
|
||||
}
|
||||
return &withStack{
|
||||
err,
|
||||
callers(),
|
||||
}
|
||||
}
|
||||
|
||||
// WithMessage annotates err with a new message.
|
||||
// If err is nil, WithMessage returns nil.
|
||||
func WithMessage(err error, message string) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return &withMessage{
|
||||
cause: err,
|
||||
msg: message,
|
||||
}
|
||||
}
|
||||
|
||||
type withMessage struct {
|
||||
cause error
|
||||
msg string
|
||||
}
|
||||
|
||||
func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
|
||||
func (w *withMessage) Cause() error { return w.cause }
|
||||
|
||||
func (w *withMessage) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 'v':
|
||||
if s.Flag('+') {
|
||||
fmt.Fprintf(s, "%+v\n", w.Cause())
|
||||
io.WriteString(s, w.msg)
|
||||
return
|
||||
}
|
||||
fallthrough
|
||||
case 's', 'q':
|
||||
io.WriteString(s, w.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Cause returns the underlying cause of the error, if possible.
|
||||
// An error value has a cause if it implements the following
|
||||
// interface:
|
||||
//
|
||||
// type causer interface {
|
||||
// Cause() error
|
||||
// }
|
||||
//
|
||||
// If the error does not implement Cause, the original error will
|
||||
// be returned. If the error is nil, nil will be returned without further
|
||||
// investigation.
|
||||
func Cause(err error) error {
|
||||
type causer interface {
|
||||
Cause() error
|
||||
}
|
||||
|
||||
for err != nil {
|
||||
cause, ok := err.(causer)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
err = cause.Cause()
|
||||
}
|
||||
return err
|
||||
}
|
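The newly vendored errors.go above documents the Wrap/Cause workflow in its package comment. A small usage sketch of that documented API, wrapping at the failure site, unwrapping with Cause, and printing the stack with %+v; the readConfig helper and path are illustrative assumptions, not code from this repository:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/pkg/errors"
)

// readConfig wraps the low-level error with context and a stack trace,
// following the pattern from the package documentation above.
func readConfig(path string) ([]byte, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, errors.Wrap(err, "read config failed")
	}
	return data, nil
}

func main() {
	_, err := readConfig("/nonexistent/config.yml")
	if err != nil {
		// Cause recovers the original error underneath the wrapper.
		fmt.Println("cause:", errors.Cause(err))
		// %+v prints the message chain plus the recorded stack trace.
		fmt.Printf("%+v\n", err)
	}
}
```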
178 vendor/github.com/pkg/errors/stack.go (generated, vendored, Normal file)
@ -0,0 +1,178 @@
|
|||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Frame represents a program counter inside a stack frame.
|
||||
type Frame uintptr
|
||||
|
||||
// pc returns the program counter for this frame;
|
||||
// multiple frames may have the same PC value.
|
||||
func (f Frame) pc() uintptr { return uintptr(f) - 1 }
|
||||
|
||||
// file returns the full path to the file that contains the
|
||||
// function for this Frame's pc.
|
||||
func (f Frame) file() string {
|
||||
fn := runtime.FuncForPC(f.pc())
|
||||
if fn == nil {
|
||||
return "unknown"
|
||||
}
|
||||
file, _ := fn.FileLine(f.pc())
|
||||
return file
|
||||
}
|
||||
|
||||
// line returns the line number of source code of the
|
||||
// function for this Frame's pc.
|
||||
func (f Frame) line() int {
|
||||
fn := runtime.FuncForPC(f.pc())
|
||||
if fn == nil {
|
||||
return 0
|
||||
}
|
||||
_, line := fn.FileLine(f.pc())
|
||||
return line
|
||||
}
|
||||
|
||||
// Format formats the frame according to the fmt.Formatter interface.
|
||||
//
|
||||
// %s source file
|
||||
// %d source line
|
||||
// %n function name
|
||||
// %v equivalent to %s:%d
|
||||
//
|
||||
// Format accepts flags that alter the printing of some verbs, as follows:
|
||||
//
|
||||
// %+s path of source file relative to the compile time GOPATH
|
||||
// %+v equivalent to %+s:%d
|
||||
func (f Frame) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 's':
|
||||
switch {
|
||||
case s.Flag('+'):
|
||||
pc := f.pc()
|
||||
fn := runtime.FuncForPC(pc)
|
||||
if fn == nil {
|
||||
io.WriteString(s, "unknown")
|
||||
} else {
|
||||
file, _ := fn.FileLine(pc)
|
||||
fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
|
||||
}
|
||||
default:
|
||||
io.WriteString(s, path.Base(f.file()))
|
||||
}
|
||||
case 'd':
|
||||
fmt.Fprintf(s, "%d", f.line())
|
||||
case 'n':
|
||||
name := runtime.FuncForPC(f.pc()).Name()
|
||||
io.WriteString(s, funcname(name))
|
||||
case 'v':
|
||||
f.Format(s, 's')
|
||||
io.WriteString(s, ":")
|
||||
f.Format(s, 'd')
|
||||
}
|
||||
}
|
||||
|
||||
// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
|
||||
type StackTrace []Frame
|
||||
|
||||
func (st StackTrace) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 'v':
|
||||
switch {
|
||||
case s.Flag('+'):
|
||||
for _, f := range st {
|
||||
fmt.Fprintf(s, "\n%+v", f)
|
||||
}
|
||||
case s.Flag('#'):
|
||||
fmt.Fprintf(s, "%#v", []Frame(st))
|
||||
default:
|
||||
fmt.Fprintf(s, "%v", []Frame(st))
|
||||
}
|
||||
case 's':
|
||||
fmt.Fprintf(s, "%s", []Frame(st))
|
||||
}
|
||||
}
|
||||
|
||||
// stack represents a stack of program counters.
|
||||
type stack []uintptr
|
||||
|
||||
func (s *stack) Format(st fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 'v':
|
||||
switch {
|
||||
case st.Flag('+'):
|
||||
for _, pc := range *s {
|
||||
f := Frame(pc)
|
||||
fmt.Fprintf(st, "\n%+v", f)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stack) StackTrace() StackTrace {
|
||||
f := make([]Frame, len(*s))
|
||||
for i := 0; i < len(f); i++ {
|
||||
f[i] = Frame((*s)[i])
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func callers() *stack {
|
||||
const depth = 32
|
||||
var pcs [depth]uintptr
|
||||
n := runtime.Callers(3, pcs[:])
|
||||
var st stack = pcs[0:n]
|
||||
return &st
|
||||
}
|
||||
|
||||
// funcname removes the path prefix component of a function's name reported by func.Name().
|
||||
func funcname(name string) string {
|
||||
i := strings.LastIndex(name, "/")
|
||||
name = name[i+1:]
|
||||
i = strings.Index(name, ".")
|
||||
return name[i+1:]
|
||||
}
|
||||
|
||||
func trimGOPATH(name, file string) string {
|
||||
// Here we want to get the source file path relative to the compile time
|
||||
// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
|
||||
// GOPATH at runtime, but we can infer the number of path segments in the
|
||||
// GOPATH. We note that fn.Name() returns the function name qualified by
|
||||
// the import path, which does not include the GOPATH. Thus we can trim
|
||||
// segments from the beginning of the file path until the number of path
|
||||
// separators remaining is one more than the number of path separators in
|
||||
// the function name. For example, given:
|
||||
//
|
||||
// GOPATH /home/user
|
||||
// file /home/user/src/pkg/sub/file.go
|
||||
// fn.Name() pkg/sub.Type.Method
|
||||
//
|
||||
// We want to produce:
|
||||
//
|
||||
// pkg/sub/file.go
|
||||
//
|
||||
// From this we can easily see that fn.Name() has one less path separator
|
||||
// than our desired output. We count separators from the end of the file
|
||||
// path until it finds two more than in the function name and then move
|
||||
// one character forward to preserve the initial path segment without a
|
||||
// leading separator.
|
||||
const sep = "/"
|
||||
goal := strings.Count(name, sep) + 2
|
||||
i := len(file)
|
||||
for n := 0; n < goal; n++ {
|
||||
i = strings.LastIndex(file[:i], sep)
|
||||
if i == -1 {
|
||||
// not enough separators found, set i so that the slice expression
|
||||
// below leaves file unmodified
|
||||
i = -len(sep)
|
||||
break
|
||||
}
|
||||
}
|
||||
// get back to 0 or trim the leading separator
|
||||
file = file[i+len(sep):]
|
||||
return file
|
||||
}
|
1 vendor/github.com/prometheus/client_golang/prometheus/.gitignore (generated, vendored)

@ -1 +0,0 @@
command-line-arguments.test
1 vendor/github.com/prometheus/client_golang/prometheus/README.md (generated, vendored)

@ -1 +0,0 @@
See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
321 vendor/github.com/prometheus/client_model/go/metrics.pb.go (generated, vendored)
@ -1,34 +1,23 @@
|
|||
// Code generated by protoc-gen-go.
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: metrics.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package io_prometheus_client is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
metrics.proto
|
||||
|
||||
It has these top-level messages:
|
||||
LabelPair
|
||||
Gauge
|
||||
Counter
|
||||
Quantile
|
||||
Summary
|
||||
Untyped
|
||||
Histogram
|
||||
Bucket
|
||||
Metric
|
||||
MetricFamily
|
||||
*/
|
||||
package io_prometheus_client
|
||||
package io_prometheus_client // import "github.com/prometheus/client_model/go"
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type MetricType int32
|
||||
|
||||
const (
|
||||
|
@ -70,16 +59,41 @@ func (x *MetricType) UnmarshalJSON(data []byte) error {
|
|||
*x = MetricType(value)
|
||||
return nil
|
||||
}
|
||||
func (MetricType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
|
||||
}
|
||||
|
||||
type LabelPair struct {
|
||||
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||
Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LabelPair) Reset() { *m = LabelPair{} }
|
||||
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
|
||||
func (*LabelPair) ProtoMessage() {}
|
||||
func (*LabelPair) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
|
||||
}
|
||||
func (m *LabelPair) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_LabelPair.Unmarshal(m, b)
|
||||
}
|
||||
func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *LabelPair) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_LabelPair.Merge(dst, src)
|
||||
}
|
||||
func (m *LabelPair) XXX_Size() int {
|
||||
return xxx_messageInfo_LabelPair.Size(m)
|
||||
}
|
||||
func (m *LabelPair) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_LabelPair.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_LabelPair proto.InternalMessageInfo
|
||||
|
||||
func (m *LabelPair) GetName() string {
|
||||
if m != nil && m.Name != nil {
|
||||
|
@ -97,12 +111,34 @@ func (m *LabelPair) GetValue() string {
|
|||
|
||||
type Gauge struct {
|
||||
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Gauge) Reset() { *m = Gauge{} }
|
||||
func (m *Gauge) String() string { return proto.CompactTextString(m) }
|
||||
func (*Gauge) ProtoMessage() {}
|
||||
func (*Gauge) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1}
|
||||
}
|
||||
func (m *Gauge) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Gauge.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Gauge) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Gauge.Merge(dst, src)
|
||||
}
|
||||
func (m *Gauge) XXX_Size() int {
|
||||
return xxx_messageInfo_Gauge.Size(m)
|
||||
}
|
||||
func (m *Gauge) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Gauge.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Gauge proto.InternalMessageInfo
|
||||
|
||||
func (m *Gauge) GetValue() float64 {
|
||||
if m != nil && m.Value != nil {
|
||||
|
@ -113,12 +149,34 @@ func (m *Gauge) GetValue() float64 {
|
|||
|
||||
type Counter struct {
|
||||
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Counter) Reset() { *m = Counter{} }
|
||||
func (m *Counter) String() string { return proto.CompactTextString(m) }
|
||||
func (*Counter) ProtoMessage() {}
|
||||
func (*Counter) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2}
|
||||
}
|
||||
func (m *Counter) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Counter.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Counter) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Counter.Merge(dst, src)
|
||||
}
|
||||
func (m *Counter) XXX_Size() int {
|
||||
return xxx_messageInfo_Counter.Size(m)
|
||||
}
|
||||
func (m *Counter) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Counter.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Counter proto.InternalMessageInfo
|
||||
|
||||
func (m *Counter) GetValue() float64 {
|
||||
if m != nil && m.Value != nil {
|
||||
|
@ -130,12 +188,34 @@ func (m *Counter) GetValue() float64 {
|
|||
type Quantile struct {
|
||||
Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
|
||||
Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Quantile) Reset() { *m = Quantile{} }
|
||||
func (m *Quantile) String() string { return proto.CompactTextString(m) }
|
||||
func (*Quantile) ProtoMessage() {}
|
||||
func (*Quantile) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3}
|
||||
}
|
||||
func (m *Quantile) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Quantile.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Quantile) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Quantile.Merge(dst, src)
|
||||
}
|
||||
func (m *Quantile) XXX_Size() int {
|
||||
return xxx_messageInfo_Quantile.Size(m)
|
||||
}
|
||||
func (m *Quantile) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Quantile.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Quantile proto.InternalMessageInfo
|
||||
|
||||
func (m *Quantile) GetQuantile() float64 {
|
||||
if m != nil && m.Quantile != nil {
|
||||
|
@ -152,15 +232,37 @@ func (m *Quantile) GetValue() float64 {
|
|||
}
|
||||
|
||||
type Summary struct {
|
||||
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
|
||||
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
|
||||
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
|
||||
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
|
||||
Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Summary) Reset() { *m = Summary{} }
|
||||
func (m *Summary) String() string { return proto.CompactTextString(m) }
|
||||
func (*Summary) ProtoMessage() {}
|
||||
func (*Summary) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4}
|
||||
}
|
||||
func (m *Summary) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Summary.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Summary) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Summary.Merge(dst, src)
|
||||
}
|
||||
func (m *Summary) XXX_Size() int {
|
||||
return xxx_messageInfo_Summary.Size(m)
|
||||
}
|
||||
func (m *Summary) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Summary.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Summary proto.InternalMessageInfo
|
||||
|
||||
func (m *Summary) GetSampleCount() uint64 {
|
||||
if m != nil && m.SampleCount != nil {
|
||||
|
@ -185,12 +287,34 @@ func (m *Summary) GetQuantile() []*Quantile {
|
|||
|
||||
type Untyped struct {
|
||||
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Untyped) Reset() { *m = Untyped{} }
|
||||
func (m *Untyped) String() string { return proto.CompactTextString(m) }
|
||||
func (*Untyped) ProtoMessage() {}
|
||||
func (*Untyped) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5}
|
||||
}
|
||||
func (m *Untyped) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Untyped.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Untyped) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Untyped.Merge(dst, src)
|
||||
}
|
||||
func (m *Untyped) XXX_Size() int {
|
||||
return xxx_messageInfo_Untyped.Size(m)
|
||||
}
|
||||
func (m *Untyped) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Untyped.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Untyped proto.InternalMessageInfo
|
||||
|
||||
func (m *Untyped) GetValue() float64 {
|
||||
if m != nil && m.Value != nil {
|
||||
|
@ -200,15 +324,37 @@ func (m *Untyped) GetValue() float64 {
|
|||
}
|
||||
|
||||
type Histogram struct {
|
||||
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
|
||||
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
|
||||
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
|
||||
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
|
||||
Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Histogram) Reset() { *m = Histogram{} }
|
||||
func (m *Histogram) String() string { return proto.CompactTextString(m) }
|
||||
func (*Histogram) ProtoMessage() {}
|
||||
func (*Histogram) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6}
|
||||
}
|
||||
func (m *Histogram) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Histogram.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Histogram) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Histogram.Merge(dst, src)
|
||||
}
|
||||
func (m *Histogram) XXX_Size() int {
|
||||
return xxx_messageInfo_Histogram.Size(m)
|
||||
}
|
||||
func (m *Histogram) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Histogram.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Histogram proto.InternalMessageInfo
|
||||
|
||||
func (m *Histogram) GetSampleCount() uint64 {
|
||||
if m != nil && m.SampleCount != nil {
|
||||
|
@ -232,14 +378,36 @@ func (m *Histogram) GetBucket() []*Bucket {
|
|||
}
|
||||
|
||||
type Bucket struct {
|
||||
CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
|
||||
UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
|
||||
CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
|
||||
UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Bucket) Reset() { *m = Bucket{} }
|
||||
func (m *Bucket) String() string { return proto.CompactTextString(m) }
|
||||
func (*Bucket) ProtoMessage() {}
|
||||
func (*Bucket) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7}
|
||||
}
|
||||
func (m *Bucket) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Bucket.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Bucket) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Bucket.Merge(dst, src)
|
||||
}
|
||||
func (m *Bucket) XXX_Size() int {
|
||||
return xxx_messageInfo_Bucket.Size(m)
|
||||
}
|
||||
func (m *Bucket) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Bucket.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Bucket proto.InternalMessageInfo
|
||||
|
||||
func (m *Bucket) GetCumulativeCount() uint64 {
|
||||
if m != nil && m.CumulativeCount != nil {
|
||||
|
@ -262,13 +430,35 @@ type Metric struct {
|
|||
Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
|
||||
Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
|
||||
Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
|
||||
TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
|
||||
TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Metric) Reset() { *m = Metric{} }
|
||||
func (m *Metric) String() string { return proto.CompactTextString(m) }
|
||||
func (*Metric) ProtoMessage() {}
|
||||
func (*Metric) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8}
|
||||
}
|
||||
func (m *Metric) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Metric.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Metric) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Metric.Merge(dst, src)
|
||||
}
|
||||
func (m *Metric) XXX_Size() int {
|
||||
return xxx_messageInfo_Metric.Size(m)
|
||||
}
|
||||
func (m *Metric) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Metric.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Metric proto.InternalMessageInfo
|
||||
|
||||
func (m *Metric) GetLabel() []*LabelPair {
|
||||
if m != nil {
|
||||
|
@ -324,12 +514,34 @@ type MetricFamily struct {
|
|||
Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
|
||||
Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
|
||||
Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *MetricFamily) Reset() { *m = MetricFamily{} }
|
||||
func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
|
||||
func (*MetricFamily) ProtoMessage() {}
|
||||
func (*MetricFamily) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9}
|
||||
}
|
||||
func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
|
||||
}
|
||||
func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *MetricFamily) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_MetricFamily.Merge(dst, src)
|
||||
}
|
||||
func (m *MetricFamily) XXX_Size() int {
|
||||
return xxx_messageInfo_MetricFamily.Size(m)
|
||||
}
|
||||
func (m *MetricFamily) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_MetricFamily.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
|
||||
|
||||
func (m *MetricFamily) GetName() string {
|
||||
if m != nil && m.Name != nil {
|
||||
|
@ -360,5 +572,58 @@ func (m *MetricFamily) GetMetric() []*Metric {
|
|||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
|
||||
proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
|
||||
proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
|
||||
proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
|
||||
proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
|
||||
proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
|
||||
proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
|
||||
proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
|
||||
proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
|
||||
proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
|
||||
proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) }
|
||||
|
||||
var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{
|
||||
// 591 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e,
|
||||
0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89,
|
||||
0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81,
|
||||
0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47,
|
||||
0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77,
|
||||
0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e,
|
||||
0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64,
|
||||
0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58,
|
||||
0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c,
|
||||
0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2,
|
||||
0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4,
|
||||
0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12,
|
||||
0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c,
|
||||
0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee,
|
||||
0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f,
|
||||
0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54,
|
||||
0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea,
|
||||
0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63,
|
||||
0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45,
|
||||
0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d,
|
||||
0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5,
|
||||
0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d,
|
||||
0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d,
|
||||
0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7,
|
||||
0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8,
|
||||
0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2,
|
||||
0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58,
|
||||
0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11,
|
||||
0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff,
|
||||
0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02,
|
||||
0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd,
|
||||
0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25,
|
||||
0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9,
|
||||
0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27,
|
||||
0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9,
|
||||
0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48,
|
||||
0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00,
|
||||
}
|
||||
|
|
2 vendor/github.com/prometheus/common/expfmt/text_parse.go generated vendored
@@ -359,7 +359,7 @@ func (p *TextParser) startLabelValue() stateFn {
		}
		return p.readingValue
	default:
		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
		return nil
	}
}
67 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt generated vendored
|
@ -1,67 +0,0 @@
|
|||
PACKAGE
|
||||
|
||||
package goautoneg
|
||||
import "bitbucket.org/ww/goautoneg"
|
||||
|
||||
HTTP Content-Type Autonegotiation.
|
||||
|
||||
The functions in this package implement the behaviour specified in
|
||||
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
||||
|
||||
Copyright (c) 2011, Open Knowledge Foundation Ltd.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
Neither the name of the Open Knowledge Foundation Ltd. nor the
|
||||
names of its contributors may be used to endorse or promote
|
||||
products derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
FUNCTIONS
|
||||
|
||||
func Negotiate(header string, alternatives []string) (content_type string)
|
||||
Negotiate the most appropriate content_type given the accept header
|
||||
and a list of alternatives.
|
||||
|
||||
func ParseAccept(header string) (accept []Accept)
|
||||
Parse an Accept Header string returning a sorted list
|
||||
of clauses
|
||||
|
||||
|
||||
TYPES
|
||||
|
||||
type Accept struct {
|
||||
Type, SubType string
|
||||
Q float32
|
||||
Params map[string]string
|
||||
}
|
||||
Structure to represent a clause in an HTTP Accept Header
|
||||
|
||||
|
||||
SUBDIRECTORIES
|
||||
|
||||
.hg
|
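The removed README.txt above documents goautoneg's public API (Negotiate and ParseAccept). A minimal usage sketch of Negotiate, assuming the import path given in that README (the copy vendored in this repository sits under an internal package and is not importable directly); the Accept header and alternatives below are made up for illustration:

package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg" // import path as documented in the removed README above
)

func main() {
	// Hypothetical Accept header from a client and the content types the
	// server can produce.
	header := "text/html;q=0.5, application/json"
	alternatives := []string{"text/html", "application/json"}

	// Negotiate returns the most appropriate content type for the given header,
	// following the RFC 2616 section 14 rules referenced above.
	fmt.Println(goautoneg.Negotiate(header, alternatives)) // application/json
}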
1 vendor/github.com/prometheus/procfs/.gitignore generated vendored
@@ -1 +0,0 @@
/fixtures/
15 vendor/github.com/prometheus/procfs/.travis.yml generated vendored
@@ -1,15 +0,0 @@
sudo: false

language: go

go:
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.x

go_import_path: github.com/prometheus/procfs

script:
- make style check_license vet test staticcheck
18 vendor/github.com/prometheus/procfs/CONTRIBUTING.md generated vendored
@@ -1,18 +0,0 @@
# Contributing

Prometheus uses GitHub to manage reviews of pull requests.

* If you have a trivial fix or improvement, go ahead and create a pull request,
  addressing (with `@...`) the maintainer of this repository (see
  [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.

* If you plan to do something more involved, first discuss your ideas
  on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
  This will avoid unnecessary work and surely give you and us a good deal
  of inspiration.

* Relevant coding style guidelines are the [Go Code Review
  Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
  and the _Formatting and style_ section of Peter Bourgon's [Go: Best
  Practices for Production
  Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
1 vendor/github.com/prometheus/procfs/MAINTAINERS.md generated vendored
@@ -1 +0,0 @@
* Tobias Schmidt <tobidt@gmail.com>
71 vendor/github.com/prometheus/procfs/Makefile generated vendored
|
@ -1,71 +0,0 @@
|
|||
# Copyright 2018 The Prometheus Authors
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Ensure GOBIN is not set during build so that promu is installed to the correct path
|
||||
unexport GOBIN
|
||||
|
||||
GO ?= go
|
||||
GOFMT ?= $(GO)fmt
|
||||
FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
|
||||
STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
|
||||
pkgs = $(shell $(GO) list ./... | grep -v /vendor/)
|
||||
|
||||
PREFIX ?= $(shell pwd)
|
||||
BIN_DIR ?= $(shell pwd)
|
||||
|
||||
ifdef DEBUG
|
||||
bindata_flags = -debug
|
||||
endif
|
||||
|
||||
STATICCHECK_IGNORE =
|
||||
|
||||
all: format staticcheck build test
|
||||
|
||||
style:
|
||||
@echo ">> checking code style"
|
||||
@! $(GOFMT) -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^'
|
||||
|
||||
check_license:
|
||||
@echo ">> checking license header"
|
||||
@./scripts/check_license.sh
|
||||
|
||||
test: fixtures/.unpacked sysfs/fixtures/.unpacked
|
||||
@echo ">> running all tests"
|
||||
@$(GO) test -race $(shell $(GO) list ./... | grep -v /vendor/ | grep -v examples)
|
||||
|
||||
format:
|
||||
@echo ">> formatting code"
|
||||
@$(GO) fmt $(pkgs)
|
||||
|
||||
vet:
|
||||
@echo ">> vetting code"
|
||||
@$(GO) vet $(pkgs)
|
||||
|
||||
staticcheck: $(STATICCHECK)
|
||||
@echo ">> running staticcheck"
|
||||
@$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
|
||||
|
||||
%/.unpacked: %.ttar
|
||||
./ttar -C $(dir $*) -x -f $*.ttar
|
||||
touch $@
|
||||
|
||||
$(FIRST_GOPATH)/bin/staticcheck:
|
||||
@GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck
|
||||
|
||||
.PHONY: all style check_license format test vet staticcheck
|
||||
|
||||
# Declaring the binaries at their default locations as PHONY targets is a hack
|
||||
# to ensure the latest version is downloaded on every make execution.
|
||||
# If this is not desired, copy/symlink these binaries to a different path and
|
||||
# set the respective environment variables.
|
||||
.PHONY: $(GOPATH)/bin/staticcheck
|
11 vendor/github.com/prometheus/procfs/README.md generated vendored
@@ -1,11 +0,0 @@
# procfs

This procfs package provides functions to retrieve system, kernel and process
metrics from the pseudo-filesystem proc.

*WARNING*: This package is a work in progress. Its API may still break in
backwards-incompatible ways without warnings. Use it at your own risk.

[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
446 vendor/github.com/prometheus/procfs/fixtures.ttar generated vendored
|
@ -1,446 +0,0 @@
|
|||
# Archive created by ttar -c -f fixtures.ttar fixtures/
|
||||
Directory: fixtures
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/26231
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/cmdline
|
||||
Lines: 1
|
||||
vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/comm
|
||||
Lines: 1
|
||||
vim
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/exe
|
||||
SymlinkTo: /usr/bin/vim
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/26231/fd
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/fd/0
|
||||
SymlinkTo: ../../symlinktargets/abc
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/fd/1
|
||||
SymlinkTo: ../../symlinktargets/def
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/fd/10
|
||||
SymlinkTo: ../../symlinktargets/xyz
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/fd/2
|
||||
SymlinkTo: ../../symlinktargets/ghi
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/fd/3
|
||||
SymlinkTo: ../../symlinktargets/uvw
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/io
|
||||
Lines: 7
|
||||
rchar: 750339
|
||||
wchar: 818609
|
||||
syscr: 7405
|
||||
syscw: 5245
|
||||
read_bytes: 1024
|
||||
write_bytes: 2048
|
||||
cancelled_write_bytes: -1024
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/limits
|
||||
Lines: 17
|
||||
Limit Soft Limit Hard Limit Units
|
||||
Max cpu time unlimited unlimited seconds
|
||||
Max file size unlimited unlimited bytes
|
||||
Max data size unlimited unlimited bytes
|
||||
Max stack size 8388608 unlimited bytes
|
||||
Max core file size 0 unlimited bytes
|
||||
Max resident set unlimited unlimited bytes
|
||||
Max processes 62898 62898 processes
|
||||
Max open files 2048 4096 files
|
||||
Max locked memory 65536 65536 bytes
|
||||
Max address space 8589934592 unlimited bytes
|
||||
Max file locks unlimited unlimited locks
|
||||
Max pending signals 62898 62898 signals
|
||||
Max msgqueue size 819200 819200 bytes
|
||||
Max nice priority 0 0
|
||||
Max realtime priority 0 0
|
||||
Max realtime timeout unlimited unlimited us
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/mountstats
|
||||
Lines: 19
|
||||
device rootfs mounted on / with fstype rootfs
|
||||
device sysfs mounted on /sys with fstype sysfs
|
||||
device proc mounted on /proc with fstype proc
|
||||
device /dev/sda1 mounted on / with fstype ext4
|
||||
device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1
|
||||
opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none
|
||||
age: 13968
|
||||
caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255
|
||||
nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured
|
||||
sec: flavor=1,pseudoflavor=1
|
||||
events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0
|
||||
bytes: 1207640230 0 0 0 1210214218 0 295483 0
|
||||
RPC iostats version: 1.0 p/v: 100003/4 (nfs)
|
||||
xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726
|
||||
per-op statistics
|
||||
NULL: 0 0 0 0 0 0 0 0
|
||||
READ: 1298 1298 0 207680 1210292152 6 79386 79407
|
||||
WRITE: 0 0 0 0 0 0 0 0
|
||||
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/26231/net
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/net/dev
|
||||
Lines: 4
|
||||
Inter-| Receive | Transmit
|
||||
face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
|
||||
lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/26231/ns
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/ns/mnt
|
||||
SymlinkTo: mnt:[4026531840]
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/ns/net
|
||||
SymlinkTo: net:[4026531993]
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/stat
|
||||
Lines: 1
|
||||
26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/26232
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/cmdline
|
||||
Lines: 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/comm
|
||||
Lines: 1
|
||||
ata_sff
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/26232/fd
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/fd/0
|
||||
SymlinkTo: ../../symlinktargets/abc
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/fd/1
|
||||
SymlinkTo: ../../symlinktargets/def
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/fd/2
|
||||
SymlinkTo: ../../symlinktargets/ghi
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/fd/3
|
||||
SymlinkTo: ../../symlinktargets/uvw
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/fd/4
|
||||
SymlinkTo: ../../symlinktargets/xyz
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/limits
|
||||
Lines: 17
|
||||
Limit Soft Limit Hard Limit Units
|
||||
Max cpu time unlimited unlimited seconds
|
||||
Max file size unlimited unlimited bytes
|
||||
Max data size unlimited unlimited bytes
|
||||
Max stack size 8388608 unlimited bytes
|
||||
Max core file size 0 unlimited bytes
|
||||
Max resident set unlimited unlimited bytes
|
||||
Max processes 29436 29436 processes
|
||||
Max open files 1024 4096 files
|
||||
Max locked memory 65536 65536 bytes
|
||||
Max address space unlimited unlimited bytes
|
||||
Max file locks unlimited unlimited locks
|
||||
Max pending signals 29436 29436 signals
|
||||
Max msgqueue size 819200 819200 bytes
|
||||
Max nice priority 0 0
|
||||
Max realtime priority 0 0
|
||||
Max realtime timeout unlimited unlimited us
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/stat
|
||||
Lines: 1
|
||||
33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/26233
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26233/cmdline
|
||||
Lines: 1
|
||||
com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/584
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/584/stat
|
||||
Lines: 2
|
||||
1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0
|
||||
#!/bin/cat /proc/self/stat
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/buddyinfo
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/buddyinfo/short
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/buddyinfo/short/buddyinfo
|
||||
Lines: 3
|
||||
Node 0, zone
|
||||
Node 0, zone
|
||||
Node 0, zone
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/buddyinfo/sizemismatch
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/buddyinfo/sizemismatch/buddyinfo
|
||||
Lines: 3
|
||||
Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
|
||||
Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0
|
||||
Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/buddyinfo/valid
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/buddyinfo/valid/buddyinfo
|
||||
Lines: 3
|
||||
Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
|
||||
Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0
|
||||
Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/fs
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/fs/xfs
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/fs/xfs/stat
|
||||
Lines: 23
|
||||
extent_alloc 92447 97589 92448 93751
|
||||
abt 0 0 0 0
|
||||
blk_map 1767055 188820 184891 92447 92448 2140766 0
|
||||
bmbt 0 0 0 0
|
||||
dir 185039 92447 92444 136422
|
||||
trans 706 944304 0
|
||||
ig 185045 58807 0 126238 0 33637 22
|
||||
log 2883 113448 9 17360 739
|
||||
push_ail 945014 0 134260 15483 0 3940 464 159985 0 40
|
||||
xstrat 92447 0
|
||||
rw 107739 94045
|
||||
attr 4 0 0 0
|
||||
icluster 8677 7849 135802
|
||||
vnodes 92601 0 0 0 92444 92444 92444 0
|
||||
buf 2666287 7122 2659202 3599 2 7085 0 10297 7085
|
||||
abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147
|
||||
abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023
|
||||
bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
qm 0 0 0 0 0 0 0 0
|
||||
xpc 399724544 92823103 86219234
|
||||
debug 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/mdstat
|
||||
Lines: 26
|
||||
Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
|
||||
md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9]
|
||||
5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
|
||||
|
||||
md127 : active raid1 sdi2[0] sdj2[1]
|
||||
312319552 blocks [2/2] [UU]
|
||||
|
||||
md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1]
|
||||
248896 blocks [2/2] [UU]
|
||||
|
||||
md4 : inactive raid1 sda3[0] sdb3[1]
|
||||
4883648 blocks [2/2] [UU]
|
||||
|
||||
md6 : active raid1 sdb2[2] sda2[0]
|
||||
195310144 blocks [2/1] [U_]
|
||||
[=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
|
||||
|
||||
md8 : active raid1 sdb1[1] sda1[0]
|
||||
195310144 blocks [2/2] [UU]
|
||||
[=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
|
||||
|
||||
md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1]
|
||||
7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU]
|
||||
bitmap: 0/30 pages [0KB], 65536KB chunk
|
||||
|
||||
unused devices: <none>
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/net
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/net/dev
|
||||
Lines: 6
|
||||
Inter-| Receive | Transmit
|
||||
face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
|
||||
vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0
|
||||
lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0
|
||||
docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0
|
||||
eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/net/ip_vs
|
||||
Lines: 21
|
||||
IP Virtual Server version 1.2.1 (size=4096)
|
||||
Prot LocalAddress:Port Scheduler Flags
|
||||
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
|
||||
TCP C0A80016:0CEA wlc
|
||||
-> C0A85216:0CEA Tunnel 100 248 2
|
||||
-> C0A85318:0CEA Tunnel 100 248 2
|
||||
-> C0A85315:0CEA Tunnel 100 248 1
|
||||
TCP C0A80039:0CEA wlc
|
||||
-> C0A85416:0CEA Tunnel 0 0 0
|
||||
-> C0A85215:0CEA Tunnel 100 1499 0
|
||||
-> C0A83215:0CEA Tunnel 100 1498 0
|
||||
TCP C0A80037:0CEA wlc
|
||||
-> C0A8321A:0CEA Tunnel 0 0 0
|
||||
-> C0A83120:0CEA Tunnel 100 0 0
|
||||
TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh
|
||||
-> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0
|
||||
-> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0
|
||||
-> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1
|
||||
FWM 10001000 wlc
|
||||
-> C0A8321A:0CEA Route 0 0 1
|
||||
-> C0A83215:0CEA Route 0 0 2
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/net/ip_vs_stats
|
||||
Lines: 6
|
||||
Total Incoming Outgoing Incoming Outgoing
|
||||
Conns Packets Packets Bytes Bytes
|
||||
16AA370 E33656E5 0 51D8C8883AB3 0
|
||||
|
||||
Conns/s Pkts/s Pkts/s Bytes/s Bytes/s
|
||||
4 1FB3C 0 1282A8F 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/net/rpc
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/net/rpc/nfs
|
||||
Lines: 5
|
||||
net 18628 0 18628 6
|
||||
rpc 4329785 0 4338291
|
||||
proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
|
||||
proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39
|
||||
proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/net/rpc/nfsd
|
||||
Lines: 11
|
||||
rc 0 6 18622
|
||||
fh 0 0 0 0 0
|
||||
io 157286400 0
|
||||
th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
|
||||
ra 32 0 0 0 0 0 0 0 0 0 0 0
|
||||
net 18628 0 18628 6
|
||||
rpc 18628 0 0 0 0
|
||||
proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
|
||||
proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0
|
||||
proc4 2 2 10853
|
||||
proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/net/xfrm_stat
|
||||
Lines: 28
|
||||
XfrmInError 1
|
||||
XfrmInBufferError 2
|
||||
XfrmInHdrError 4
|
||||
XfrmInNoStates 3
|
||||
XfrmInStateProtoError 40
|
||||
XfrmInStateModeError 100
|
||||
XfrmInStateSeqError 6000
|
||||
XfrmInStateExpired 4
|
||||
XfrmInStateMismatch 23451
|
||||
XfrmInStateInvalid 55555
|
||||
XfrmInTmplMismatch 51
|
||||
XfrmInNoPols 65432
|
||||
XfrmInPolBlock 100
|
||||
XfrmInPolError 10000
|
||||
XfrmOutError 1000000
|
||||
XfrmOutBundleGenError 43321
|
||||
XfrmOutBundleCheckError 555
|
||||
XfrmOutNoStates 869
|
||||
XfrmOutStateProtoError 4542
|
||||
XfrmOutStateModeError 4
|
||||
XfrmOutStateSeqError 543
|
||||
XfrmOutStateExpired 565
|
||||
XfrmOutPolBlock 43456
|
||||
XfrmOutPolDead 7656
|
||||
XfrmOutPolError 1454
|
||||
XfrmFwdHdrError 6654
|
||||
XfrmOutStateInvalid 28765
|
||||
XfrmAcquireError 24532
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/self
|
||||
SymlinkTo: 26231
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/stat
|
||||
Lines: 16
|
||||
cpu 301854 612 111922 8979004 3552 2 3944 0 0 0
|
||||
cpu0 44490 19 21045 1087069 220 1 3410 0 0 0
|
||||
cpu1 47869 23 16474 1110787 591 0 46 0 0 0
|
||||
cpu2 46504 36 15916 1112321 441 0 326 0 0 0
|
||||
cpu3 47054 102 15683 1113230 533 0 60 0 0 0
|
||||
cpu4 28413 25 10776 1140321 217 0 8 0 0 0
|
||||
cpu5 29271 101 11586 1136270 672 0 30 0 0 0
|
||||
cpu6 29152 36 10276 1139721 319 0 29 0 0 0
|
||||
cpu7 29098 268 10164 1139282 555 0 31 0 0 0
|
||||
intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
ctxt 38014093
|
||||
btime 1418183276
|
||||
processes 26442
|
||||
procs_running 2
|
||||
procs_blocked 1
|
||||
softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/symlinktargets
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/symlinktargets/README
|
||||
Lines: 2
|
||||
This directory contains some empty files that are the symlinks the files in the "fd" directory point to.
|
||||
They are otherwise ignored by the tests
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/symlinktargets/abc
|
||||
Lines: 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/symlinktargets/def
|
||||
Lines: 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/symlinktargets/ghi
|
||||
Lines: 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/symlinktargets/uvw
|
||||
Lines: 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/symlinktargets/xyz
|
||||
Lines: 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
15 vendor/github.com/prometheus/procfs/internal/util/parse.go generated vendored
@@ -13,7 +13,11 @@

package util

import "strconv"
import (
	"io/ioutil"
	"strconv"
	"strings"
)

// ParseUint32s parses a slice of strings into a slice of uint32s.
func ParseUint32s(ss []string) ([]uint32, error) {
@@ -44,3 +48,12 @@ func ParseUint64s(ss []string) ([]uint64, error) {

	return us, nil
}

// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}
45 vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go generated vendored Normal file
@@ -0,0 +1,45 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows

package util

import (
	"bytes"
	"os"
	"syscall"
)

// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
// https://github.com/prometheus/node_exporter/pull/728/files
func SysReadFile(file string) (string, error) {
	f, err := os.Open(file)
	if err != nil {
		return "", err
	}
	defer f.Close()

	// On some machines, hwmon drivers are broken and return EAGAIN. This causes
	// Go's ioutil.ReadFile implementation to poll forever.
	//
	// Since we either want to read data or bail immediately, do the simplest
	// possible read using syscall directly.
	b := make([]byte, 128)
	n, err := syscall.Read(int(f.Fd()), b)
	if err != nil {
		return "", err
	}

	return string(bytes.TrimSpace(b[:n])), nil
}
51 vendor/github.com/prometheus/procfs/mountstats.go generated vendored
|
@ -39,8 +39,11 @@ const (
|
|||
statVersion10 = "1.0"
|
||||
statVersion11 = "1.1"
|
||||
|
||||
fieldTransport10Len = 10
|
||||
fieldTransport11Len = 13
|
||||
fieldTransport10TCPLen = 10
|
||||
fieldTransport10UDPLen = 7
|
||||
|
||||
fieldTransport11TCPLen = 13
|
||||
fieldTransport11UDPLen = 10
|
||||
)
|
||||
|
||||
// A Mount is a device mount parsed from /proc/[pid]/mountstats.
|
||||
|
@ -186,6 +189,8 @@ type NFSOperationStats struct {
|
|||
// A NFSTransportStats contains statistics for the NFS mount RPC requests and
|
||||
// responses.
|
||||
type NFSTransportStats struct {
|
||||
// The transport protocol used for the NFS mount.
|
||||
Protocol string
|
||||
// The local port used for the NFS mount.
|
||||
Port uint64
|
||||
// Number of times the client has had to establish a connection from scratch
|
||||
|
@ -360,7 +365,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
|
|||
return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
|
||||
}
|
||||
|
||||
tstats, err := parseNFSTransportStats(ss[2:], statVersion)
|
||||
tstats, err := parseNFSTransportStats(ss[1:], statVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -522,13 +527,33 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
|
|||
// parseNFSTransportStats parses a NFSTransportStats line using an input set of
|
||||
// integer fields matched to a specific stats version.
|
||||
func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
|
||||
// Extract the protocol field. It is the only string value in the line
|
||||
protocol := ss[0]
|
||||
ss = ss[1:]
|
||||
|
||||
switch statVersion {
|
||||
case statVersion10:
|
||||
if len(ss) != fieldTransport10Len {
|
||||
var expectedLength int
|
||||
if protocol == "tcp" {
|
||||
expectedLength = fieldTransport10TCPLen
|
||||
} else if protocol == "udp" {
|
||||
expectedLength = fieldTransport10UDPLen
|
||||
} else {
|
||||
return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
|
||||
}
|
||||
if len(ss) != expectedLength {
|
||||
return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
|
||||
}
|
||||
case statVersion11:
|
||||
if len(ss) != fieldTransport11Len {
|
||||
var expectedLength int
|
||||
if protocol == "tcp" {
|
||||
expectedLength = fieldTransport11TCPLen
|
||||
} else if protocol == "udp" {
|
||||
expectedLength = fieldTransport11UDPLen
|
||||
} else {
|
||||
return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
|
||||
}
|
||||
if len(ss) != expectedLength {
|
||||
return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
|
||||
}
|
||||
default:
|
||||
|
@ -536,12 +561,13 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
|
|||
}
|
||||
|
||||
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
|
||||
// in a v1.0 response.
|
||||
// in a v1.0 response. Since the stat length is bigger for TCP stats, we use
|
||||
// the TCP length here.
|
||||
//
|
||||
// Note: slice length must be set to length of v1.1 stats to avoid a panic when
|
||||
// only v1.0 stats are present.
|
||||
// See: https://github.com/prometheus/node_exporter/issues/571.
|
||||
ns := make([]uint64, fieldTransport11Len)
|
||||
ns := make([]uint64, fieldTransport11TCPLen)
|
||||
for i, s := range ss {
|
||||
n, err := strconv.ParseUint(s, 10, 64)
|
||||
if err != nil {
|
||||
|
@ -551,7 +577,18 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
|
|||
ns[i] = n
|
||||
}
|
||||
|
||||
// The fields differ depending on the transport protocol (TCP or UDP)
|
||||
// From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
|
||||
//
|
||||
// For the udp RPC transport there is no connection count, connect idle time,
|
||||
// or idle time (fields #3, #4, and #5); all other fields are the same. So
|
||||
// we set them to 0 here.
|
||||
if protocol == "udp" {
|
||||
ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
|
||||
}
|
||||
|
||||
return &NFSTransportStats{
|
||||
Protocol: protocol,
|
||||
Port: ns[0],
|
||||
Bind: ns[1],
|
||||
Connect: ns[2],
|
||||
|
|
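The mountstats.go hunks above zero-fill the three transport fields that UDP mounts lack (connection count, connect idle time, idle time) by splicing zeros into the parsed slice. A small self-contained sketch of that slice-splicing idiom, with made-up numbers for illustration:

package main

import "fmt"

func main() {
	// ns holds parsed xprt fields for a hypothetical UDP mount: port, bind count,
	// then the remaining counters. Fields 3-5 are absent for UDP, so three zeros
	// are spliced in after the first two fields to match the TCP field layout,
	// as parseNFSTransportStats does in the diff above.
	ns := []uint64{2049, 1, 7, 8, 9}
	ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
	fmt.Println(ns) // [2049 1 0 0 0 7 8 9]
}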
20 vendor/github.com/prometheus/procfs/proc.go generated vendored
@@ -156,6 +156,26 @@ func (p Proc) Executable() (string, error) {
	return exe, err
}

// Cwd returns the absolute path to the current working directory of the process.
func (p Proc) Cwd() (string, error) {
	wd, err := os.Readlink(p.path("cwd"))
	if os.IsNotExist(err) {
		return "", nil
	}

	return wd, err
}

// RootDir returns the absolute path to the process's root directory (as set by chroot)
func (p Proc) RootDir() (string, error) {
	rdir, err := os.Readlink(p.path("root"))
	if os.IsNotExist(err) {
		return "", nil
	}

	return rdir, err
}

// FileDescriptors returns the currently open file descriptors of a process.
func (p Proc) FileDescriptors() ([]uintptr, error) {
	names, err := p.fileDescriptors()
389 vendor/github.com/prometheus/procfs/ttar generated vendored
|
@ -1,389 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# Purpose: plain text tar format
|
||||
# Limitations: - only suitable for text files, directories, and symlinks
|
||||
# - stores only filename, content, and mode
|
||||
# - not designed for untrusted input
|
||||
#
|
||||
# Note: must work with bash version 3.2 (macOS)
|
||||
|
||||
# Copyright 2017 Roger Luethi
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit -o nounset
|
||||
|
||||
# Sanitize environment (for instance, standard sorting of glob matches)
|
||||
export LC_ALL=C
|
||||
|
||||
path=""
|
||||
CMD=""
|
||||
ARG_STRING="$*"
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Not all sed implementations can work on null bytes. In order to make ttar
|
||||
# work out of the box on macOS, use Python as a stream editor.
|
||||
|
||||
USE_PYTHON=0
|
||||
|
||||
PYTHON_CREATE_FILTER=$(cat << 'PCF'
|
||||
#!/usr/bin/env python
|
||||
|
||||
import re
|
||||
import sys
|
||||
|
||||
for line in sys.stdin:
|
||||
line = re.sub(r'EOF', r'\EOF', line)
|
||||
line = re.sub(r'NULLBYTE', r'\NULLBYTE', line)
|
||||
line = re.sub('\x00', r'NULLBYTE', line)
|
||||
sys.stdout.write(line)
|
||||
PCF
|
||||
)
|
||||
|
||||
PYTHON_EXTRACT_FILTER=$(cat << 'PEF'
|
||||
#!/usr/bin/env python
|
||||
|
||||
import re
|
||||
import sys
|
||||
|
||||
for line in sys.stdin:
|
||||
line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line)
|
||||
line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line)
|
||||
line = re.sub(r'([^\\])EOF', r'\1', line)
|
||||
line = re.sub(r'\\EOF', 'EOF', line)
|
||||
sys.stdout.write(line)
|
||||
PEF
|
||||
)
|
||||
|
||||
function test_environment {
|
||||
if [[ "$(echo "a" | sed 's/a/\x0/' | wc -c)" -ne 2 ]]; then
|
||||
echo "WARNING sed unable to handle null bytes, using Python (slow)."
|
||||
if ! which python >/dev/null; then
|
||||
echo "ERROR Python not found. Aborting."
|
||||
exit 2
|
||||
fi
|
||||
USE_PYTHON=1
|
||||
fi
|
||||
}
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
function usage {
|
||||
bname=$(basename "$0")
|
||||
cat << USAGE
|
||||
Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
|
||||
$bname -t -f <ARCHIVE> (list archive contents)
|
||||
$bname [-C <DIR>] -x -f <ARCHIVE> (extract archive)
|
||||
|
||||
Options:
|
||||
-C <DIR> (change directory)
|
||||
-v (verbose)
|
||||
|
||||
Example: Change to sysfs directory, create ttar file from fixtures directory
|
||||
$bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
|
||||
USAGE
|
||||
exit "$1"
|
||||
}
|
||||
|
||||
function vecho {
|
||||
if [ "${VERBOSE:-}" == "yes" ]; then
|
||||
echo >&7 "$@"
|
||||
fi
|
||||
}
|
||||
|
||||
function set_cmd {
|
||||
if [ -n "$CMD" ]; then
|
||||
echo "ERROR: more than one command given"
|
||||
echo
|
||||
usage 2
|
||||
fi
|
||||
CMD=$1
|
||||
}
|
||||
|
||||
unset VERBOSE
|
||||
|
||||
while getopts :cf:htxvC: opt; do
|
||||
case $opt in
|
||||
c)
|
||||
set_cmd "create"
|
||||
;;
|
||||
f)
|
||||
ARCHIVE=$OPTARG
|
||||
;;
|
||||
h)
|
||||
usage 0
|
||||
;;
|
||||
t)
|
||||
set_cmd "list"
|
||||
;;
|
||||
x)
|
||||
set_cmd "extract"
|
||||
;;
|
||||
v)
|
||||
VERBOSE=yes
|
||||
exec 7>&1
|
||||
;;
|
||||
C)
|
||||
CDIR=$OPTARG
|
||||
;;
|
||||
*)
|
||||
echo >&2 "ERROR: invalid option -$OPTARG"
|
||||
echo
|
||||
usage 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Remove processed options from arguments
|
||||
shift $(( OPTIND - 1 ));
|
||||
|
||||
if [ "${CMD:-}" == "" ]; then
|
||||
echo >&2 "ERROR: no command given"
|
||||
echo
|
||||
usage 1
|
||||
elif [ "${ARCHIVE:-}" == "" ]; then
|
||||
echo >&2 "ERROR: no archive name given"
|
||||
echo
|
||||
usage 1
|
||||
fi
|
||||
|
||||
function list {
|
||||
local path=""
|
||||
local size=0
|
||||
local line_no=0
|
||||
local ttar_file=$1
|
||||
if [ -n "${2:-}" ]; then
|
||||
echo >&2 "ERROR: too many arguments."
|
||||
echo
|
||||
usage 1
|
||||
fi
|
||||
if [ ! -e "$ttar_file" ]; then
|
||||
echo >&2 "ERROR: file not found ($ttar_file)"
|
||||
echo
|
||||
usage 1
|
||||
fi
|
||||
while read -r line; do
|
||||
line_no=$(( line_no + 1 ))
|
||||
if [ $size -gt 0 ]; then
|
||||
size=$(( size - 1 ))
|
||||
continue
|
||||
fi
|
||||
if [[ $line =~ ^Path:\ (.*)$ ]]; then
|
||||
path=${BASH_REMATCH[1]}
|
||||
elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
|
||||
size=${BASH_REMATCH[1]}
|
||||
echo "$path"
|
||||
elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
|
||||
path=${BASH_REMATCH[1]}
|
||||
echo "$path/"
|
||||
elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
|
||||
echo "$path -> ${BASH_REMATCH[1]}"
|
||||
fi
|
||||
done < "$ttar_file"
|
||||
}
|
||||
|
||||
function extract {
|
||||
local path=""
|
||||
local size=0
|
||||
local line_no=0
|
||||
local ttar_file=$1
|
||||
if [ -n "${2:-}" ]; then
|
||||
echo >&2 "ERROR: too many arguments."
|
||||
echo
|
||||
usage 1
|
||||
fi
|
||||
if [ ! -e "$ttar_file" ]; then
|
||||
echo >&2 "ERROR: file not found ($ttar_file)"
|
||||
echo
|
||||
usage 1
|
||||
fi
|
||||
while IFS= read -r line; do
|
||||
line_no=$(( line_no + 1 ))
|
||||
local eof_without_newline
|
||||
if [ "$size" -gt 0 ]; then
|
||||
if [[ "$line" =~ [^\\]EOF ]]; then
|
||||
# An EOF not preceeded by a backslash indicates that the line
|
||||
# does not end with a newline
|
||||
eof_without_newline=1
|
||||
else
|
||||
eof_without_newline=0
|
||||
fi
|
||||
# Replace NULLBYTE with null byte if at beginning of line
|
||||
# Replace NULLBYTE with null byte unless preceeded by backslash
|
||||
# Remove one backslash in front of NULLBYTE (if any)
|
||||
# Remove EOF unless preceeded by backslash
|
||||
# Remove one backslash in front of EOF
|
||||
if [ $USE_PYTHON -eq 1 ]; then
|
||||
echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path"
|
||||
else
|
||||
# The repeated pattern makes up for sed's lack of negative
|
||||
# lookbehind assertions (for consecutive null bytes).
|
||||
echo -n "$line" | \
|
||||
sed -e 's/^NULLBYTE/\x0/g;
|
||||
s/\([^\\]\)NULLBYTE/\1\x0/g;
|
||||
s/\([^\\]\)NULLBYTE/\1\x0/g;
|
||||
s/\\NULLBYTE/NULLBYTE/g;
|
||||
s/\([^\\]\)EOF/\1/g;
|
||||
s/\\EOF/EOF/g;
|
||||
' >> "$path"
|
||||
fi
|
||||
if [[ "$eof_without_newline" -eq 0 ]]; then
|
||||
echo >> "$path"
|
||||
fi
|
||||
size=$(( size - 1 ))
|
||||
continue
|
||||
fi
|
||||
if [[ $line =~ ^Path:\ (.*)$ ]]; then
|
||||
path=${BASH_REMATCH[1]}
|
||||
if [ -e "$path" ] || [ -L "$path" ]; then
|
||||
rm "$path"
|
||||
fi
|
||||
elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
|
||||
size=${BASH_REMATCH[1]}
|
||||
# Create file even if it is zero-length.
|
||||
touch "$path"
|
||||
vecho " $path"
|
||||
elif [[ $line =~ ^Mode:\ (.*)$ ]]; then
|
||||
mode=${BASH_REMATCH[1]}
|
||||
chmod "$mode" "$path"
|
||||
vecho "$mode"
|
||||
elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
|
||||
path=${BASH_REMATCH[1]}
|
||||
mkdir -p "$path"
|
||||
vecho " $path/"
|
||||
elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
|
||||
ln -s "${BASH_REMATCH[1]}" "$path"
|
||||
vecho " $path -> ${BASH_REMATCH[1]}"
|
||||
elif [[ $line =~ ^# ]]; then
|
||||
# Ignore comments between files
|
||||
continue
|
||||
else
|
||||
echo >&2 "ERROR: Unknown keyword on line $line_no: $line"
|
||||
exit 1
|
||||
fi
|
||||
done < "$ttar_file"
|
||||
}
|
||||
|
||||
function div {
|
||||
echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \
|
||||
"- - - - - -"
|
||||
}
|
||||
|
||||
function get_mode {
|
||||
local mfile=$1
|
||||
if [ -z "${STAT_OPTION:-}" ]; then
|
||||
if stat -c '%a' "$mfile" >/dev/null 2>&1; then
|
||||
# GNU stat
|
||||
STAT_OPTION='-c'
|
||||
STAT_FORMAT='%a'
|
||||
else
|
||||
# BSD stat
|
||||
STAT_OPTION='-f'
|
||||
# Octal output, user/group/other (omit file type, sticky bit)
|
||||
STAT_FORMAT='%OLp'
|
||||
fi
|
||||
fi
|
||||
stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile"
|
||||
}
|
||||
|
||||
function _create {
|
||||
shopt -s nullglob
|
||||
local mode
|
||||
local eof_without_newline
|
||||
while (( "$#" )); do
|
||||
file=$1
|
||||
if [ -L "$file" ]; then
|
||||
echo "Path: $file"
|
||||
symlinkTo=$(readlink "$file")
|
||||
echo "SymlinkTo: $symlinkTo"
|
||||
vecho " $file -> $symlinkTo"
|
||||
div
|
||||
elif [ -d "$file" ]; then
|
||||
# Strip trailing slash (if there is one)
|
||||
file=${file%/}
|
||||
echo "Directory: $file"
|
||||
mode=$(get_mode "$file")
|
||||
echo "Mode: $mode"
|
||||
vecho "$mode $file/"
|
||||
div
|
||||
# Find all files and dirs, including hidden/dot files
|
||||
for x in "$file/"{*,.[^.]*}; do
|
||||
_create "$x"
|
||||
done
|
||||
elif [ -f "$file" ]; then
|
||||
echo "Path: $file"
|
||||
lines=$(wc -l "$file"|awk '{print $1}')
|
||||
eof_without_newline=0
|
||||
if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \
|
||||
[[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then
|
||||
eof_without_newline=1
|
||||
lines=$((lines+1))
|
||||
fi
|
||||
echo "Lines: $lines"
|
||||
# Add backslash in front of EOF
|
||||
# Add backslash in front of NULLBYTE
|
||||
# Replace null byte with NULLBYTE
|
||||
if [ $USE_PYTHON -eq 1 ]; then
|
||||
< "$file" python -c "$PYTHON_CREATE_FILTER"
|
||||
else
|
||||
< "$file" \
|
||||
sed 's/EOF/\\EOF/g;
|
||||
s/NULLBYTE/\\NULLBYTE/g;
|
||||
s/\x0/NULLBYTE/g;
|
||||
'
|
||||
fi
|
||||
if [[ "$eof_without_newline" -eq 1 ]]; then
|
||||
# Finish line with EOF to indicate that the original line did
|
||||
# not end with a linefeed
|
||||
echo "EOF"
|
||||
fi
|
||||
mode=$(get_mode "$file")
|
||||
echo "Mode: $mode"
|
||||
vecho "$mode $file"
|
||||
div
|
||||
else
|
||||
echo >&2 "ERROR: file not found ($file in $(pwd))"
|
||||
exit 2
|
||||
fi
|
||||
shift
|
||||
done
|
||||
}
|
||||
|
||||
function create {
|
||||
ttar_file=$1
|
||||
shift
|
||||
if [ -z "${1:-}" ]; then
|
||||
echo >&2 "ERROR: missing arguments."
|
||||
echo
|
||||
usage 1
|
||||
fi
|
||||
if [ -e "$ttar_file" ]; then
|
||||
rm "$ttar_file"
|
||||
fi
|
||||
exec > "$ttar_file"
|
||||
echo "# Archive created by ttar $ARG_STRING"
|
||||
_create "$@"
|
||||
}
|
||||
|
||||
test_environment
|
||||
|
||||
if [ -n "${CDIR:-}" ]; then
|
||||
if [[ "$ARCHIVE" != /* ]]; then
|
||||
# Relative path: preserve the archive's location before changing
|
||||
# directory
|
||||
ARCHIVE="$(pwd)/$ARCHIVE"
|
||||
fi
|
||||
cd "$CDIR"
|
||||
fi
|
||||
|
||||
"$CMD" "$ARCHIVE" "$@"
|
vendor/github.com/prometheus/procfs/xfrm.go (generated, vendored): 2 changes
@@ -113,7 +113,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) {
|
|||
|
||||
if len(fields) != 2 {
|
||||
return XfrmStat{}, fmt.Errorf(
|
||||
"couldnt parse %s line %s", file.Name(), s.Text())
|
||||
"couldn't parse %s line %s", file.Name(), s.Text())
|
||||
}
|
||||
|
||||
name := fields[0]
|
||||
|
|
vendor/github.com/spf13/pflag/.gitignore (generated, vendored): 2 changes
@@ -1,2 +0,0 @@
|
|||
.idea/*
|
||||
|
vendor/github.com/spf13/pflag/.travis.yml (generated, vendored): 21 changes
@@ -1,21 +0,0 @@
|
|||
sudo: false
|
||||
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.7.3
|
||||
- 1.8.1
|
||||
- tip
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
|
||||
install:
|
||||
- go get github.com/golang/lint/golint
|
||||
- export PATH=$GOPATH/bin:$PATH
|
||||
- go install ./...
|
||||
|
||||
script:
|
||||
- verify/all.sh -v
|
||||
- go test ./...
|
vendor/github.com/spf13/pflag/README.md (generated, vendored): 296 changes
@@ -1,296 +0,0 @@
|
|||
[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag)
|
||||
[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag)
|
||||
[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag)
|
||||
|
||||
## Description
|
||||
|
||||
pflag is a drop-in replacement for Go's flag package, implementing
|
||||
POSIX/GNU-style --flags.
|
||||
|
||||
pflag is compatible with the [GNU extensions to the POSIX recommendations
|
||||
for command-line options][1]. For a more precise description, see the
|
||||
"Command-line flag syntax" section below.
|
||||
|
||||
[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
|
||||
|
||||
pflag is available under the same style of BSD license as the Go language,
|
||||
which can be found in the LICENSE file.
|
||||
|
||||
## Installation
|
||||
|
||||
pflag is available using the standard `go get` command.
|
||||
|
||||
Install by running:
|
||||
|
||||
go get github.com/spf13/pflag
|
||||
|
||||
Run tests by running:
|
||||
|
||||
go test github.com/spf13/pflag
|
||||
|
||||
## Usage
|
||||
|
||||
pflag is a drop-in replacement of Go's native flag package. If you import
|
||||
pflag under the name "flag" then all code should continue to function
|
||||
with no changes.
|
||||
|
||||
``` go
|
||||
import flag "github.com/spf13/pflag"
|
||||
```
|
||||
|
||||
There is one exception to this: if you directly instantiate the Flag struct
|
||||
there is one more field "Shorthand" that you will need to set.
|
||||
Most code never instantiates this struct directly, and instead uses
|
||||
functions such as String(), BoolVar(), and Var(), and is therefore
|
||||
unaffected.
|
||||
|
||||
Define flags using flag.String(), Bool(), Int(), etc.
|
||||
|
||||
This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
|
||||
|
||||
``` go
|
||||
var ip *int = flag.Int("flagname", 1234, "help message for flagname")
|
||||
```
|
||||
|
||||
If you like, you can bind the flag to a variable using the Var() functions.
|
||||
|
||||
``` go
|
||||
var flagvar int
|
||||
func init() {
|
||||
flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
|
||||
}
|
||||
```
|
||||
|
||||
Or you can create custom flags that satisfy the Value interface (with
|
||||
pointer receivers) and couple them to flag parsing by
|
||||
|
||||
``` go
|
||||
flag.Var(&flagVal, "name", "help message for flagname")
|
||||
```
|
||||
|
||||
For such flags, the default value is just the initial value of the variable.
|
||||
|
||||
After all flags are defined, call
|
||||
|
||||
``` go
|
||||
flag.Parse()
|
||||
```
|
||||
|
||||
to parse the command line into the defined flags.
|
||||
|
||||
Flags may then be used directly. If you're using the flags themselves,
|
||||
they are all pointers; if you bind to variables, they're values.
|
||||
|
||||
``` go
|
||||
fmt.Println("ip has value ", *ip)
|
||||
fmt.Println("flagvar has value ", flagvar)
|
||||
```
|
||||
|
||||
There are helper functions to get values later if you have the FlagSet but
find it difficult to keep up with all of the flag pointers in your code.
|
||||
If you have a pflag.FlagSet with a flag called 'flagname' of type int you
|
||||
can use GetInt() to get the int value. But notice that 'flagname' must exist
|
||||
and it must be an int. GetString("flagname") will fail.
|
||||
|
||||
``` go
|
||||
i, err := flagset.GetInt("flagname")
|
||||
```
|
||||
|
||||
After parsing, the arguments after the flag are available as the
|
||||
slice flag.Args() or individually as flag.Arg(i).
|
||||
The arguments are indexed from 0 through flag.NArg()-1.
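
A short illustrative sketch of inspecting those positional arguments once `flag.Parse()` has run; the printed labels are made up, only `flag.NArg()`, `flag.Arg(i)` and `flag.Args()` come from the text above:

``` go
fmt.Println("number of arguments:", flag.NArg())
for i := 0; i < flag.NArg(); i++ {
	fmt.Println("argument", i, "=", flag.Arg(i))
}
fmt.Println("all arguments as a slice:", flag.Args())
```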
|
||||
|
||||
The pflag package also defines some new functions that are not in flag,
|
||||
that give one-letter shorthands for flags. You can use these by appending
|
||||
'P' to the name of any function that defines a flag.
|
||||
|
||||
``` go
|
||||
var ip = flag.IntP("flagname", "f", 1234, "help message")
|
||||
var flagvar bool
|
||||
func init() {
|
||||
flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
|
||||
}
|
||||
flag.VarP(&flagVal, "varname", "v", "help message")
|
||||
```
|
||||
|
||||
Shorthand letters can be used with single dashes on the command line.
|
||||
Boolean shorthand flags can be combined with other shorthand flags.
|
||||
|
||||
The default set of command-line flags is controlled by
|
||||
top-level functions. The FlagSet type allows one to define
|
||||
independent sets of flags, such as to implement subcommands
|
||||
in a command-line interface. The methods of FlagSet are
|
||||
analogous to the top-level functions for the command-line
|
||||
flag set.
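
As an illustrative sketch (the subcommand name and its flag are made up), an independent flag set for a subcommand could be defined and parsed like this:

``` go
subFlags := flag.NewFlagSet("subcommand", flag.ExitOnError)
force := subFlags.BoolP("force", "f", false, "force the operation")

// Parse only the arguments that belong to the subcommand,
// i.e. everything after the subcommand name.
subFlags.Parse(os.Args[2:])
fmt.Println("force:", *force)
```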
|
||||
|
||||
## Setting no option default values for flags
|
||||
|
||||
After you create a flag it is possible to set the pflag.NoOptDefVal for
|
||||
the given flag. Doing this changes the meaning of the flag slightly. If
|
||||
a flag has a NoOptDefVal and the flag is set on the command line without
|
||||
an option the flag will be set to the NoOptDefVal. For example given:
|
||||
|
||||
``` go
|
||||
var ip = flag.IntP("flagname", "f", 1234, "help message")
|
||||
flag.Lookup("flagname").NoOptDefVal = "4321"
|
||||
```
|
||||
|
||||
Would result in something like
|
||||
|
||||
| Parsed Arguments | Resulting Value |
|
||||
| ------------- | ------------- |
|
||||
| --flagname=1357 | ip=1357 |
|
||||
| --flagname | ip=4321 |
|
||||
| [nothing] | ip=1234 |
|
||||
|
||||
## Command line flag syntax
|
||||
|
||||
```
|
||||
--flag // boolean flags, or flags with no option default values
|
||||
--flag x // only on flags without a default value
|
||||
--flag=x
|
||||
```
|
||||
|
||||
Unlike the flag package, a single dash before an option means something
|
||||
different than a double dash. Single dashes signify a series of shorthand
|
||||
letters for flags. All but the last shorthand letter must be boolean flags
|
||||
or a flag with a default value
|
||||
|
||||
```
|
||||
// boolean or flags where the 'no option default value' is set
|
||||
-f
|
||||
-f=true
|
||||
-abc
|
||||
but
|
||||
-b true is INVALID
|
||||
|
||||
// non-boolean and flags without a 'no option default value'
|
||||
-n 1234
|
||||
-n=1234
|
||||
-n1234
|
||||
|
||||
// mixed
|
||||
-abcs "hello"
|
||||
-absd="hello"
|
||||
-abcs1234
|
||||
```
|
||||
|
||||
Flag parsing stops after the terminator "--". Unlike the flag package,
|
||||
flags can be interspersed with arguments anywhere on the command line
|
||||
before this terminator.
|
||||
|
||||
Integer flags accept 1234, 0664, 0x1234 and may be negative.
|
||||
Boolean flags (in their long form) accept 1, 0, t, f, true, false,
|
||||
TRUE, FALSE, True, False.
|
||||
Duration flags accept any input valid for time.ParseDuration.
|
||||
|
||||
## Mutating or "Normalizing" Flag names
|
||||
|
||||
It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
|
||||
|
||||
**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
|
||||
|
||||
``` go
|
||||
func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
|
||||
from := []string{"-", "_"}
|
||||
to := "."
|
||||
for _, sep := range from {
|
||||
name = strings.Replace(name, sep, to, -1)
|
||||
}
|
||||
return pflag.NormalizedName(name)
|
||||
}
|
||||
|
||||
myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
|
||||
```
|
||||
|
||||
**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
|
||||
|
||||
``` go
|
||||
func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
|
||||
switch name {
|
||||
case "old-flag-name":
|
||||
name = "new-flag-name"
|
||||
break
|
||||
}
|
||||
return pflag.NormalizedName(name)
|
||||
}
|
||||
|
||||
myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
|
||||
```
|
||||
|
||||
## Deprecating a flag or its shorthand
|
||||
It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.
|
||||
|
||||
**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.
|
||||
```go
|
||||
// deprecate a flag by specifying its name and a usage message
|
||||
flags.MarkDeprecated("badflag", "please use --good-flag instead")
|
||||
```
|
||||
This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.
|
||||
|
||||
**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n".
|
||||
```go
|
||||
// deprecate a flag shorthand by specifying its flag name and a usage message
|
||||
flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
|
||||
```
|
||||
This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.
|
||||
|
||||
Note that the usage message is essential here, and it should not be empty.
|
||||
|
||||
## Hidden flags
|
||||
It is possible to mark a flag as hidden, meaning it will still function as normal but will not show up in usage/help text.
|
||||
|
||||
**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available.
|
||||
```go
|
||||
// hide a flag by specifying its name
|
||||
flags.MarkHidden("secretFlag")
|
||||
```
|
||||
|
||||
## Disable sorting of flags
|
||||
`pflag` allows you to disable sorting of flags for help and usage message.
|
||||
|
||||
**Example**:
|
||||
```go
|
||||
flags.BoolP("verbose", "v", false, "verbose output")
|
||||
flags.String("coolflag", "yeaah", "it's really cool flag")
|
||||
flags.Int("usefulflag", 777, "sometimes it's very useful")
|
||||
flags.SortFlags = false
|
||||
flags.PrintDefaults()
|
||||
```
|
||||
**Output**:
|
||||
```
|
||||
-v, --verbose verbose output
|
||||
--coolflag string it's really cool flag (default "yeaah")
|
||||
--usefulflag int sometimes it's very useful (default 777)
|
||||
```
|
||||
|
||||
|
||||
## Supporting Go flags when using pflag
|
||||
In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary
|
||||
to support flags defined by third-party dependencies (e.g. `golang/glog`).
|
||||
|
||||
**Example**: You want to add the Go flags to the `CommandLine` flagset
|
||||
```go
|
||||
import (
|
||||
goflag "flag"
|
||||
flag "github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
var ip *int = flag.Int("flagname", 1234, "help message for flagname")
|
||||
|
||||
func main() {
|
||||
flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
|
||||
flag.Parse()
|
||||
}
|
||||
```
|
||||
|
||||
## More info
|
||||
|
||||
You can see the full reference documentation of the pflag package
|
||||
[at godoc.org][3], or through go's standard documentation system by
|
||||
running `godoc -http=:6060` and browsing to
|
||||
[http://localhost:6060/pkg/github.com/spf13/pflag][2] after
|
||||
installation.
|
||||
|
||||
[2]: http://localhost:6060/pkg/github.com/spf13/pflag
|
||||
[3]: http://godoc.org/github.com/spf13/pflag
|
vendor/github.com/spf13/pflag/bytes.go (generated, vendored): 104 changes
@@ -1,6 +1,7 @@
|
|||
package pflag
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
@@ -9,10 +10,12 @@ import (
|
|||
// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded
|
||||
type bytesHexValue []byte
|
||||
|
||||
// String implements pflag.Value.String.
|
||||
func (bytesHex bytesHexValue) String() string {
|
||||
return fmt.Sprintf("%X", []byte(bytesHex))
|
||||
}
|
||||
|
||||
// Set implements pflag.Value.Set.
|
||||
func (bytesHex *bytesHexValue) Set(value string) error {
|
||||
bin, err := hex.DecodeString(strings.TrimSpace(value))
|
||||
|
||||
|
@@ -25,6 +28,7 @@ func (bytesHex *bytesHexValue) Set(value string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// Type implements pflag.Value.Type.
|
||||
func (*bytesHexValue) Type() string {
|
||||
return "bytesHex"
|
||||
}
|
||||
|
@@ -103,3 +107,103 @@ func BytesHex(name string, value []byte, usage string) *[]byte {
|
|||
func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
|
||||
return CommandLine.BytesHexP(name, shorthand, value, usage)
|
||||
}
|
||||
|
||||
// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded
|
||||
type bytesBase64Value []byte
|
||||
|
||||
// String implements pflag.Value.String.
|
||||
func (bytesBase64 bytesBase64Value) String() string {
|
||||
return base64.StdEncoding.EncodeToString([]byte(bytesBase64))
|
||||
}
|
||||
|
||||
// Set implements pflag.Value.Set.
|
||||
func (bytesBase64 *bytesBase64Value) Set(value string) error {
|
||||
bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*bytesBase64 = bin
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type implements pflag.Value.Type.
|
||||
func (*bytesBase64Value) Type() string {
|
||||
return "bytesBase64"
|
||||
}
|
||||
|
||||
func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value {
|
||||
*p = val
|
||||
return (*bytesBase64Value)(p)
|
||||
}
|
||||
|
||||
func bytesBase64ValueConv(sval string) (interface{}, error) {
|
||||
|
||||
bin, err := base64.StdEncoding.DecodeString(sval)
|
||||
if err == nil {
|
||||
return bin, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
|
||||
}
|
||||
|
||||
// GetBytesBase64 return the []byte value of a flag with the given name
|
||||
func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) {
|
||||
val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv)
|
||||
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
return val.([]byte), nil
|
||||
}
|
||||
|
||||
// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
|
||||
// The argument p points to an []byte variable in which to store the value of the flag.
|
||||
func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
|
||||
f.VarP(newBytesBase64Value(value, p), name, "", usage)
|
||||
}
|
||||
|
||||
// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
|
||||
func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
|
||||
f.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
|
||||
}
|
||||
|
||||
// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
|
||||
// The argument p points to an []byte variable in which to store the value of the flag.
|
||||
func BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
|
||||
CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage)
|
||||
}
|
||||
|
||||
// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
|
||||
func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
|
||||
CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
|
||||
}
|
||||
|
||||
// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
|
||||
// The return value is the address of an []byte variable that stores the value of the flag.
|
||||
func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
|
||||
p := new([]byte)
|
||||
f.BytesBase64VarP(p, name, "", value, usage)
|
||||
return p
|
||||
}
|
||||
|
||||
// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
|
||||
func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
|
||||
p := new([]byte)
|
||||
f.BytesBase64VarP(p, name, shorthand, value, usage)
|
||||
return p
|
||||
}
|
||||
|
||||
// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
|
||||
// The return value is the address of an []byte variable that stores the value of the flag.
|
||||
func BytesBase64(name string, value []byte, usage string) *[]byte {
|
||||
return CommandLine.BytesBase64P(name, "", value, usage)
|
||||
}
|
||||
|
||||
// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
|
||||
func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
|
||||
return CommandLine.BytesBase64P(name, shorthand, value, usage)
|
||||
}
|
||||
|
|
vendor/github.com/spf13/pflag/flag.go (generated, vendored): 8 changes
@@ -925,13 +925,16 @@ func stripUnknownFlagValue(args []string) []string {
|
|||
}
|
||||
|
||||
first := args[0]
|
||||
if first[0] == '-' {
|
||||
if len(first) > 0 && first[0] == '-' {
|
||||
//--unknown --next-flag ...
|
||||
return args
|
||||
}
|
||||
|
||||
//--unknown arg ... (args will be arg ...)
|
||||
if len(args) > 1 {
|
||||
return args[1:]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
|
||||
|
@@ -990,11 +993,12 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
|
|||
}
|
||||
|
||||
func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
|
||||
outArgs = args
|
||||
|
||||
if strings.HasPrefix(shorthands, "test.") {
|
||||
return
|
||||
}
|
||||
|
||||
outArgs = args
|
||||
outShorts = shorthands[1:]
|
||||
c := shorthands[0]
|
||||
|
||||
|
|
vendor/github.com/spf13/pflag/string_to_int.go (generated, vendored, new file): 149 changes
@@ -0,0 +1,149 @@
|
|||
package pflag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// -- stringToInt Value
|
||||
type stringToIntValue struct {
|
||||
value *map[string]int
|
||||
changed bool
|
||||
}
|
||||
|
||||
func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue {
|
||||
ssv := new(stringToIntValue)
|
||||
ssv.value = p
|
||||
*ssv.value = val
|
||||
return ssv
|
||||
}
|
||||
|
||||
// Format: a=1,b=2
|
||||
func (s *stringToIntValue) Set(val string) error {
|
||||
ss := strings.Split(val, ",")
|
||||
out := make(map[string]int, len(ss))
|
||||
for _, pair := range ss {
|
||||
kv := strings.SplitN(pair, "=", 2)
|
||||
if len(kv) != 2 {
|
||||
return fmt.Errorf("%s must be formatted as key=value", pair)
|
||||
}
|
||||
var err error
|
||||
out[kv[0]], err = strconv.Atoi(kv[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if !s.changed {
|
||||
*s.value = out
|
||||
} else {
|
||||
for k, v := range out {
|
||||
(*s.value)[k] = v
|
||||
}
|
||||
}
|
||||
s.changed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *stringToIntValue) Type() string {
|
||||
return "stringToInt"
|
||||
}
|
||||
|
||||
func (s *stringToIntValue) String() string {
|
||||
var buf bytes.Buffer
|
||||
i := 0
|
||||
for k, v := range *s.value {
|
||||
if i > 0 {
|
||||
buf.WriteRune(',')
|
||||
}
|
||||
buf.WriteString(k)
|
||||
buf.WriteRune('=')
|
||||
buf.WriteString(strconv.Itoa(v))
|
||||
i++
|
||||
}
|
||||
return "[" + buf.String() + "]"
|
||||
}
|
||||
|
||||
func stringToIntConv(val string) (interface{}, error) {
|
||||
val = strings.Trim(val, "[]")
|
||||
// An empty string would cause an empty map
|
||||
if len(val) == 0 {
|
||||
return map[string]int{}, nil
|
||||
}
|
||||
ss := strings.Split(val, ",")
|
||||
out := make(map[string]int, len(ss))
|
||||
for _, pair := range ss {
|
||||
kv := strings.SplitN(pair, "=", 2)
|
||||
if len(kv) != 2 {
|
||||
return nil, fmt.Errorf("%s must be formatted as key=value", pair)
|
||||
}
|
||||
var err error
|
||||
out[kv[0]], err = strconv.Atoi(kv[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// GetStringToInt return the map[string]int value of a flag with the given name
|
||||
func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) {
|
||||
val, err := f.getFlagType(name, "stringToInt", stringToIntConv)
|
||||
if err != nil {
|
||||
return map[string]int{}, err
|
||||
}
|
||||
return val.(map[string]int), nil
|
||||
}
|
||||
|
||||
// StringToIntVar defines a string flag with specified name, default value, and usage string.
|
||||
// The argument p points to a map[string]int variable in which to store the values of the multiple flags.
|
||||
// The value of each argument will not try to be separated by comma
|
||||
func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
|
||||
f.VarP(newStringToIntValue(value, p), name, "", usage)
|
||||
}
|
||||
|
||||
// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
|
||||
func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
|
||||
f.VarP(newStringToIntValue(value, p), name, shorthand, usage)
|
||||
}
|
||||
|
||||
// StringToIntVar defines a string flag with specified name, default value, and usage string.
|
||||
// The argument p points to a map[string]int variable in which to store the value of the flag.
|
||||
// The value of each argument will not try to be separated by comma
|
||||
func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
|
||||
CommandLine.VarP(newStringToIntValue(value, p), name, "", usage)
|
||||
}
|
||||
|
||||
// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
|
||||
func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
|
||||
CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage)
|
||||
}
|
||||
|
||||
// StringToInt defines a string flag with specified name, default value, and usage string.
|
||||
// The return value is the address of a map[string]int variable that stores the value of the flag.
|
||||
// The value of each argument will not try to be separated by comma
|
||||
func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int {
|
||||
p := map[string]int{}
|
||||
f.StringToIntVarP(&p, name, "", value, usage)
|
||||
return &p
|
||||
}
|
||||
|
||||
// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
|
||||
func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
|
||||
p := map[string]int{}
|
||||
f.StringToIntVarP(&p, name, shorthand, value, usage)
|
||||
return &p
|
||||
}
|
||||
|
||||
// StringToInt defines a string flag with specified name, default value, and usage string.
|
||||
// The return value is the address of a map[string]int variable that stores the value of the flag.
|
||||
// The value of each argument will not try to be separated by comma
|
||||
func StringToInt(name string, value map[string]int, usage string) *map[string]int {
|
||||
return CommandLine.StringToIntP(name, "", value, usage)
|
||||
}
|
||||
|
||||
// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
|
||||
func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
|
||||
return CommandLine.StringToIntP(name, shorthand, value, usage)
|
||||
}
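
As a hedged usage sketch for this new flag type (the flag name `limits` and the values are made up for illustration), a `stringToInt` flag is defined with the `StringToInt` helper added above and set on the command line as `--limits=a=1,b=2`:

```go
limits := pflag.StringToInt("limits", map[string]int{}, "per-key integer limits")
pflag.Parse()
// After `--limits=a=1,b=2` the map holds {"a": 1, "b": 2}.
fmt.Println(*limits)
```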
|
vendor/github.com/spf13/pflag/string_to_string.go (generated, vendored, new file): 160 changes
@@ -0,0 +1,160 @@
|
|||
package pflag
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/csv"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// -- stringToString Value
|
||||
type stringToStringValue struct {
|
||||
value *map[string]string
|
||||
changed bool
|
||||
}
|
||||
|
||||
func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue {
|
||||
ssv := new(stringToStringValue)
|
||||
ssv.value = p
|
||||
*ssv.value = val
|
||||
return ssv
|
||||
}
|
||||
|
||||
// Format: a=1,b=2
|
||||
func (s *stringToStringValue) Set(val string) error {
|
||||
var ss []string
|
||||
n := strings.Count(val, "=")
|
||||
switch n {
|
||||
case 0:
|
||||
return fmt.Errorf("%s must be formatted as key=value", val)
|
||||
case 1:
|
||||
ss = append(ss, strings.Trim(val, `"`))
|
||||
default:
|
||||
r := csv.NewReader(strings.NewReader(val))
|
||||
var err error
|
||||
ss, err = r.Read()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
out := make(map[string]string, len(ss))
|
||||
for _, pair := range ss {
|
||||
kv := strings.SplitN(pair, "=", 2)
|
||||
if len(kv) != 2 {
|
||||
return fmt.Errorf("%s must be formatted as key=value", pair)
|
||||
}
|
||||
out[kv[0]] = kv[1]
|
||||
}
|
||||
if !s.changed {
|
||||
*s.value = out
|
||||
} else {
|
||||
for k, v := range out {
|
||||
(*s.value)[k] = v
|
||||
}
|
||||
}
|
||||
s.changed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *stringToStringValue) Type() string {
|
||||
return "stringToString"
|
||||
}
|
||||
|
||||
func (s *stringToStringValue) String() string {
|
||||
records := make([]string, 0, len(*s.value)>>1)
|
||||
for k, v := range *s.value {
|
||||
records = append(records, k+"="+v)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
w := csv.NewWriter(&buf)
|
||||
if err := w.Write(records); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
w.Flush()
|
||||
return "[" + strings.TrimSpace(buf.String()) + "]"
|
||||
}
|
||||
|
||||
func stringToStringConv(val string) (interface{}, error) {
|
||||
val = strings.Trim(val, "[]")
|
||||
// An empty string would cause an empty map
|
||||
if len(val) == 0 {
|
||||
return map[string]string{}, nil
|
||||
}
|
||||
r := csv.NewReader(strings.NewReader(val))
|
||||
ss, err := r.Read()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := make(map[string]string, len(ss))
|
||||
for _, pair := range ss {
|
||||
kv := strings.SplitN(pair, "=", 2)
|
||||
if len(kv) != 2 {
|
||||
return nil, fmt.Errorf("%s must be formatted as key=value", pair)
|
||||
}
|
||||
out[kv[0]] = kv[1]
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// GetStringToString return the map[string]string value of a flag with the given name
|
||||
func (f *FlagSet) GetStringToString(name string) (map[string]string, error) {
|
||||
val, err := f.getFlagType(name, "stringToString", stringToStringConv)
|
||||
if err != nil {
|
||||
return map[string]string{}, err
|
||||
}
|
||||
return val.(map[string]string), nil
|
||||
}
|
||||
|
||||
// StringToStringVar defines a string flag with specified name, default value, and usage string.
|
||||
// The argument p points to a map[string]string variable in which to store the values of the multiple flags.
|
||||
// The value of each argument will not try to be separated by comma
|
||||
func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
|
||||
f.VarP(newStringToStringValue(value, p), name, "", usage)
|
||||
}
|
||||
|
||||
// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
|
||||
func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
|
||||
f.VarP(newStringToStringValue(value, p), name, shorthand, usage)
|
||||
}
|
||||
|
||||
// StringToStringVar defines a string flag with specified name, default value, and usage string.
|
||||
// The argument p points to a map[string]string variable in which to store the value of the flag.
|
||||
// The value of each argument will not try to be separated by comma
|
||||
func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
|
||||
CommandLine.VarP(newStringToStringValue(value, p), name, "", usage)
|
||||
}
|
||||
|
||||
// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
|
||||
func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
|
||||
CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage)
|
||||
}
|
||||
|
||||
// StringToString defines a string flag with specified name, default value, and usage string.
|
||||
// The return value is the address of a map[string]string variable that stores the value of the flag.
|
||||
// The value of each argument will not try to be separated by comma
|
||||
func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string {
|
||||
p := map[string]string{}
|
||||
f.StringToStringVarP(&p, name, "", value, usage)
|
||||
return &p
|
||||
}
|
||||
|
||||
// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
|
||||
func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
|
||||
p := map[string]string{}
|
||||
f.StringToStringVarP(&p, name, shorthand, value, usage)
|
||||
return &p
|
||||
}
|
||||
|
||||
// StringToString defines a string flag with specified name, default value, and usage string.
|
||||
// The return value is the address of a map[string]string variable that stores the value of the flag.
|
||||
// The value of each argument will not try to be separated by comma
|
||||
func StringToString(name string, value map[string]string, usage string) *map[string]string {
|
||||
return CommandLine.StringToStringP(name, "", value, usage)
|
||||
}
|
||||
|
||||
// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
|
||||
func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
|
||||
return CommandLine.StringToStringP(name, shorthand, value, usage)
|
||||
}
|
vendor/github.com/tdewolff/minify/.gitattributes (generated, vendored): 1 change
@@ -1 +0,0 @@
|
|||
benchmarks/sample_* linguist-generated=true
|
vendor/github.com/tdewolff/minify/.gitignore (generated, vendored): 4 changes
@@ -1,4 +0,0 @@
|
|||
dist/
|
||||
benchmarks/*
|
||||
!benchmarks/*.go
|
||||
!benchmarks/sample_*
|
vendor/github.com/tdewolff/minify/.goreleaser.yml (generated, vendored): 26 changes
@@ -1,26 +0,0 @@
|
|||
builds:
|
||||
- binary: minify
|
||||
main: ./cmd/minify/
|
||||
ldflags: -s -w -X main.Version={{.Version}} -X main.Commit={{.Commit}} -X main.Date={{.Date}}
|
||||
goos:
|
||||
- windows
|
||||
- linux
|
||||
- darwin
|
||||
goarch:
|
||||
- amd64
|
||||
- 386
|
||||
- arm
|
||||
- arm64
|
||||
archive:
|
||||
format: tar.gz
|
||||
format_overrides:
|
||||
- goos: windows
|
||||
format: zip
|
||||
name_template: "{{.Binary}}_{{.Version}}_{{.Os}}_{{.Arch}}"
|
||||
files:
|
||||
- README.md
|
||||
- LICENSE.md
|
||||
snapshot:
|
||||
name_template: "devel"
|
||||
release:
|
||||
draft: true
|
vendor/github.com/tdewolff/minify/.travis.yml (generated, vendored): 5 changes
@@ -1,5 +0,0 @@
|
|||
language: go
|
||||
before_install:
|
||||
- go get github.com/mattn/goveralls
|
||||
script:
|
||||
- goveralls -v -service travis-ci -repotoken $COVERALLS_TOKEN -ignore=cmd/minify/* || go test -v ./...
|
vendor/github.com/tdewolff/minify/README.md (generated, vendored): 591 changes
@@ -1,591 +0,0 @@
|
|||
# Minify <a name="minify"></a> [![Build Status](https://travis-ci.org/tdewolff/minify.svg?branch=master)](https://travis-ci.org/tdewolff/minify) [![GoDoc](http://godoc.org/github.com/tdewolff/minify?status.svg)](http://godoc.org/github.com/tdewolff/minify) [![Coverage Status](https://coveralls.io/repos/github/tdewolff/minify/badge.svg?branch=master)](https://coveralls.io/github/tdewolff/minify?branch=master) [![Join the chat at https://gitter.im/tdewolff/minify](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/tdewolff/minify?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
|
||||
**[Online demo](http://go.tacodewolff.nl/minify) if you need to minify files *now*.**
|
||||
|
||||
**[Command line tool](https://github.com/tdewolff/minify/tree/master/cmd/minify) that minifies concurrently and supports watching file changes.**
|
||||
|
||||
**[All releases](https://github.com/tdewolff/minify/releases) for various platforms.**
|
||||
|
||||
---
|
||||
|
||||
Minify is a minifier package written in [Go][1]. It provides HTML5, CSS3, JS, JSON, SVG and XML minifiers and an interface to implement any other minifier. Minification is the process of removing bytes from a file (such as whitespace) without changing its output and therefore shrinking its size and speeding up transmission over the internet and possibly parsing. The implemented minifiers are designed for high performance.
|
||||
|
||||
The core functionality associates mimetypes with minification functions, allowing embedded resources (like CSS or JS within HTML files) to be minified as well. Users can add new implementations that are triggered based on a mimetype (or pattern), or redirect to an external command (like ClosureCompiler, UglifyCSS, ...).
|
||||
|
||||
#### Table of Contents
|
||||
|
||||
- [Minify](#minify)
|
||||
- [Prologue](#prologue)
|
||||
- [Installation](#installation)
|
||||
- [API stability](#api-stability)
|
||||
- [Testing](#testing)
|
||||
- [Performance](#performance)
|
||||
- [HTML](#html)
|
||||
- [Whitespace removal](#whitespace-removal)
|
||||
- [CSS](#css)
|
||||
- [JS](#js)
|
||||
- [JSON](#json)
|
||||
- [SVG](#svg)
|
||||
- [XML](#xml)
|
||||
- [Usage](#usage)
|
||||
- [New](#new)
|
||||
- [From reader](#from-reader)
|
||||
- [From bytes](#from-bytes)
|
||||
- [From string](#from-string)
|
||||
- [To reader](#to-reader)
|
||||
- [To writer](#to-writer)
|
||||
- [Middleware](#middleware)
|
||||
- [Custom minifier](#custom-minifier)
|
||||
- [Mediatypes](#mediatypes)
|
||||
- [Examples](#examples)
|
||||
- [Common minifiers](#common-minifiers)
|
||||
- [Custom minifier](#custom-minifier-example)
|
||||
- [ResponseWriter](#responsewriter)
|
||||
- [Templates](#templates)
|
||||
- [License](#license)
|
||||
|
||||
### Status
|
||||
|
||||
* CSS: **fully implemented**
|
||||
* HTML: **fully implemented**
|
||||
* JS: improved JSmin implementation
|
||||
* JSON: **fully implemented**
|
||||
* SVG: partially implemented; in development
|
||||
* XML: **fully implemented**
|
||||
|
||||
### Roadmap
|
||||
|
||||
- [ ] General speed-up of all minifiers (use ASM for whitespace funcs)
|
||||
- [ ] Improve JS minifiers by shortening variables and proper semicolon omission
|
||||
- [ ] Speed-up SVG minifier, it is very slow
|
||||
- [ ] Proper parser error reporting and line number + column information
|
||||
- [ ] Generation of source maps (uncertain, might slow down parsers too much if it cannot run separately nicely)
|
||||
- [ ] Look into compression of images, fonts and other web resources (into package `compress`?)
|
||||
- [ ] Create a cmd to pack webfiles (much like webpack), ie. merging CSS and JS files, inlining small external files, minification and gzipping. This would work on HTML files.
|
||||
- [ ] Create a package to format files, much like `gofmt` for Go files
|
||||
|
||||
## Prologue
|
||||
Minifiers or bindings to minifiers exist in almost all programming languages. Some implementations are merely using several regular-expressions to trim whitespace and comments (even though regex for parsing HTML/XML is ill-advised, for a good read see [Regular Expressions: Now You Have Two Problems](http://blog.codinghorror.com/regular-expressions-now-you-have-two-problems/)). Some implementations are much more profound, such as the [YUI Compressor](http://yui.github.io/yuicompressor/) and [Google Closure Compiler](https://github.com/google/closure-compiler) for JS. As most existing implementations either use Java or JavaScript and don't focus on performance, they are pretty slow. Additionally, loading the whole file into memory at once is bad for really large files (or impossible for streams).
|
||||
|
||||
This minifier proves to be that fast and extensive minifier that can handle HTML and any other filetype it may contain (CSS, JS, ...). It streams the input and output and can minify files concurrently.
|
||||
|
||||
## Installation
|
||||
Run the following command
|
||||
|
||||
go get github.com/tdewolff/minify
|
||||
|
||||
or add the following imports and run the project with `go get`
|
||||
``` go
|
||||
import (
|
||||
"github.com/tdewolff/minify"
|
||||
"github.com/tdewolff/minify/css"
|
||||
"github.com/tdewolff/minify/html"
|
||||
"github.com/tdewolff/minify/js"
|
||||
"github.com/tdewolff/minify/json"
|
||||
"github.com/tdewolff/minify/svg"
|
||||
"github.com/tdewolff/minify/xml"
|
||||
)
|
||||
```
|
||||
|
||||
## API stability
|
||||
There is no guarantee for absolute stability, but I take issues and bugs seriously and don't take API changes lightly. The library will be maintained in a compatible way unless vital bugs prevent me from doing so. There has been one API change after v1 which added options support and I took the opportunity to push through some more API clean up as well. There are no plans whatsoever for future API changes.
|
||||
|
||||
## Testing
|
||||
For all subpackages and the imported `parse` and `buffer` packages, test coverage of 100% is pursued. Besides full coverage, the minifiers are [fuzz tested](https://github.com/tdewolff/fuzz) using [github.com/dvyukov/go-fuzz](http://www.github.com/dvyukov/go-fuzz); see [the wiki](https://github.com/tdewolff/minify/wiki) for the most important bugs found by fuzz testing. Furthermore, I am working on adding visual testing to ensure that minification doesn't change anything visually. By using the WebKit browser to render the original and minified pages we can check whether any pixel is different.
|
||||
|
||||
These tests ensure that everything works as intended, the code does not crash (whatever the input) and that it doesn't change the final result visually. If you still encounter a bug, please report [here](https://github.com/tdewolff/minify/issues)!
|
||||
|
||||
## Performance
|
||||
The benchmarks directory contains a number of standardized samples used to compare performance between changes. To give an indication of the speed of this library, I've ran the tests on my Thinkpad T460 (i5-6300U quad-core 2.4GHz running Arch Linux) using Go 1.9.2.
|
||||
|
||||
```
|
||||
name time/op
|
||||
CSS/sample_bootstrap.css-4 2.26ms ± 0%
|
||||
CSS/sample_gumby.css-4 2.92ms ± 1%
|
||||
HTML/sample_amazon.html-4 2.33ms ± 2%
|
||||
HTML/sample_bbc.html-4 1.02ms ± 1%
|
||||
HTML/sample_blogpost.html-4 171µs ± 2%
|
||||
HTML/sample_es6.html-4 14.5ms ± 0%
|
||||
HTML/sample_stackoverflow.html-4 2.41ms ± 1%
|
||||
HTML/sample_wikipedia.html-4 4.76ms ± 0%
|
||||
JS/sample_ace.js-4 7.41ms ± 0%
|
||||
JS/sample_dot.js-4 63.7µs ± 0%
|
||||
JS/sample_jquery.js-4 2.99ms ± 0%
|
||||
JS/sample_jqueryui.js-4 5.92ms ± 2%
|
||||
JS/sample_moment.js-4 1.09ms ± 1%
|
||||
JSON/sample_large.json-4 2.95ms ± 0%
|
||||
JSON/sample_testsuite.json-4 1.51ms ± 1%
|
||||
JSON/sample_twitter.json-4 6.75µs ± 1%
|
||||
SVG/sample_arctic.svg-4 62.3ms ± 1%
|
||||
SVG/sample_gopher.svg-4 218µs ± 0%
|
||||
SVG/sample_usa.svg-4 33.1ms ± 3%
|
||||
XML/sample_books.xml-4 36.2µs ± 0%
|
||||
XML/sample_catalog.xml-4 14.9µs ± 0%
|
||||
XML/sample_omg.xml-4 6.31ms ± 1%
|
||||
|
||||
name speed
|
||||
CSS/sample_bootstrap.css-4 60.8MB/s ± 0%
|
||||
CSS/sample_gumby.css-4 63.9MB/s ± 1%
|
||||
HTML/sample_amazon.html-4 203MB/s ± 2%
|
||||
HTML/sample_bbc.html-4 113MB/s ± 1%
|
||||
HTML/sample_blogpost.html-4 123MB/s ± 2%
|
||||
HTML/sample_es6.html-4 70.7MB/s ± 0%
|
||||
HTML/sample_stackoverflow.html-4 85.2MB/s ± 1%
|
||||
HTML/sample_wikipedia.html-4 93.6MB/s ± 0%
|
||||
JS/sample_ace.js-4 86.9MB/s ± 0%
|
||||
JS/sample_dot.js-4 81.0MB/s ± 0%
|
||||
JS/sample_jquery.js-4 82.8MB/s ± 0%
|
||||
JS/sample_jqueryui.js-4 79.3MB/s ± 2%
|
||||
JS/sample_moment.js-4 91.2MB/s ± 1%
|
||||
JSON/sample_large.json-4 258MB/s ± 0%
|
||||
JSON/sample_testsuite.json-4 457MB/s ± 1%
|
||||
JSON/sample_twitter.json-4 226MB/s ± 1%
|
||||
SVG/sample_arctic.svg-4 23.6MB/s ± 1%
|
||||
SVG/sample_gopher.svg-4 26.7MB/s ± 0%
|
||||
SVG/sample_usa.svg-4 30.9MB/s ± 3%
|
||||
XML/sample_books.xml-4 122MB/s ± 0%
|
||||
XML/sample_catalog.xml-4 130MB/s ± 0%
|
||||
XML/sample_omg.xml-4 180MB/s ± 1%
|
||||
```
|
||||
|
||||
## HTML
|
||||
|
||||
HTML (with JS and CSS) minification typically shaves off about 10%.
|
||||
|
||||
The HTML5 minifier uses these minifications:
|
||||
|
||||
- strip unnecessary whitespace and otherwise collapse it to one space (or newline if it originally contained a newline)
|
||||
- strip superfluous quotes, or uses single/double quotes whichever requires fewer escapes
|
||||
- strip default attribute values and attribute boolean values
|
||||
- strip some empty attributes
|
||||
- strip unrequired tags (`html`, `head`, `body`, ...)
|
||||
- strip unrequired end tags (`tr`, `td`, `li`, ... and often `p`)
|
||||
- strip default protocols (`http:`, `https:` and `javascript:`)
|
||||
- strip all comments (including conditional comments, old IE versions are not supported anymore by Microsoft)
|
||||
- shorten `doctype` and `meta` charset
|
||||
- lowercase tags, attributes and some values to enhance gzip compression
|
||||
|
||||
Options:
|
||||
|
||||
- `KeepConditionalComments` preserve all IE conditional comments such as `<!--[if IE 6]><![endif]-->` and `<![if IE 6]><![endif]>`, see https://msdn.microsoft.com/en-us/library/ms537512(v=vs.85).aspx#syntax
|
||||
- `KeepDefaultAttrVals` preserve default attribute values such as `<script type="text/javascript">`
|
||||
- `KeepDocumentTags` preserve `html`, `head` and `body` tags
|
||||
- `KeepEndTags` preserve all end tags
|
||||
- `KeepWhitespace` preserve whitespace between inline tags but still collapse multiple whitespace characters into one
|
||||
|
||||
After recent benchmarking and profiling it became really fast and minifies pages in the 10ms range, making it viable for on-the-fly minification.
|
||||
|
||||
However, be careful when doing on-the-fly minification. Minification typically trims off 10% and does this at worst at around 20MB/s. This means users have to download slower than 2MB/s for on-the-fly minification to be worthwhile. This may or may not apply in your situation. Prefer caching instead!
|
||||
|
||||
### Whitespace removal
|
||||
The whitespace removal mechanism collapses all sequences of whitespace (spaces, newlines, tabs) to a single space. If the sequence contained a newline or carriage return it will collapse into a newline character instead. It trims all text parts (in between tags) depending on whether it was preceded by a space from a previous piece of text and whether it is followed up by a block element or an inline element. In the former case we can omit spaces while for inline elements whitespace has significance.
|
||||
|
||||
Make sure your HTML doesn't depend on whitespace between `block` elements that have been changed to `inline` or `inline-block` elements using CSS. Your layout *should not* depend on those whitespaces as the minifier will remove them. An example is a menu consisting of multiple `<li>` that have `display:inline-block` applied and have whitespace in between them. It is bad practice to rely on whitespace for element positioning anyway!
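
A rough sketch of the effect, reusing the `m.String` helper described later in this README; the input string is made up and the comment describes the expected result only approximately:

``` go
m := minify.New()
m.AddFunc("text/html", html.Minify)

out, err := m.String("text/html", "<p>\n    some   text\n</p>")
if err != nil {
	panic(err)
}
// Every whitespace run in `out` is collapsed to a single space,
// or to a single newline where the original run contained a newline.
fmt.Println(out)
```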
|
||||
|
||||
## CSS
|
||||
|
||||
Minification typically shaves off about 10%-15%.
|
||||
|
||||
The CSS minifier will only use safe minifications:
|
||||
|
||||
- remove comments and unnecessary whitespace
|
||||
- remove trailing semicolons
|
||||
- optimize `margin`, `padding` and `border-width` number of sides
|
||||
- shorten numbers by removing unnecessary `+` and zeros and rewriting with/without exponent
|
||||
- remove dimension and percentage for zero values
|
||||
- remove quotes for URLs
|
||||
- remove quotes for font families and make lowercase
|
||||
- rewrite hex colors to/from color names, or to 3 digit hex
|
||||
- rewrite `rgb(`, `rgba(`, `hsl(` and `hsla(` colors to hex or name
|
||||
- replace `normal` and `bold` by numbers for `font-weight` and `font`
|
||||
- replace `none` → `0` for `border`, `background` and `outline`
|
||||
- lowercase all identifiers except classes, IDs and URLs to enhance gzip compression
|
||||
- shorten MS alpha function
|
||||
- rewrite data URIs with base64 or ASCII whichever is shorter
|
||||
- calls minifier for data URI mediatypes, thus you can compress embedded SVG files if you have that minifier attached
|
||||
|
||||
It does purposely not use the following techniques:
|
||||
|
||||
- (partially) merge rulesets
|
||||
- (partially) split rulesets
|
||||
- collapse multiple declarations when main declaration is defined within a ruleset (don't put `font-weight` within an already existing `font`, too complex)
|
||||
- remove overwritten properties in ruleset (this not always overwrites it, for example with `!important`)
|
||||
- rewrite properties into one ruleset if possible (like `margin-top`, `margin-right`, `margin-bottom` and `margin-left` → `margin`)
|
||||
- put nested ID selector at the front (`body > div#elem p` → `#elem p`)
|
||||
- rewrite attribute selectors for IDs and classes (`div[id=a]` → `div#a`)
|
||||
- put space after pseudo-selectors (IE6 is old, move on!)
|
||||
|
||||
It's great that so many other tools make comparison tables: [CSS Minifier Comparison](http://www.codenothing.com/benchmarks/css-compressor-3.0/full.html), [CSS minifiers comparison](http://www.phpied.com/css-minifiers-comparison/) and [CleanCSS tests](http://goalsmashers.github.io/css-minification-benchmark/). From the last link, this CSS minifier is almost without doubt the fastest and has near-perfect minification rates. It falls short with the purposely not implemented and often unsafe techniques, so that's fine.
|
||||
|
||||
Options:
|
||||
|
||||
- `Decimals` number of decimals to preserve for numbers, `-1` means no trimming
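
A minimal sketch of setting this option, following the same pattern as the `html.Minifier` example above (it assumes the `css.Minifier` struct exposes the `Decimals` field named here):

``` go
m.Add("text/css", &css.Minifier{
	Decimals: -1, // keep every decimal, no trimming
})
```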
|
||||
|
||||
## JS
|
||||
|
||||
The JS minifier is pretty basic. It removes comments, whitespace and line breaks whenever it can. It employs all the rules that [JSMin](http://www.crockford.com/javascript/jsmin.html) does too, but has additional improvements. For example the prefix-postfix bug is fixed.
|
||||
|
||||
Common speeds of PHP and JS implementations are about 100-300kB/s (see [Uglify2](http://lisperator.net/uglifyjs/), [Adventures in PHP web asset minimization](https://www.happyassassin.net/2014/12/29/adventures-in-php-web-asset-minimization/)). This implementation is orders of magnitude faster, at around 50MB/s.
|
||||
|
||||
TODO:
|
||||
- shorten local variables / function parameters names
|
||||
- precise semicolon and newline omission
|
||||
|
||||
## JSON
|
||||
|
||||
Minification typically shaves off about 15% of filesize for common indented JSON such as generated by [JSON Generator](http://www.json-generator.com/).
|
||||
|
||||
The JSON minifier only removes whitespace, which is the only thing that can be left out.
|
||||
|
||||
## SVG
|
||||
|
||||
The SVG minifier uses these minifications:
|
||||
|
||||
- trim and collapse whitespace between all tags
|
||||
- strip comments, empty `doctype`, XML prelude, `metadata`
|
||||
- strip SVG version
|
||||
- strip CDATA sections wherever possible
|
||||
- collapse tags with no content to a void tag
|
||||
- collapse empty container tags (`g`, `svg`, ...)
|
||||
- minify style tag and attributes with the CSS minifier
|
||||
- minify colors
|
||||
- shorten lengths and numbers and remove default `px` unit
|
||||
- shorten `path` data
|
||||
- convert `rect`, `line`, `polygon`, `polyline` to `path`
|
||||
- use relative or absolute positions in path data whichever is shorter
|
||||
|
||||
TODO:
|
||||
- convert attributes to style attribute whenever shorter
|
||||
- merge path data? (same style and no intersection -- the latter is difficult)
|
||||
|
||||
Options:
|
||||
|
||||
- `Decimals` number of decimals to preserve for numbers, `-1` means no trimming
|
||||
|
||||
## XML
|
||||
|
||||
The XML minifier uses these minifications:
|
||||
|
||||
- strip unnecessary whitespace and otherwise collapse it to one space (or newline if it originally contained a newline)
|
||||
- strip comments
|
||||
- collapse tags with no content to a void tag
|
||||
- strip CDATA sections wherever possible
|
||||
|
||||
Options:
|
||||
|
||||
- `KeepWhitespace` preserve whitespace between inline tags but still collapse multiple whitespace characters into one
|
||||
|
||||
## Usage
|
||||
Any input stream is buffered by the minification functions; this is how the underlying buffer package inherently works to ensure high performance. The output stream, however, is not buffered. It is wise to preallocate a buffer as big as the input to which the output is written, or otherwise use `bufio` to buffer to a streaming writer.
|
||||
|
||||
### New
|
||||
Retrieve a minifier struct which holds a map of mediatype → minifier functions.
|
||||
``` go
|
||||
m := minify.New()
|
||||
```
|
||||
|
||||
The following loads all provided minifiers.
|
||||
``` go
|
||||
m := minify.New()
|
||||
m.AddFunc("text/css", css.Minify)
|
||||
m.AddFunc("text/html", html.Minify)
|
||||
m.AddFunc("text/javascript", js.Minify)
|
||||
m.AddFunc("image/svg+xml", svg.Minify)
|
||||
m.AddFuncRegexp(regexp.MustCompile("[/+]json$"), json.Minify)
|
||||
m.AddFuncRegexp(regexp.MustCompile("[/+]xml$"), xml.Minify)
|
||||
```
|
||||
|
||||
You can set options to several minifiers.
|
||||
``` go
|
||||
m.Add("text/html", &html.Minifier{
|
||||
KeepDefaultAttrVals: true,
|
||||
KeepWhitespace: true,
|
||||
})
|
||||
```
|
||||
|
||||
### From reader
|
||||
Minify from an `io.Reader` to an `io.Writer` for a specific mediatype.
|
||||
``` go
|
||||
if err := m.Minify(mediatype, w, r); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
```
|
||||
|
||||
### From bytes
|
||||
Minify from and to a `[]byte` for a specific mediatype.
|
||||
``` go
|
||||
b, err = m.Bytes(mediatype, b)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
```
|
||||
|
||||
### From string

Minify from and to a `string` for a specific mediatype.
``` go
s, err = m.String(mediatype, s)
if err != nil {
    panic(err)
}
```

### To reader

Get a minifying reader for a specific mediatype.
``` go
mr := m.Reader(mediatype, r)
if _, err := mr.Read(b); err != nil {
    panic(err)
}
```
### To writer

Get a minifying writer for a specific mediatype. It must be closed explicitly because it uses an `io.Pipe` underneath.
``` go
mw := m.Writer(mediatype, w)
if _, err := mw.Write([]byte("input")); err != nil {
    panic(err)
}
if err := mw.Close(); err != nil {
    panic(err)
}
```
### Middleware

Minify resources on the fly using middleware. It passes a wrapped response writer to the handler that removes the Content-Length header. The minifier is chosen based on the Content-Type header or, if that header is empty, on the file extension in the request URI. Because this is on-the-fly processing, you should preferably cache the results.
``` go
fs := http.FileServer(http.Dir("www/"))
http.Handle("/", m.Middleware(fs))
```
### Custom minifier

Add a minifier for a specific mimetype.
``` go
type CustomMinifier struct {
    KeepLineBreaks bool
}

func (c *CustomMinifier) Minify(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
    // ...
    return nil
}

m.Add(mimetype, &CustomMinifier{KeepLineBreaks: true})
// or
m.AddRegexp(regexp.MustCompile("/x-custom$"), &CustomMinifier{KeepLineBreaks: true})
```

Add a minify function for a specific mimetype.
``` go
m.AddFunc(mimetype, func(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
    // ...
    return nil
})
m.AddFuncRegexp(regexp.MustCompile("/x-custom$"), func(m *minify.M, w io.Writer, r io.Reader, params map[string]string) error {
    // ...
    return nil
})
```

Add a command `cmd` with arguments `args` for a specific mimetype.
``` go
m.AddCmd(mimetype, exec.Command(cmd, args...))
m.AddCmdRegexp(regexp.MustCompile("/x-custom$"), exec.Command(cmd, args...))
```
### Mediatypes

Using the `params map[string]string` argument one can pass parameters to the minifier, such as those seen in mediatypes (`type/subtype; key1=val1; key2=val2`). Examples are the encoding or charset of the data. Calling `Minify` will split the mimetype and parameters for the minifiers for you, but `MinifyMimetype` can be used if you already have them split up.

Minifiers can also be added using a regular expression. For example, a minifier registered with `image/.*` will match any image mime.
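A rough sketch of the pre-split variant (hedged: the exact `MinifyMimetype` signature may differ between versions; here it is assumed to take the bare mimetype plus a parameter map):

``` go
params := map[string]string{"charset": "utf-8"}
if err := m.MinifyMimetype([]byte("text/html"), w, r, params); err != nil {
    panic(err)
}
```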
## Examples

### Common minifiers

Basic example that minifies from stdin to stdout and loads the default HTML, CSS and JS minifiers. Optionally, one can run JS through `java -jar build/compiler.jar` (for example the [ClosureCompiler](https://code.google.com/p/closure-compiler/)). Note that reading the file into a buffer first and writing to a pre-allocated buffer would be faster (but would disable streaming).
``` go
package main

import (
    "log"
    "os"
    "regexp"

    "github.com/tdewolff/minify"
    "github.com/tdewolff/minify/css"
    "github.com/tdewolff/minify/html"
    "github.com/tdewolff/minify/js"
    "github.com/tdewolff/minify/json"
    "github.com/tdewolff/minify/svg"
    "github.com/tdewolff/minify/xml"
)

func main() {
    m := minify.New()
    m.AddFunc("text/css", css.Minify)
    m.AddFunc("text/html", html.Minify)
    m.AddFunc("text/javascript", js.Minify)
    m.AddFunc("image/svg+xml", svg.Minify)
    m.AddFuncRegexp(regexp.MustCompile("[/+]json$"), json.Minify)
    m.AddFuncRegexp(regexp.MustCompile("[/+]xml$"), xml.Minify)

    // Or use the following for better minification of JS but lower speed
    // (also import "os/exec" when enabling this line):
    // m.AddCmd("text/javascript", exec.Command("java", "-jar", "build/compiler.jar"))

    if err := m.Minify("text/html", os.Stdout, os.Stdin); err != nil {
        log.Fatal(err)
    }
}
```
### <a name="custom-minifier-example"></a> Custom minifier

Custom minifier showing an example that implements the minifier function interface. Within a custom minifier, it is possible to call any other minifier function (through the passed `m *minify.M`) recursively when dealing with embedded resources.
``` go
package main

import (
    "bufio"
    "fmt"
    "io"
    "strings"

    "github.com/tdewolff/minify"
)

func main() {
    m := minify.New()
    m.AddFunc("text/plain", func(m *minify.M, w io.Writer, r io.Reader, _ map[string]string) error {
        // remove all spaces from every line
        rb := bufio.NewReader(r)
        for {
            line, err := rb.ReadString('\n')
            if err != nil && err != io.EOF {
                return err
            }
            if _, errws := io.WriteString(w, strings.Replace(line, " ", "", -1)); errws != nil {
                return errws
            }
            if err == io.EOF {
                break
            }
        }
        return nil
    })

    in := "Because my coffee was too cold, I heated it in the microwave."
    out, err := m.String("text/plain", in)
    if err != nil {
        panic(err)
    }
    fmt.Println(out)
    // Output: Becausemycoffeewastoocold,Iheateditinthemicrowave.
}
```
### ResponseWriter

#### Middleware
``` go
func main() {
    m := minify.New()
    m.AddFunc("text/css", css.Minify)
    m.AddFunc("text/html", html.Minify)
    m.AddFunc("text/javascript", js.Minify)
    m.AddFunc("image/svg+xml", svg.Minify)
    m.AddFuncRegexp(regexp.MustCompile("[/+]json$"), json.Minify)
    m.AddFuncRegexp(regexp.MustCompile("[/+]xml$"), xml.Minify)

    fs := http.FileServer(http.Dir("www/"))
    http.Handle("/", m.Middleware(fs))
}
```
#### ResponseWriter
``` go
func Serve(w http.ResponseWriter, r *http.Request) {
    mw := m.ResponseWriter(w, r)
    defer mw.Close()
    w = mw

    http.ServeFile(w, r, path.Join("www", r.URL.Path))
}
```
#### Custom response writer

ResponseWriter example which returns a ResponseWriter that minifies the content and then writes to the original ResponseWriter. Any write after applying this filter will be minified.
``` go
type MinifyResponseWriter struct {
    http.ResponseWriter
    io.WriteCloser
}

func (m MinifyResponseWriter) Write(b []byte) (int, error) {
    return m.WriteCloser.Write(b)
}

// MinifyResponseWriter must be closed explicitly by the calling site.
func MinifyFilter(mediatype string, res http.ResponseWriter) MinifyResponseWriter {
    m := minify.New()
    // add minifiers

    mw := m.Writer(mediatype, res)
    return MinifyResponseWriter{res, mw}
}
```

``` go
// Usage
func(w http.ResponseWriter, req *http.Request) {
    mw := MinifyFilter("text/html", w)
    if _, err := io.WriteString(mw, "<p class=\"message\"> This HTTP response will be minified. </p>"); err != nil {
        panic(err)
    }
    if err := mw.Close(); err != nil {
        panic(err)
    }
    // Output: <p class=message>This HTTP response will be minified.
}
```
### Templates

Here is an example of a replacement for `template.ParseFiles` from `html/template` that automatically minifies each template before parsing it.

Be aware that minifying templates will work in most cases, but not in all. Because the HTML minifier only works for valid HTML5, your template must itself be valid HTML5. Template tags are parsed as regular text by the minifier.
``` go
func compileTemplates(filenames ...string) (*template.Template, error) {
    m := minify.New()
    m.AddFunc("text/html", html.Minify)

    var tmpl *template.Template
    for _, filename := range filenames {
        name := filepath.Base(filename)
        if tmpl == nil {
            tmpl = template.New(name)
        } else {
            tmpl = tmpl.New(name)
        }

        b, err := ioutil.ReadFile(filename)
        if err != nil {
            return nil, err
        }

        mb, err := m.Bytes("text/html", b)
        if err != nil {
            return nil, err
        }
        if _, err := tmpl.Parse(string(mb)); err != nil {
            return nil, err
        }
    }
    return tmpl, nil
}
```
Example usage:

``` go
templates := template.Must(compileTemplates("view.html", "home.html"))
```
## License

Released under the [MIT license](LICENSE.md).

[1]: http://golang.org/ "Go Language"
137 vendor/github.com/tdewolff/minify/common.go (generated, vendored)
|
@ -12,8 +12,8 @@ import (
|
|||
// Epsilon is the closest number to zero that is not considered to be zero.
|
||||
var Epsilon = 0.00001
|
||||
|
||||
// ContentType minifies a given mediatype by removing all whitespace.
|
||||
func ContentType(b []byte) []byte {
|
||||
// Mediatype minifies a given mediatype by removing all whitespace.
|
||||
func Mediatype(b []byte) []byte {
|
||||
j := 0
|
||||
start := 0
|
||||
inString := false
|
||||
|
@ -79,6 +79,107 @@ func DataURI(m *M, dataURI []byte) []byte {
|
|||
const MaxInt = int(^uint(0) >> 1)
|
||||
const MinInt = -MaxInt - 1
|
||||
|
||||
// Decimal minifies a given byte slice containing a number (see parse.Number) and removes superfluous characters.
|
||||
// It does not parse or output exponents.
|
||||
func Decimal(num []byte, prec int) []byte {
|
||||
// omit first + and register mantissa start and end, whether it's negative and the exponent
|
||||
neg := false
|
||||
start := 0
|
||||
dot := -1
|
||||
end := len(num)
|
||||
if 0 < end && (num[0] == '+' || num[0] == '-') {
|
||||
if num[0] == '-' {
|
||||
neg = true
|
||||
}
|
||||
start++
|
||||
}
|
||||
for i, c := range num[start:] {
|
||||
if c == '.' {
|
||||
dot = start + i
|
||||
break
|
||||
}
|
||||
}
|
||||
if dot == -1 {
|
||||
dot = end
|
||||
}
|
||||
|
||||
// trim leading zeros but leave at least one digit
|
||||
for start < end-1 && num[start] == '0' {
|
||||
start++
|
||||
}
|
||||
// trim trailing zeros
|
||||
i := end - 1
|
||||
for ; i > dot; i-- {
|
||||
if num[i] != '0' {
|
||||
end = i + 1
|
||||
break
|
||||
}
|
||||
}
|
||||
if i == dot {
|
||||
end = dot
|
||||
if start == end {
|
||||
num[start] = '0'
|
||||
return num[start : start+1]
|
||||
}
|
||||
} else if start == end-1 && num[start] == '0' {
|
||||
return num[start:end]
|
||||
}
|
||||
|
||||
// apply precision
|
||||
if prec > -1 && dot+1+prec < end {
|
||||
end = dot + 1 + prec
|
||||
inc := num[end] >= '5'
|
||||
if inc || num[end-1] == '0' {
|
||||
for i := end - 1; i > start; i-- {
|
||||
if i == dot {
|
||||
end--
|
||||
} else if inc {
|
||||
if num[i] == '9' {
|
||||
if i > dot {
|
||||
end--
|
||||
} else {
|
||||
num[i] = '0'
|
||||
}
|
||||
} else {
|
||||
num[i]++
|
||||
inc = false
|
||||
break
|
||||
}
|
||||
} else if i > dot && num[i] == '0' {
|
||||
end--
|
||||
}
|
||||
}
|
||||
}
|
||||
if dot == start && end == start+1 {
|
||||
if inc {
|
||||
num[start] = '1'
|
||||
} else {
|
||||
num[start] = '0'
|
||||
}
|
||||
} else {
|
||||
if dot+1 == end {
|
||||
end--
|
||||
}
|
||||
if inc {
|
||||
if num[start] == '9' {
|
||||
num[start] = '0'
|
||||
copy(num[start+1:], num[start:end])
|
||||
end++
|
||||
num[start] = '1'
|
||||
} else {
|
||||
num[start]++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if neg {
|
||||
start--
|
||||
num[start] = '-'
|
||||
}
|
||||
return num[start:end]
|
||||
}
|
||||
|
||||
// Number minifies a given byte slice containing a number (see parse.Number) and removes superfluous characters.
|
||||
func Number(num []byte, prec int) []byte {
|
||||
// omit first + and register mantissa start and end, whether it's negative and the exponent
|
||||
|
@ -311,6 +412,20 @@ func Number(num []byte, prec int) []byte {
|
|||
}
|
||||
} else {
|
||||
// case 3
|
||||
|
||||
// find new end, considering moving numbers to the front, removing the dot and increasing the length of the exponent
|
||||
newEnd := end
|
||||
if dot == start {
|
||||
newEnd = start + n
|
||||
} else {
|
||||
newEnd--
|
||||
}
|
||||
newEnd += 2 + lenIntExp
|
||||
|
||||
exp := intExp
|
||||
lenExp := lenIntExp
|
||||
if newEnd < len(num) {
|
||||
// it saves space to convert the decimal to an integer and decrease the exponent
|
||||
if dot < end {
|
||||
if dot == start {
|
||||
copy(num[start:], num[end-n:end])
|
||||
|
@ -320,15 +435,23 @@ func Number(num []byte, prec int) []byte {
|
|||
end--
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// it does not save space and will panic, so we revert to the original representation
|
||||
exp = origExp
|
||||
lenExp = 1
|
||||
if origExp <= -10 || origExp >= 10 {
|
||||
lenExp = strconv.LenInt(int64(origExp))
|
||||
}
|
||||
}
|
||||
num[end] = 'e'
|
||||
num[end+1] = '-'
|
||||
end += 2
|
||||
intExp = -intExp
|
||||
for i := end + lenIntExp - 1; i >= end; i-- {
|
||||
num[i] = byte(intExp%10) + '0'
|
||||
intExp /= 10
|
||||
exp = -exp
|
||||
for i := end + lenExp - 1; i >= end; i-- {
|
||||
num[i] = byte(exp%10) + '0'
|
||||
exp /= 10
|
||||
}
|
||||
end += lenIntExp
|
||||
end += lenExp
|
||||
}
|
||||
|
||||
if neg {
|
||||
|
|
346 vendor/github.com/tdewolff/minify/css/css.go (generated, vendored)
|
@ -4,6 +4,7 @@ package css // import "github.com/tdewolff/minify/css"
|
|||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
|
||||
|
@ -29,16 +30,19 @@ type cssMinifier struct {
|
|||
w io.Writer
|
||||
p *css.Parser
|
||||
o *Minifier
|
||||
|
||||
valuesBuffer []Token
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
|
||||
// DefaultMinifier is the default minifier.
|
||||
var DefaultMinifier = &Minifier{Decimals: -1}
|
||||
var DefaultMinifier = &Minifier{Decimals: -1, KeepCSS2: false}
|
||||
|
||||
// Minifier is a CSS minifier.
|
||||
type Minifier struct {
|
||||
Decimals int
|
||||
KeepCSS2 bool
|
||||
}
|
||||
|
||||
// Minify minifies CSS data, it reads from r and writes to w.
|
||||
|
@ -108,7 +112,19 @@ func (c *cssMinifier) minifyGrammar() error {
|
|||
if _, err := c.w.Write(data); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, val := range c.p.Values() {
|
||||
values := c.p.Values()
|
||||
if css.ToHash(data[1:]) == css.Import && len(values) == 2 && values[1].TokenType == css.URLToken {
|
||||
url := values[1].Data
|
||||
if url[4] != '"' && url[4] != '\'' {
|
||||
url = url[3:]
|
||||
url[0] = '"'
|
||||
url[len(url)-1] = '"'
|
||||
} else {
|
||||
url = url[4 : len(url)-1]
|
||||
}
|
||||
values[1].Data = url
|
||||
}
|
||||
for _, val := range values {
|
||||
if _, err := c.w.Write(val.Data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -216,35 +232,138 @@ func (c *cssMinifier) minifySelectors(property []byte, values []css.Token) error
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *cssMinifier) minifyDeclaration(property []byte, values []css.Token) error {
|
||||
if len(values) == 0 {
|
||||
type Token struct {
|
||||
css.TokenType
|
||||
Data []byte
|
||||
Components []css.Token // only filled for functions
|
||||
}
|
||||
|
||||
func (t Token) String() string {
|
||||
if len(t.Components) == 0 {
|
||||
return t.TokenType.String() + "(" + string(t.Data) + ")"
|
||||
} else {
|
||||
return fmt.Sprint(t.Components)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *cssMinifier) minifyDeclaration(property []byte, components []css.Token) error {
|
||||
if len(components) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
prop := css.ToHash(property)
|
||||
inProgid := false
|
||||
|
||||
// Strip !important from the component list, this will be added later separately
|
||||
important := false
|
||||
if len(components) > 2 && components[len(components)-2].TokenType == css.DelimToken && components[len(components)-2].Data[0] == '!' && css.ToHash(components[len(components)-1].Data) == css.Important {
|
||||
components = components[:len(components)-2]
|
||||
important = true
|
||||
}
|
||||
|
||||
// Check if this is a simple list of values separated by whitespace or commas, otherwise we'll not be processing
|
||||
simple := true
|
||||
prevSep := true
|
||||
values := c.valuesBuffer[:0]
|
||||
for i := 0; i < len(components); i++ {
|
||||
comp := components[i]
|
||||
if comp.TokenType == css.LeftParenthesisToken || comp.TokenType == css.LeftBraceToken || comp.TokenType == css.LeftBracketToken || comp.TokenType == css.RightParenthesisToken || comp.TokenType == css.RightBraceToken || comp.TokenType == css.RightBracketToken {
|
||||
simple = false
|
||||
break
|
||||
}
|
||||
|
||||
if !prevSep && comp.TokenType != css.WhitespaceToken && comp.TokenType != css.CommaToken {
|
||||
simple = false
|
||||
break
|
||||
}
|
||||
|
||||
if comp.TokenType == css.WhitespaceToken || comp.TokenType == css.CommaToken {
|
||||
prevSep = true
|
||||
if comp.TokenType == css.CommaToken {
|
||||
values = append(values, Token{components[i].TokenType, components[i].Data, nil})
|
||||
}
|
||||
} else if comp.TokenType == css.FunctionToken {
|
||||
prevSep = false
|
||||
j := i + 1
|
||||
level := 0
|
||||
for ; j < len(components); j++ {
|
||||
if components[j].TokenType == css.LeftParenthesisToken {
|
||||
level++
|
||||
} else if components[j].TokenType == css.RightParenthesisToken {
|
||||
if level == 0 {
|
||||
j++
|
||||
break
|
||||
}
|
||||
level--
|
||||
}
|
||||
}
|
||||
values = append(values, Token{components[i].TokenType, components[i].Data, components[i:j]})
|
||||
i = j - 1
|
||||
} else {
|
||||
prevSep = false
|
||||
values = append(values, Token{components[i].TokenType, components[i].Data, nil})
|
||||
}
|
||||
}
|
||||
c.valuesBuffer = values
|
||||
|
||||
// Do not process complex values (eg. containing blocks or is not alternated between whitespace/commas and flat values
|
||||
if !simple {
|
||||
if prop == css.Filter && len(components) == 11 {
|
||||
if bytes.Equal(components[0].Data, []byte("progid")) &&
|
||||
components[1].TokenType == css.ColonToken &&
|
||||
bytes.Equal(components[2].Data, []byte("DXImageTransform")) &&
|
||||
components[3].Data[0] == '.' &&
|
||||
bytes.Equal(components[4].Data, []byte("Microsoft")) &&
|
||||
components[5].Data[0] == '.' &&
|
||||
bytes.Equal(components[6].Data, []byte("Alpha(")) &&
|
||||
bytes.Equal(parse.ToLower(components[7].Data), []byte("opacity")) &&
|
||||
components[8].Data[0] == '=' &&
|
||||
components[10].Data[0] == ')' {
|
||||
components = components[6:]
|
||||
components[0].Data = []byte("alpha(")
|
||||
}
|
||||
}
|
||||
|
||||
for _, component := range components {
|
||||
if _, err := c.w.Write(component.Data); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if important {
|
||||
if _, err := c.w.Write([]byte("!important")); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := range values {
|
||||
values[i].TokenType, values[i].Data = c.shortenToken(prop, values[i].TokenType, values[i].Data)
|
||||
}
|
||||
|
||||
if len(values) > 0 {
|
||||
switch prop {
|
||||
case css.Font, css.Font_Weight, css.Font_Family:
|
||||
if prop == css.Font {
|
||||
// in "font:" shorthand all values before the size have "normal"
|
||||
// as valid and, at the same time, default value, so just skip them
|
||||
for i, value := range values {
|
||||
if inProgid {
|
||||
if value.TokenType == css.FunctionToken {
|
||||
inProgid = false
|
||||
if !(value.TokenType == css.IdentToken && css.ToHash(value.Data) == css.Normal) {
|
||||
values = values[i:]
|
||||
break
|
||||
}
|
||||
continue
|
||||
} else if value.TokenType == css.IdentToken && css.ToHash(value.Data) == css.Progid {
|
||||
inProgid = true
|
||||
continue
|
||||
}
|
||||
value.TokenType, value.Data = c.shortenToken(prop, value.TokenType, value.Data)
|
||||
if prop == css.Font || prop == css.Font_Family || prop == css.Font_Weight {
|
||||
if value.TokenType == css.IdentToken && (prop == css.Font || prop == css.Font_Weight) {
|
||||
}
|
||||
for i, value := range values {
|
||||
if value.TokenType == css.IdentToken {
|
||||
val := css.ToHash(value.Data)
|
||||
if val == css.Normal && prop == css.Font_Weight {
|
||||
// normal could also be specified for font-variant, not just font-weight
|
||||
value.TokenType = css.NumberToken
|
||||
value.Data = []byte("400")
|
||||
if prop == css.Font_Weight && val == css.Normal {
|
||||
values[i].TokenType = css.NumberToken
|
||||
values[i].Data = []byte("400")
|
||||
} else if val == css.Bold {
|
||||
value.TokenType = css.NumberToken
|
||||
value.Data = []byte("700")
|
||||
values[i].TokenType = css.NumberToken
|
||||
values[i].Data = []byte("700")
|
||||
}
|
||||
} else if value.TokenType == css.StringToken && (prop == css.Font || prop == css.Font_Family) && len(value.Data) > 2 {
|
||||
} else if value.TokenType == css.StringToken && len(value.Data) > 2 {
|
||||
unquote := true
|
||||
parse.ToLower(value.Data)
|
||||
s := value.Data[1 : len(value.Data)-1]
|
||||
|
@ -260,94 +379,91 @@ func (c *cssMinifier) minifyDeclaration(property []byte, values []css.Token) err
|
|||
}
|
||||
}
|
||||
if unquote {
|
||||
value.Data = s
|
||||
values[i].Data = s
|
||||
}
|
||||
}
|
||||
} else if prop == css.Outline || prop == css.Border || prop == css.Border_Bottom || prop == css.Border_Left || prop == css.Border_Right || prop == css.Border_Top {
|
||||
if css.ToHash(value.Data) == css.None {
|
||||
value.TokenType = css.NumberToken
|
||||
value.Data = zeroBytes
|
||||
}
|
||||
case css.Margin, css.Padding, css.Border_Width:
|
||||
n := len(values)
|
||||
if n == 2 {
|
||||
if bytes.Equal(values[0].Data, values[1].Data) {
|
||||
values = values[:1]
|
||||
}
|
||||
} else if n == 3 {
|
||||
if bytes.Equal(values[0].Data, values[1].Data) && bytes.Equal(values[0].Data, values[2].Data) {
|
||||
values = values[:1]
|
||||
} else if bytes.Equal(values[0].Data, values[2].Data) {
|
||||
values = values[:2]
|
||||
}
|
||||
} else if n == 4 {
|
||||
if bytes.Equal(values[0].Data, values[1].Data) && bytes.Equal(values[0].Data, values[2].Data) && bytes.Equal(values[0].Data, values[3].Data) {
|
||||
values = values[:1]
|
||||
} else if bytes.Equal(values[0].Data, values[2].Data) && bytes.Equal(values[1].Data, values[3].Data) {
|
||||
values = values[:2]
|
||||
} else if bytes.Equal(values[1].Data, values[3].Data) {
|
||||
values = values[:3]
|
||||
}
|
||||
}
|
||||
values[i].TokenType, values[i].Data = value.TokenType, value.Data
|
||||
case css.Outline, css.Border, css.Border_Bottom, css.Border_Left, css.Border_Right, css.Border_Top:
|
||||
none := false
|
||||
iZero := -1
|
||||
for i, value := range values {
|
||||
if len(value.Data) == 1 && value.Data[0] == '0' {
|
||||
iZero = i
|
||||
} else if css.ToHash(value.Data) == css.None {
|
||||
values[i].TokenType = css.NumberToken
|
||||
values[i].Data = zeroBytes
|
||||
none = true
|
||||
}
|
||||
|
||||
important := false
|
||||
if len(values) > 2 && values[len(values)-2].TokenType == css.DelimToken && values[len(values)-2].Data[0] == '!' && css.ToHash(values[len(values)-1].Data) == css.Important {
|
||||
values = values[:len(values)-2]
|
||||
important = true
|
||||
}
|
||||
|
||||
if len(values) == 1 {
|
||||
if prop == css.Background && css.ToHash(values[0].Data) == css.None {
|
||||
if none && iZero != -1 {
|
||||
values = append(values[:iZero], values[iZero+1:]...)
|
||||
}
|
||||
case css.Background:
|
||||
ident := css.ToHash(values[0].Data)
|
||||
if len(values) == 1 && (ident == css.None || ident == css.Transparent) {
|
||||
values[0].Data = backgroundNoneBytes
|
||||
} else if bytes.Equal(property, msfilterBytes) {
|
||||
}
|
||||
case css.Box_Shadow:
|
||||
if len(values) == 4 && len(values[0].Data) == 1 && values[0].Data[0] == '0' && len(values[1].Data) == 1 && values[1].Data[0] == '0' && len(values[2].Data) == 1 && values[2].Data[0] == '0' && len(values[3].Data) == 1 && values[3].Data[0] == '0' {
|
||||
values = values[:2]
|
||||
}
|
||||
default:
|
||||
if bytes.Equal(property, msfilterBytes) {
|
||||
alpha := []byte("progid:DXImageTransform.Microsoft.Alpha(Opacity=")
|
||||
if values[0].TokenType == css.StringToken && bytes.HasPrefix(values[0].Data[1:len(values[0].Data)-1], alpha) {
|
||||
values[0].Data = append(append([]byte{values[0].Data[0]}, []byte("alpha(opacity=")...), values[0].Data[1+len(alpha):]...)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if prop == css.Margin || prop == css.Padding || prop == css.Border_Width {
|
||||
if (values[0].TokenType == css.NumberToken || values[0].TokenType == css.DimensionToken || values[0].TokenType == css.PercentageToken) && (len(values)+1)%2 == 0 {
|
||||
valid := true
|
||||
for i := 1; i < len(values); i += 2 {
|
||||
if values[i].TokenType != css.WhitespaceToken || values[i+1].TokenType != css.NumberToken && values[i+1].TokenType != css.DimensionToken && values[i+1].TokenType != css.PercentageToken {
|
||||
valid = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if valid {
|
||||
n := (len(values) + 1) / 2
|
||||
if n == 2 {
|
||||
if bytes.Equal(values[0].Data, values[2].Data) {
|
||||
values = values[:1]
|
||||
}
|
||||
} else if n == 3 {
|
||||
if bytes.Equal(values[0].Data, values[2].Data) && bytes.Equal(values[0].Data, values[4].Data) {
|
||||
values = values[:1]
|
||||
} else if bytes.Equal(values[0].Data, values[4].Data) {
|
||||
values = values[:3]
|
||||
}
|
||||
} else if n == 4 {
|
||||
if bytes.Equal(values[0].Data, values[2].Data) && bytes.Equal(values[0].Data, values[4].Data) && bytes.Equal(values[0].Data, values[6].Data) {
|
||||
values = values[:1]
|
||||
} else if bytes.Equal(values[0].Data, values[4].Data) && bytes.Equal(values[2].Data, values[6].Data) {
|
||||
values = values[:3]
|
||||
} else if bytes.Equal(values[2].Data, values[6].Data) {
|
||||
values = values[:5]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if prop == css.Filter && len(values) == 11 {
|
||||
if bytes.Equal(values[0].Data, []byte("progid")) &&
|
||||
values[1].TokenType == css.ColonToken &&
|
||||
bytes.Equal(values[2].Data, []byte("DXImageTransform")) &&
|
||||
values[3].Data[0] == '.' &&
|
||||
bytes.Equal(values[4].Data, []byte("Microsoft")) &&
|
||||
values[5].Data[0] == '.' &&
|
||||
bytes.Equal(values[6].Data, []byte("Alpha(")) &&
|
||||
bytes.Equal(parse.ToLower(values[7].Data), []byte("opacity")) &&
|
||||
values[8].Data[0] == '=' &&
|
||||
values[10].Data[0] == ')' {
|
||||
values = values[6:]
|
||||
values[0].Data = []byte("alpha(")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < len(values); i++ {
|
||||
if values[i].TokenType == css.FunctionToken {
|
||||
n, err := c.minifyFunction(values[i:])
|
||||
prevComma := true
|
||||
for _, value := range values {
|
||||
if !prevComma && value.TokenType != css.CommaToken {
|
||||
if _, err := c.w.Write([]byte(" ")); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if value.TokenType == css.FunctionToken {
|
||||
err := c.minifyFunction(value.Components)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
i += n - 1
|
||||
} else if _, err := c.w.Write(values[i].Data); err != nil {
|
||||
} else {
|
||||
if _, err := c.w.Write(value.Data); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if value.TokenType == css.CommaToken {
|
||||
prevComma = true
|
||||
} else {
|
||||
prevComma = false
|
||||
}
|
||||
}
|
||||
|
||||
if important {
|
||||
if _, err := c.w.Write([]byte("!important")); err != nil {
|
||||
return err
|
||||
|
@ -356,22 +472,22 @@ func (c *cssMinifier) minifyDeclaration(property []byte, values []css.Token) err
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *cssMinifier) minifyFunction(values []css.Token) (int, error) {
|
||||
n := 1
|
||||
func (c *cssMinifier) minifyFunction(values []css.Token) error {
|
||||
n := len(values)
|
||||
if n > 2 {
|
||||
simple := true
|
||||
for i, value := range values[1:] {
|
||||
if value.TokenType == css.RightParenthesisToken {
|
||||
n++
|
||||
break
|
||||
}
|
||||
for i, value := range values[1 : n-1] {
|
||||
if i%2 == 0 && (value.TokenType != css.NumberToken && value.TokenType != css.PercentageToken) || (i%2 == 1 && value.TokenType != css.CommaToken) {
|
||||
simple = false
|
||||
}
|
||||
n++
|
||||
}
|
||||
values = values[:n]
|
||||
if simple && (n-1)%2 == 0 {
|
||||
fun := css.ToHash(values[0].Data[:len(values[0].Data)-1])
|
||||
|
||||
if simple && n%2 == 1 {
|
||||
fun := css.ToHash(values[0].Data[0 : len(values[0].Data)-1])
|
||||
for i := 1; i < n; i += 2 {
|
||||
values[i].TokenType, values[i].Data = c.shortenToken(0, values[i].TokenType, values[i].Data)
|
||||
}
|
||||
|
||||
nArgs := (n - 1) / 2
|
||||
if (fun == css.Rgba || fun == css.Hsla) && nArgs == 4 {
|
||||
d, _ := strconv.ParseFloat(string(values[7].Data), 32) // can never fail because if simple == true than this is a NumberToken or PercentageToken
|
||||
|
@ -425,7 +541,7 @@ func (c *cssMinifier) minifyFunction(values []css.Token) (int, error) {
|
|||
parse.ToLower(val)
|
||||
if s, ok := ShortenColorHex[string(val)]; ok {
|
||||
if _, err := c.w.Write(s); err != nil {
|
||||
return 0, err
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if len(val) == 7 && val[1] == val[2] && val[3] == val[4] && val[5] == val[6] {
|
||||
|
@ -434,10 +550,10 @@ func (c *cssMinifier) minifyFunction(values []css.Token) (int, error) {
|
|||
val = val[:4]
|
||||
}
|
||||
if _, err := c.w.Write(val); err != nil {
|
||||
return 0, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
return nil
|
||||
}
|
||||
} else if fun == css.Hsl && nArgs == 3 {
|
||||
if values[1].TokenType == css.NumberToken && values[3].TokenType == css.PercentageToken && values[5].TokenType == css.PercentageToken {
|
||||
|
@ -453,7 +569,7 @@ func (c *cssMinifier) minifyFunction(values []css.Token) (int, error) {
|
|||
parse.ToLower(val)
|
||||
if s, ok := ShortenColorHex[string(val)]; ok {
|
||||
if _, err := c.w.Write(s); err != nil {
|
||||
return 0, err
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if len(val) == 7 && val[1] == val[2] && val[3] == val[4] && val[5] == val[6] {
|
||||
|
@ -462,20 +578,22 @@ func (c *cssMinifier) minifyFunction(values []css.Token) (int, error) {
|
|||
val = val[:4]
|
||||
}
|
||||
if _, err := c.w.Write(val); err != nil {
|
||||
return 0, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, value := range values {
|
||||
if _, err := c.w.Write(value.Data); err != nil {
|
||||
return 0, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *cssMinifier) shortenToken(prop css.Hash, tt css.TokenType, data []byte) (css.TokenType, []byte) {
|
||||
|
@ -491,11 +609,15 @@ func (c *cssMinifier) shortenToken(prop css.Hash, tt css.TokenType, data []byte)
|
|||
}
|
||||
dim := data[n:]
|
||||
parse.ToLower(dim)
|
||||
if !c.o.KeepCSS2 {
|
||||
data = minify.Number(data[:n], c.o.Decimals)
|
||||
if tt == css.PercentageToken && (len(data) != 1 || data[0] != '0' || prop == css.Color) {
|
||||
data = append(data, '%')
|
||||
} else if tt == css.DimensionToken && (len(data) != 1 || data[0] != '0' || requiredDimension[string(dim)]) {
|
||||
} else {
|
||||
data = minify.Decimal(data[:n], c.o.Decimals) // don't use exponents
|
||||
}
|
||||
if tt == css.DimensionToken && (len(data) != 1 || data[0] != '0' || !optionalZeroDimension[string(dim)] || prop == css.Flex) {
|
||||
data = append(data, dim...)
|
||||
} else if tt == css.PercentageToken {
|
||||
data = append(data, '%') // TODO: drop percentage for properties that accept <percentage> and <length>
|
||||
}
|
||||
} else if tt == css.IdentToken {
|
||||
//parse.ToLower(data) // TODO: not all identifiers are case-insensitive; all <custom-ident> properties are case-sensitive
|
||||
|
@ -541,7 +663,7 @@ func (c *cssMinifier) shortenToken(prop css.Hash, tt css.TokenType, data []byte)
|
|||
} else if tt == css.URLToken {
|
||||
parse.ToLower(data[:3])
|
||||
if len(data) > 10 {
|
||||
uri := data[4 : len(data)-1]
|
||||
uri := parse.TrimWhitespace(data[4 : len(data)-1])
|
||||
delim := byte('"')
|
||||
if uri[0] == '\'' || uri[0] == '"' {
|
||||
delim = uri[0]
|
||||
|
|
28 vendor/github.com/tdewolff/minify/css/table.go (generated, vendored)
|
@ -2,14 +2,26 @@ package css
|
|||
|
||||
import "github.com/tdewolff/parse/css"
|
||||
|
||||
var requiredDimension = map[string]bool{
|
||||
"s": true,
|
||||
"ms": true,
|
||||
"dpi": true,
|
||||
"dpcm": true,
|
||||
"dppx": true,
|
||||
"hz": true,
|
||||
"khz": true,
|
||||
var optionalZeroDimension = map[string]bool{
|
||||
"px": true,
|
||||
"mm": true,
|
||||
"q": true,
|
||||
"cm": true,
|
||||
"in": true,
|
||||
"pt": true,
|
||||
"pc": true,
|
||||
"ch": true,
|
||||
"em": true,
|
||||
"ex": true,
|
||||
"rem": true,
|
||||
"vh": true,
|
||||
"vw": true,
|
||||
"vmin": true,
|
||||
"vmax": true,
|
||||
"deg": true,
|
||||
"grad": true,
|
||||
"rad": true,
|
||||
"turn": true,
|
||||
}
|
||||
|
||||
// Uses http://www.w3.org/TR/2010/PR-css3-color-20101028/ for colors
|
||||
|
|
10 vendor/github.com/tdewolff/minify/svg/pathdata.go (generated, vendored)
|
@ -33,6 +33,8 @@ func NewPathData(o *Minifier) *PathData {
|
|||
}
|
||||
}
|
||||
|
||||
// ShortenPathData takes a full pathdata string and returns a shortened version. The original string is overwritten.
|
||||
// It parses all commands (M, A, Z, ...) and coordinates (numbers) and calls copyInstruction for each command.
|
||||
func (p *PathData) ShortenPathData(b []byte) []byte {
|
||||
var x0, y0 float64
|
||||
var cmd byte
|
||||
|
@ -74,6 +76,8 @@ func (p *PathData) ShortenPathData(b []byte) []byte {
|
|||
return b[:j]
|
||||
}
|
||||
|
||||
// copyInstruction copies pathdata of a single command, but may be comprised of multiple sets for that command. For example, L takes two coordinates, but this function may process 2*N coordinates. Lowercase commands are relative commands, where the coordinates are relative to the previous point. Uppercase commands have absolute coordinates.
|
||||
// We update p.x and p.y (the current coordinates) according to the commands given. For each set of coordinates we call shortenCurPosInstruction and shortenAltPosInstruction. The former just minifies the coordinates, the latter will inverse the lowercase/uppercase of the command, and see if the coordinates get smaller due to that. The shortest is chosen and copied to `b`.
|
||||
func (p *PathData) copyInstruction(b []byte, cmd byte) int {
|
||||
n := len(p.coords)
|
||||
if n == 0 {
|
||||
|
@ -191,6 +195,7 @@ func (p *PathData) copyInstruction(b []byte, cmd byte) int {
|
|||
return j
|
||||
}
|
||||
|
||||
// shortenCurPosInstruction only minifies the coordinates.
|
||||
func (p *PathData) shortenCurPosInstruction(cmd byte, coords [][]byte) PathDataState {
|
||||
state := p.state
|
||||
p.curBuffer = p.curBuffer[:0]
|
||||
|
@ -202,7 +207,8 @@ func (p *PathData) shortenCurPosInstruction(cmd byte, coords [][]byte) PathDataS
|
|||
}
|
||||
for i, coord := range coords {
|
||||
isFlag := false
|
||||
if (cmd == 'A' || cmd == 'a') && (i%7 == 3 || i%7 == 4) {
|
||||
// Arc has boolean flags that can only be 0 or 1. Setting isFlag prevents from adding a dot before a zero (instead of a space). However, when the dot already was there, the command is malformed and could make the path longer than before, introducing bugs.
|
||||
if (cmd == 'A' || cmd == 'a') && (i%7 == 3 || i%7 == 4) && coord[0] != '.' {
|
||||
isFlag = true
|
||||
}
|
||||
|
||||
|
@ -212,6 +218,7 @@ func (p *PathData) shortenCurPosInstruction(cmd byte, coords [][]byte) PathDataS
|
|||
return state
|
||||
}
|
||||
|
||||
// shortenAltPosInstruction toggles the command between absolute / relative coordinates and minifies the coordinates.
|
||||
func (p *PathData) shortenAltPosInstruction(cmd byte, coordFloats []float64, x, y float64) PathDataState {
|
||||
state := p.state
|
||||
p.altBuffer = p.altBuffer[:0]
|
||||
|
@ -250,6 +257,7 @@ func (p *PathData) shortenAltPosInstruction(cmd byte, coordFloats []float64, x,
|
|||
return state
|
||||
}
|
||||
|
||||
// copyNumber will copy a number to the destination buffer, taking into account space or dot insertion to guarantee the shortest pathdata.
|
||||
func (state *PathDataState) copyNumber(buffer *[]byte, coord []byte, isFlag bool) {
|
||||
if state.prevDigit && (coord[0] >= '0' && coord[0] <= '9' || coord[0] == '.' && state.prevDigitIsInt) {
|
||||
if coord[0] == '0' && !state.prevDigitIsInt {
|
||||
|
|
35 vendor/github.com/tdewolff/minify/svg/svg.go (generated, vendored)
|
@ -51,7 +51,6 @@ func (o *Minifier) Minify(m *minify.M, w io.Writer, r io.Reader, _ map[string]st
|
|||
p := NewPathData(o)
|
||||
minifyBuffer := buffer.NewWriter(make([]byte, 0, 64))
|
||||
attrByteBuffer := make([]byte, 0, 64)
|
||||
gStack := make([]bool, 0)
|
||||
|
||||
l := xml.NewLexer(r)
|
||||
defer l.Restore()
|
||||
|
@ -59,7 +58,6 @@ func (o *Minifier) Minify(m *minify.M, w io.Writer, r io.Reader, _ map[string]st
|
|||
tb := NewTokenBuffer(l)
|
||||
for {
|
||||
t := *tb.Shift()
|
||||
SWITCH:
|
||||
switch t.TokenType {
|
||||
case xml.ErrorToken:
|
||||
if l.Err() == io.EOF {
|
||||
|
@ -113,29 +111,7 @@ func (o *Minifier) Minify(m *minify.M, w io.Writer, r io.Reader, _ map[string]st
|
|||
}
|
||||
case xml.StartTagToken:
|
||||
tag = t.Hash
|
||||
if containerTagMap[tag] { // skip empty containers
|
||||
i := 0
|
||||
for {
|
||||
next := tb.Peek(i)
|
||||
i++
|
||||
if next.TokenType == xml.EndTagToken && next.Hash == tag || next.TokenType == xml.StartTagCloseVoidToken || next.TokenType == xml.ErrorToken {
|
||||
for j := 0; j < i; j++ {
|
||||
tb.Shift()
|
||||
}
|
||||
break SWITCH
|
||||
} else if next.TokenType != xml.AttributeToken && next.TokenType != xml.StartTagCloseToken {
|
||||
break
|
||||
}
|
||||
}
|
||||
if tag == svg.G {
|
||||
if tb.Peek(0).TokenType == xml.StartTagCloseToken {
|
||||
gStack = append(gStack, false)
|
||||
tb.Shift()
|
||||
break
|
||||
}
|
||||
gStack = append(gStack, true)
|
||||
}
|
||||
} else if tag == svg.Metadata {
|
||||
if tag == svg.Metadata {
|
||||
skipTag(tb, tag)
|
||||
break
|
||||
} else if tag == svg.Line {
|
||||
|
@ -184,7 +160,7 @@ func (o *Minifier) Minify(m *minify.M, w io.Writer, r io.Reader, _ map[string]st
|
|||
}
|
||||
|
||||
if tag == svg.Svg && attr == svg.ContentStyleType {
|
||||
val = minify.ContentType(val)
|
||||
val = minify.Mediatype(val)
|
||||
defaultStyleType = val
|
||||
} else if attr == svg.Style {
|
||||
minifyBuffer.Reset()
|
||||
|
@ -266,13 +242,6 @@ func (o *Minifier) Minify(m *minify.M, w io.Writer, r io.Reader, _ map[string]st
|
|||
}
|
||||
case xml.EndTagToken:
|
||||
tag = 0
|
||||
if t.Hash == svg.G && len(gStack) > 0 {
|
||||
if !gStack[len(gStack)-1] {
|
||||
gStack = gStack[:len(gStack)-1]
|
||||
break
|
||||
}
|
||||
gStack = gStack[:len(gStack)-1]
|
||||
}
|
||||
if len(t.Data) > 3+len(t.Text) {
|
||||
t.Data[2+len(t.Text)] = '>'
|
||||
t.Data = t.Data[:3+len(t.Text)]
|
||||
|
|
12 vendor/github.com/tdewolff/minify/svg/table.go (generated, vendored)
|
@ -2,18 +2,6 @@ package svg // import "github.com/tdewolff/minify/svg"
|
|||
|
||||
import "github.com/tdewolff/parse/svg"
|
||||
|
||||
var containerTagMap = map[svg.Hash]bool{
|
||||
svg.A: true,
|
||||
svg.Defs: true,
|
||||
svg.G: true,
|
||||
svg.Marker: true,
|
||||
svg.Mask: true,
|
||||
svg.Missing_Glyph: true,
|
||||
svg.Pattern: true,
|
||||
svg.Switch: true,
|
||||
svg.Symbol: true,
|
||||
}
|
||||
|
||||
var colorAttrMap = map[svg.Hash]bool{
|
||||
svg.Color: true,
|
||||
svg.Fill: true,
|
||||
|
|
5 vendor/github.com/tdewolff/parse/.travis.yml (generated, vendored)
|
@ -1,5 +0,0 @@
|
|||
language: go
|
||||
before_install:
|
||||
- go get github.com/mattn/goveralls
|
||||
script:
|
||||
- goveralls -v -service travis-ci -repotoken $COVERALLS_TOKEN || go test -v ./...
|
66 vendor/github.com/tdewolff/parse/README.md (generated, vendored)
|
@ -1,66 +0,0 @@
|
|||
# Parse [![Build Status](https://travis-ci.org/tdewolff/parse.svg?branch=master)](https://travis-ci.org/tdewolff/parse) [![GoDoc](http://godoc.org/github.com/tdewolff/parse?status.svg)](http://godoc.org/github.com/tdewolff/parse) [![Coverage Status](https://coveralls.io/repos/github/tdewolff/parse/badge.svg?branch=master)](https://coveralls.io/github/tdewolff/parse?branch=master)
|
||||
|
||||
This package contains several lexers and parsers written in [Go][1]. All subpackages are built to be streaming, high performance and to be in accordance with the official (latest) specifications.
|
||||
|
||||
The lexers are implemented using `buffer.Lexer` in https://github.com/tdewolff/parse/buffer and the parsers work on top of the lexers. Some subpackages have hashes defined (using [Hasher](https://github.com/tdewolff/hasher)) that speed up common byte-slice comparisons.
|
||||
|
||||
## Buffer
|
||||
### Reader
|
||||
Reader is a wrapper around a `[]byte` that implements the `io.Reader` interface. It is a much thinner layer than `bytes.Buffer` provides and is therefore faster.
|
||||
|
||||
### Writer
|
||||
Writer is a buffer that implements the `io.Writer` interface. It is a much thinner layer than `bytes.Buffer` provides and is therefore faster. It will expand the buffer when needed.
|
||||
|
||||
The reset functionality allows for better memory reuse. After calling `Reset`, it will overwrite the current buffer and thus reduce allocations.
|
||||
|
||||
### Lexer
|
||||
Lexer is a read buffer specifically designed for building lexers. It keeps track of two positions: a start and end position. The start position is the beginning of the current token being parsed, the end position is being moved forward until a valid token is found. Calling `Shift` will collapse the positions to the end and return the parsed `[]byte`.
|
||||
|
||||
Moving the end position can go through `Move(int)` which also accepts negative integers. One can also use `Pos() int` to try and parse a token, and if it fails rewind with `Rewind(int)`, passing the previously saved position.
|
||||
|
||||
`Peek(int) byte` will peek forward (relative to the end position) and return the byte at that location. `PeekRune(int) (rune, int)` returns UTF-8 runes and its length at the given **byte** position. Upon an error `Peek` will return `0`, the **user must peek at every character** and not skip any, otherwise it may skip a `0` and panic on out-of-bounds indexing.
|
||||
|
||||
`Lexeme() []byte` will return the currently selected bytes, `Skip()` will collapse the selection. `Shift() []byte` is a combination of `Lexeme() []byte` and `Skip()`.
|
||||
|
||||
When the passed `io.Reader` returned an error, `Err() error` will return that error even if not at the end of the buffer.
|
||||
|
||||
### StreamLexer
|
||||
StreamLexer behaves like Lexer but uses a buffer pool to read in chunks from `io.Reader`, retaining old buffers in memory that are still in use, and re-using old buffers otherwise. Calling `Free(n int)` frees up `n` bytes from the internal buffer(s). It holds an array of buffers to accommodate for keeping everything in-memory. Calling `ShiftLen() int` returns the number of bytes that have been shifted since the previous call to `ShiftLen`, which can be used to specify how many bytes need to be freed up from the buffer. If you don't need to keep returned byte slices around, call `Free(ShiftLen())` after every `Shift` call.
|
||||
|
||||
## Strconv
|
||||
This package contains string conversion function much like the standard library's `strconv` package, but it is specifically tailored for the performance needs within the `minify` package.
|
||||
|
||||
For example, the floating-point to string conversion function is approximately twice as fast as the standard library, but it is not as precise.
|
||||
|
||||
## CSS
|
||||
This package is a CSS3 lexer and parser. Both follow the specification at [CSS Syntax Module Level 3](http://www.w3.org/TR/css-syntax-3/). The lexer takes an io.Reader and converts it into tokens until the EOF. The parser returns a parse tree of the full io.Reader input stream, but the low-level `Next` function can be used for stream parsing to returns grammar units until the EOF.
|
||||
|
||||
[See README here](https://github.com/tdewolff/parse/tree/master/css).
|
||||
|
||||
## HTML
|
||||
This package is an HTML5 lexer. It follows the specification at [The HTML syntax](http://www.w3.org/TR/html5/syntax.html). The lexer takes an io.Reader and converts it into tokens until the EOF.
|
||||
|
||||
[See README here](https://github.com/tdewolff/parse/tree/master/html).
|
||||
|
||||
## JS
|
||||
This package is a JS lexer (ECMA-262, edition 6.0). It follows the specification at [ECMAScript Language Specification](http://www.ecma-international.org/ecma-262/6.0/). The lexer takes an io.Reader and converts it into tokens until the EOF.
|
||||
|
||||
[See README here](https://github.com/tdewolff/parse/tree/master/js).
|
||||
|
||||
## JSON
|
||||
This package is a JSON parser (ECMA-404). It follows the specification at [JSON](http://json.org/). The parser takes an io.Reader and converts it into tokens until the EOF.
|
||||
|
||||
[See README here](https://github.com/tdewolff/parse/tree/master/json).
|
||||
|
||||
## SVG
|
||||
This package contains common hashes for SVG1.1 tags and attributes.
|
||||
|
||||
## XML
|
||||
This package is an XML1.0 lexer. It follows the specification at [Extensible Markup Language (XML) 1.0 (Fifth Edition)](http://www.w3.org/TR/xml/). The lexer takes an io.Reader and converts it into tokens until the EOF.
|
||||
|
||||
[See README here](https://github.com/tdewolff/parse/tree/master/xml).
|
||||
|
||||
## License
|
||||
Released under the [MIT license](LICENSE.md).
|
||||
|
||||
[1]: http://golang.org/ "Go Language"
|
171 vendor/github.com/tdewolff/parse/css/README.md (generated, vendored)
|
@ -1,171 +0,0 @@
|
|||
# CSS [![GoDoc](http://godoc.org/github.com/tdewolff/parse/css?status.svg)](http://godoc.org/github.com/tdewolff/parse/css) [![GoCover](http://gocover.io/_badge/github.com/tdewolff/parse/css)](http://gocover.io/github.com/tdewolff/parse/css)
|
||||
|
||||
This package is a CSS3 lexer and parser written in [Go][1]. Both follow the specification at [CSS Syntax Module Level 3](http://www.w3.org/TR/css-syntax-3/). The lexer takes an io.Reader and converts it into tokens until the EOF. The parser returns a parse tree of the full io.Reader input stream, but the low-level `Next` function can be used for stream parsing to returns grammar units until the EOF.
|
||||
|
||||
## Installation
|
||||
Run the following command
|
||||
|
||||
go get github.com/tdewolff/parse/css
|
||||
|
||||
or add the following import and run project with `go get`
|
||||
|
||||
import "github.com/tdewolff/parse/css"
|
||||
|
||||
## Lexer
|
||||
### Usage
|
||||
The following initializes a new Lexer with io.Reader `r`:
|
||||
``` go
|
||||
l := css.NewLexer(r)
|
||||
```
|
||||
|
||||
To tokenize until EOF an error, use:
|
||||
``` go
|
||||
for {
|
||||
tt, text := l.Next()
|
||||
switch tt {
|
||||
case css.ErrorToken:
|
||||
// error or EOF set in l.Err()
|
||||
return
|
||||
// ...
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
All tokens (see [CSS Syntax Module Level 3](http://www.w3.org/TR/css3-syntax/)):
|
||||
``` go
|
||||
ErrorToken // non-official token, returned when errors occur
|
||||
IdentToken
|
||||
FunctionToken // rgb( rgba( ...
|
||||
AtKeywordToken // @abc
|
||||
HashToken // #abc
|
||||
StringToken
|
||||
BadStringToken
|
||||
UrlToken // url(
|
||||
BadUrlToken
|
||||
DelimToken // any unmatched character
|
||||
NumberToken // 5
|
||||
PercentageToken // 5%
|
||||
DimensionToken // 5em
|
||||
UnicodeRangeToken
|
||||
IncludeMatchToken // ~=
|
||||
DashMatchToken // |=
|
||||
PrefixMatchToken // ^=
|
||||
SuffixMatchToken // $=
|
||||
SubstringMatchToken // *=
|
||||
ColumnToken // ||
|
||||
WhitespaceToken
|
||||
CDOToken // <!--
|
||||
CDCToken // -->
|
||||
ColonToken
|
||||
SemicolonToken
|
||||
CommaToken
|
||||
BracketToken // ( ) [ ] { }, all bracket tokens use this, Data() can distinguish between the brackets
|
||||
CommentToken // non-official token
|
||||
```
|
||||
|
||||
### Examples
|
||||
``` go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/tdewolff/parse/css"
|
||||
)
|
||||
|
||||
// Tokenize CSS3 from stdin.
|
||||
func main() {
|
||||
l := css.NewLexer(os.Stdin)
|
||||
for {
|
||||
tt, text := l.Next()
|
||||
switch tt {
|
||||
case css.ErrorToken:
|
||||
if l.Err() != io.EOF {
|
||||
fmt.Println("Error on line", l.Line(), ":", l.Err())
|
||||
}
|
||||
return
|
||||
case css.IdentToken:
|
||||
fmt.Println("Identifier", string(text))
|
||||
case css.NumberToken:
|
||||
fmt.Println("Number", string(text))
|
||||
// ...
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Parser
|
||||
### Usage
|
||||
The following creates a new Parser.
|
||||
``` go
|
||||
// true because this is the content of an inline style attribute
|
||||
p := css.NewParser(bytes.NewBufferString("color: red;"), true)
|
||||
```
|
||||
|
||||
To iterate over the stylesheet, use:
|
||||
``` go
|
||||
for {
|
||||
gt, _, data := p.Next()
|
||||
if gt == css.ErrorGrammar {
|
||||
break
|
||||
}
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
All grammar units returned by `Next`:
|
||||
``` go
|
||||
ErrorGrammar
|
||||
AtRuleGrammar
|
||||
EndAtRuleGrammar
|
||||
RulesetGrammar
|
||||
EndRulesetGrammar
|
||||
DeclarationGrammar
|
||||
TokenGrammar
|
||||
```
|
||||
|
||||
### Examples
|
||||
``` go
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/tdewolff/parse/css"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// true because this is the content of an inline style attribute
|
||||
p := css.NewParser(bytes.NewBufferString("color: red;"), true)
|
||||
out := ""
|
||||
for {
|
||||
gt, _, data := p.Next()
|
||||
if gt == css.ErrorGrammar {
|
||||
break
|
||||
} else if gt == css.AtRuleGrammar || gt == css.BeginAtRuleGrammar || gt == css.BeginRulesetGrammar || gt == css.DeclarationGrammar {
|
||||
out += string(data)
|
||||
if gt == css.DeclarationGrammar {
|
||||
out += ":"
|
||||
}
|
||||
for _, val := range p.Values() {
|
||||
out += string(val.Data)
|
||||
}
|
||||
if gt == css.BeginAtRuleGrammar || gt == css.BeginRulesetGrammar {
|
||||
out += "{"
|
||||
} else if gt == css.AtRuleGrammar || gt == css.DeclarationGrammar {
|
||||
out += ";"
|
||||
}
|
||||
} else {
|
||||
out += string(data)
|
||||
}
|
||||
}
|
||||
fmt.Println(out)
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## License
|
||||
Released under the [MIT license](https://github.com/tdewolff/parse/blob/master/LICENSE.md).
|
||||
|
||||
[1]: http://golang.org/ "Go Language"
|
1207 vendor/github.com/tdewolff/parse/css/hash.go (generated, vendored)
File diff suppressed because it is too large
4 vendor/github.com/tdewolff/parse/css/parse.go (generated, vendored)
|
@ -70,6 +70,10 @@ type Token struct {
|
|||
Data []byte
|
||||
}
|
||||
|
||||
func (t Token) String() string {
|
||||
return t.TokenType.String() + "('" + string(t.Data) + "')"
|
||||
}
|
||||
|
||||
// Parser is the state for the parser.
|
||||
type Parser struct {
|
||||
l *Lexer
|
||||
|
|
7 vendor/github.com/tdewolff/parse/strconv/int.go (generated, vendored)
|
@ -1,6 +1,8 @@
|
|||
package strconv // import "github.com/tdewolff/parse/strconv"
|
||||
|
||||
import "math"
|
||||
import (
|
||||
"math"
|
||||
)
|
||||
|
||||
// Int parses a byte-slice and returns the integer it represents.
|
||||
// If an invalid character is encountered, it will stop there.
|
||||
|
@ -34,6 +36,9 @@ func ParseInt(b []byte) (int64, int) {
|
|||
|
||||
func LenInt(i int64) int {
|
||||
if i < 0 {
|
||||
if i == -9223372036854775808 {
|
||||
return 19
|
||||
}
|
||||
i = -i
|
||||
}
|
||||
switch {
|
||||
|
|
83 vendor/github.com/tdewolff/parse/strconv/price.go (generated, vendored, new file)
|
@ -0,0 +1,83 @@
|
|||
package strconv
|
||||
|
||||
// AppendPrice will append an int64 formatted as a price, where the int64 is the price in cents.
|
||||
// It does not display whether a price is negative or not.
|
||||
func AppendPrice(b []byte, price int64, dec bool, milSeparator byte, decSeparator byte) []byte {
|
||||
if price < 0 {
|
||||
if price == -9223372036854775808 {
|
||||
x := []byte("92 233 720 368 547 758 08")
|
||||
x[2] = milSeparator
|
||||
x[6] = milSeparator
|
||||
x[10] = milSeparator
|
||||
x[14] = milSeparator
|
||||
x[18] = milSeparator
|
||||
x[22] = decSeparator
|
||||
return append(b, x...)
|
||||
}
|
||||
price = -price
|
||||
}
|
||||
|
||||
// rounding
|
||||
if !dec {
|
||||
firstDec := (price / 10) % 10
|
||||
if firstDec >= 5 {
|
||||
price += 100
|
||||
}
|
||||
}
|
||||
|
||||
// calculate size
|
||||
n := LenInt(price) - 2
|
||||
if n > 0 {
|
||||
n += (n - 1) / 3 // mil separator
|
||||
} else {
|
||||
n = 1
|
||||
}
|
||||
if dec {
|
||||
n += 2 + 1 // decimals + dec separator
|
||||
}
|
||||
|
||||
// resize byte slice
|
||||
i := len(b)
|
||||
if i+n > cap(b) {
|
||||
b = append(b, make([]byte, n)...)
|
||||
} else {
|
||||
b = b[:i+n]
|
||||
}
|
||||
|
||||
// print fractional-part
|
||||
i += n - 1
|
||||
if dec {
|
||||
for j := 0; j < 2; j++ {
|
||||
c := byte(price%10) + '0'
|
||||
price /= 10
|
||||
b[i] = c
|
||||
i--
|
||||
}
|
||||
b[i] = decSeparator
|
||||
i--
|
||||
} else {
|
||||
price /= 100
|
||||
}
|
||||
|
||||
if price == 0 {
|
||||
b[i] = '0'
|
||||
return b
|
||||
}
|
||||
|
||||
// print integer-part
|
||||
j := 0
|
||||
for price > 0 {
|
||||
if j == 3 {
|
||||
b[i] = milSeparator
|
||||
i--
|
||||
j = 0
|
||||
}
|
||||
|
||||
c := byte(price%10) + '0'
|
||||
price /= 10
|
||||
b[i] = c
|
||||
i--
|
||||
j++
|
||||
}
|
||||
return b
|
||||
}
|
101 vendor/github.com/tdewolff/parse/xml/README.md (generated, vendored)
|
@ -1,101 +0,0 @@
|
|||
# XML [![GoDoc](http://godoc.org/github.com/tdewolff/parse/xml?status.svg)](http://godoc.org/github.com/tdewolff/parse/xml) [![GoCover](http://gocover.io/_badge/github.com/tdewolff/parse/xml)](http://gocover.io/github.com/tdewolff/parse/xml)

This package is an XML lexer written in [Go][1]. It follows the specification at [Extensible Markup Language (XML) 1.0 (Fifth Edition)](http://www.w3.org/TR/REC-xml/). The lexer takes an io.Reader and converts it into tokens until the EOF.

## Installation

Run the following command

	go get github.com/tdewolff/parse/xml

or add the following import and run the project with `go get`

	import "github.com/tdewolff/parse/xml"

## Lexer
### Usage
The following initializes a new Lexer with io.Reader `r`:
``` go
l := xml.NewLexer(r)
```

To tokenize until EOF or an error occurs, use:
``` go
for {
	tt, data := l.Next()
	switch tt {
	case xml.ErrorToken:
		// error or EOF set in l.Err()
		return
	case xml.StartTagToken:
		// ...
		for {
			ttAttr, dataAttr := l.Next()
			if ttAttr != xml.AttributeToken {
				// handle StartTagCloseToken/StartTagCloseVoidToken/StartTagClosePIToken
				break
			}
			// ...
		}
	case xml.EndTagToken:
		// ...
	}
}
```

All tokens:
``` go
ErrorToken TokenType = iota // extra token when errors occur
CommentToken
CDATAToken
StartTagToken
StartTagCloseToken
StartTagCloseVoidToken
StartTagClosePIToken
EndTagToken
AttributeToken
TextToken
```

### Examples
``` go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/tdewolff/parse/xml"
)

// Tokenize XML from stdin.
func main() {
	l := xml.NewLexer(os.Stdin)
	for {
		tt, data := l.Next()
		switch tt {
		case xml.ErrorToken:
			if l.Err() != io.EOF {
				fmt.Println("Error on line", l.Line(), ":", l.Err())
			}
			return
		case xml.StartTagToken:
			fmt.Println("Tag", string(data))
			for {
				ttAttr, dataAttr := l.Next()
				if ttAttr != xml.AttributeToken {
					break
				}

				key := dataAttr
				val := l.AttrVal()
				fmt.Println("Attribute", string(key), "=", string(val))
			}
			// ...
		}
	}
}
```

## License
Released under the [MIT license](https://github.com/tdewolff/parse/blob/master/LICENSE.md).

[1]: http://golang.org/ "Go Language"
12 vendor/golang.org/x/image/font/font.go (generated, vendored)
@ -86,6 +86,18 @@ type Metrics struct {
	// value is typically positive, even though a descender goes below the
	// baseline.
	Descent fixed.Int26_6

	// XHeight is the distance from the top of non-ascending lowercase letters
	// to the baseline.
	XHeight fixed.Int26_6

	// CapHeight is the distance from the top of uppercase letters to the
	// baseline.
	CapHeight fixed.Int26_6

	// CaretSlope is the slope of a caret as a vector with the Y axis pointing up.
	// The slope {0, 1} is the vertical caret.
	CaretSlope image.Point
}

// Drawer draws text on a destination image.
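A minimal sketch of how these metrics are typically consumed through font.Drawer; the basicfont face is used purely for illustration, and older faces may leave the newly added XHeight, CapHeight and CaretSlope fields at their zero values.

```go
package main

import (
	"fmt"
	"image"
	"image/color"
	"image/draw"

	"golang.org/x/image/font"
	"golang.org/x/image/font/basicfont"
	"golang.org/x/image/math/fixed"
)

func main() {
	face := basicfont.Face7x13
	m := face.Metrics()

	// Metrics are fixed.Int26_6 values (6 fractional bits); Round() yields whole pixels.
	fmt.Println("ascent (px):", m.Ascent.Round(), "descent (px):", m.Descent.Round())

	dst := image.NewRGBA(image.Rect(0, 0, 200, 40))
	draw.Draw(dst, dst.Bounds(), image.NewUniform(color.White), image.Point{}, draw.Src)

	d := &font.Drawer{
		Dst:  dst,
		Src:  image.NewUniform(color.Black),
		Face: face,
		// Place the baseline one ascent below the top of the image.
		Dot: fixed.P(10, m.Ascent.Round()),
	}
	d.DrawString("badge-gen")
	fmt.Println("advance ends at x =", d.Dot.X.Round())
}
```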
23 vendor/gopkg.in/validator.v2/.gitignore (generated, vendored)
@ -1,23 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
10 vendor/gopkg.in/validator.v2/.travis.yml (generated, vendored)
@ -1,10 +0,0 @@
language: go
go:
  - 1.5
  - 1.6
  - 1.7
go_import_path: gopkg.in/validator.v2
script:
  - go test -race -v -bench=.
notifications:
  email: false
188 vendor/gopkg.in/validator.v2/README.md (generated, vendored)
@ -1,188 +0,0 @@
Package validator
================

Package validator implements variable validations

Installation
============

Just use go get.

```bash
go get gopkg.in/validator.v2
```

And then just import the package into your own code.

```go
import (
	"gopkg.in/validator.v2"
)
```

Usage
=====

Please see http://godoc.org/gopkg.in/validator.v2 for detailed usage docs.
A simple example would be:

```go
type NewUserRequest struct {
	Username string `validate:"min=3,max=40,regexp=^[a-zA-Z]*$"`
	Name     string `validate:"nonzero"`
	Age      int    `validate:"min=21"`
	Password string `validate:"min=8"`
}

nur := NewUserRequest{Username: "something", Age: 20}
if errs := validator.Validate(nur); errs != nil {
	// values not valid, deal with errors here
}
```

Builtin validators

Here is the list of validators built into the package. Builtin validators
will check the element pointed to if the value to check is a pointer.
The `nil` pointer is treated as a valid value by builtin validators other
than `nonzero`, so you should use `nonzero` if you don't want to
accept a `nil` pointer.

```
len
	For numeric types, len will simply make sure that the
	value is equal to the parameter given. For strings, it
	checks that the string length is exactly that number of
	characters. For slices, arrays, and maps, it validates the
	number of items. (Usage: len=10)

max
	For numeric types, max will simply make sure that the
	value is less than or equal to the parameter given. For strings,
	it checks that the string length is at most that number of
	characters. For slices, arrays, and maps, it validates the
	number of items. (Usage: max=10)

min
	For numeric types, min will simply make sure that the value
	is greater than or equal to the parameter given. For strings, it
	checks that the string length is at least that number of
	characters. For slices, arrays, and maps, it validates the
	number of items. (Usage: min=10)

nonzero
	This validates that the value is not zero. The appropriate
	zero value is given by the Go spec (e.g. for int it's 0, for
	string it's "", for pointers it's nil, etc.). For structs, it
	will not check to see if the struct itself has all zero
	values; instead use a pointer or put nonzero on the struct's
	fields that you care about. (Usage: nonzero)

regexp
	Only valid for string types, it will validate that the
	value matches the regular expression provided as a parameter.
	(Usage: regexp=^a.*b$)
```
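A short sketch combining the builtin validators above in struct tags; the struct and field names are illustrative only, not taken from the package.

```go
type Address struct {
	Country string `validate:"len=2,regexp=^[A-Z]*$"` // exactly two uppercase letters
	City    string `validate:"nonzero"`
	Zip     string `validate:"min=4,max=10"`
}

addr := Address{Country: "DE", City: "Cologne", Zip: "50667"}
if errs := validator.Validate(addr); errs != nil {
	// each offending field is reported in errs
}
```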
Custom validators

It is possible to define custom validators by using SetValidationFunc.
First, one needs to create a validation function.

```go
// Very simple validator
func notZZ(v interface{}, param string) error {
	st := reflect.ValueOf(v)
	if st.Kind() != reflect.String {
		return errors.New("notZZ only validates strings")
	}
	if st.String() == "ZZ" {
		return errors.New("value cannot be ZZ")
	}
	return nil
}
```

Then one needs to add it to the list of validators and give it a "tag" name.

```go
validator.SetValidationFunc("notzz", notZZ)
```

Then it is possible to use the notzz validation tag. This will print
"Field A error: value cannot be ZZ"

```go
type T struct {
	A string `validate:"nonzero,notzz"`
}
t := T{"ZZ"}
if errs := validator.Validate(t); errs != nil {
	fmt.Printf("Field A error: %s\n", errs["A"][0])
}
```

You can also have multiple sets of validator rules with SetTag().

```go
type T struct {
	A int `foo:"nonzero" bar:"min=10"`
}
t := T{5}
SetTag("foo")
validator.Validate(t) // valid as it's nonzero
SetTag("bar")
validator.Validate(t) // invalid as it's less than 10
```

SetTag is probably better used with multiple validators.

```go
fooValidator := validator.NewValidator()
fooValidator.SetTag("foo")
barValidator := validator.NewValidator()
barValidator.SetTag("bar")
fooValidator.Validate(t)
barValidator.Validate(t)
```

This keeps the default validator's tag clean. Again, please refer to the
godocs for many more examples and different uses.

Pull requests policy
====================

tl;dr. Contributions are welcome.

The repository is organized in version branches. Pull requests to, say, the
`v2` branch that break API compatibility will not be accepted. It is okay to
break the API in master, *not in the branches*.

As for validation functions, the preference is to keep the main code simple
and add most new functions to the validator-contrib repository.

https://github.com/go-validator/validator-contrib

For improvements and/or fixes to the builtin validation functions, please
make sure the behaviour will not break existing functionality in the branches.
If you see a case where the functionality of the builtin will change
significantly, please send a pull request against `master`. We can discuss then
whether the changes should be incorporated in the version branches as well.

License
=======

Copyright 2014 Roberto Teixeira <robteix@robteix.com>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
12 vendor/gopkg.in/yaml.v2/.travis.yml (generated, vendored)
@ -1,12 +0,0 @@
language: go

go:
  - 1.4
  - 1.5
  - 1.6
  - 1.7
  - 1.8
  - 1.9
  - tip

go_import_path: gopkg.in/yaml.v2
133 vendor/gopkg.in/yaml.v2/README.md (generated, vendored)
@ -1,133 +0,0 @@
# YAML support for the Go language

Introduction
------------

The yaml package enables Go programs to comfortably encode and decode YAML
values. It was developed within [Canonical](https://www.canonical.com) as
part of the [juju](https://juju.ubuntu.com) project, and is based on a
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
C library to parse and generate YAML data quickly and reliably.

Compatibility
-------------

The yaml package supports most of YAML 1.1 and 1.2, including support for
anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
implemented, and base-60 floats from YAML 1.1 are purposefully not
supported since they're a poor design and are gone in YAML 1.2.
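A brief, illustrative sketch of the anchor and map-merging support mentioned above; the document and key names are made up.

```Go
var config = `
defaults: &defaults
  adapter: postgres
  host: localhost
development:
  <<: *defaults
  database: dev_db
`

m := map[string]map[string]string{}
if err := yaml.Unmarshal([]byte(config), &m); err != nil {
	log.Fatalf("error: %v", err)
}
fmt.Println(m["development"]["adapter"]) // "postgres", inherited via the merge key
```
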
Installation and usage
----------------------

The import path for the package is *gopkg.in/yaml.v2*.

To install it, run:

    go get gopkg.in/yaml.v2

API documentation
-----------------

If opened in a browser, the import path itself leads to the API documentation:

  * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)

API stability
-------------

The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).


License
-------

The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.


Example
-------

```Go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

var data = `
a: Easy!
b:
  c: 2
  d: [3, 4]
`

// Note: struct fields must be public in order for unmarshal to
// correctly populate the data.
type T struct {
	A string
	B struct {
		RenamedC int   `yaml:"c"`
		D        []int `yaml:",flow"`
	}
}

func main() {
	t := T{}

	err := yaml.Unmarshal([]byte(data), &t)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- t:\n%v\n\n", t)

	d, err := yaml.Marshal(&t)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- t dump:\n%s\n\n", string(d))

	m := make(map[interface{}]interface{})

	err = yaml.Unmarshal([]byte(data), &m)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- m:\n%v\n\n", m)

	d, err = yaml.Marshal(&m)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- m dump:\n%s\n\n", string(d))
}
```

This example will generate the following output:

```
--- t:
{Easy! {2 [3 4]}}

--- t dump:
a: Easy!
b:
  c: 2
  d: [3, 4]


--- m:
map[a:Easy! b:map[c:2 d:[3 4]]]

--- m dump:
a: Easy!
b:
  c: 2
  d:
  - 3
  - 4
```
5 vendor/gopkg.in/yaml.v2/go.mod (generated, vendored)
@ -1,5 +0,0 @@
module "gopkg.in/yaml.v2"

require (
	"gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
)