Mirror of https://github.com/Luzifer/promcertcheck.git (synced 2024-11-08 16:00:08 +00:00)

Commit c54d13eb94 (parent 583890fadd)

Remove old vendoring

Signed-off-by: Knut Ahlers <knut@ahlers.me>

501 changed files with 0 additions and 239987 deletions

180  Gopkg.lock  generated
@@ -1,180 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


[[projects]]
  digest = "1:ac6e59cf5217c887f6872dc24ebcc3dbbbca6828d7f30b2f24008ec88b5839f3"
  name = "github.com/Luzifer/go_helpers"
  packages = ["str"]
  pruneopts = "NUT"
  revision = "c3bea85c97943065c31d13b2193a9ef8c8488fb2"
  version = "v2.8.0"

[[projects]]
  digest = "1:f6cc072a289a686fda22819d871cd1b0407640141b2f6616dfbab957c96bf6c3"
  name = "github.com/Luzifer/rconfig"
  packages = ["."]
  pruneopts = "NUT"
  revision = "5b80190bff90ccb9899db31e45baac7b1bede03b"
  version = "v2.2.0"

[[projects]]
  branch = "master"
  digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd"
  name = "github.com/beorn7/perks"
  packages = ["quantile"]
  pruneopts = "NUT"
  revision = "3a771d992973f24aa725d07868b467d1ddfceafb"

[[projects]]
  branch = "master"
  digest = "1:237a3d25a7c82c4af199026ec273a4035f951c782414bbe1a63e508a46db0ef5"
  name = "github.com/flosch/pongo2"
  packages = ["."]
  pruneopts = "NUT"
  revision = "24195e6d38b06020d7a92c7b11960cf2e7cad2f2"

[[projects]]
  digest = "1:97df918963298c287643883209a2c3f642e6593379f97ab400c2a2e219ab647d"
  name = "github.com/golang/protobuf"
  packages = ["proto"]
  pruneopts = "NUT"
  revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
  version = "v1.2.0"

[[projects]]
  branch = "master"
  digest = "1:bf0932b52b437415f41054bb880e44bc604b3f02b7a3bff360530f6def7f5c7d"
  name = "github.com/juju/errors"
  packages = ["."]
  pruneopts = "NUT"
  revision = "22422dad46e14561a0854ad42497a75af9b61909"

[[projects]]
  branch = "master"
  digest = "1:c8a452cc8dd4ef9f857570ce2be31ca257a0928bf3c2b08cd7e11972b985c6d7"
  name = "github.com/konsorten/go-windows-terminal-sequences"
  packages = ["."]
  pruneopts = "NUT"
  revision = "b729f2633dfe35f4d1d8a32385f6685610ce1cb5"

[[projects]]
  digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
  name = "github.com/matttproud/golang_protobuf_extensions"
  packages = ["pbutil"]
  pruneopts = "NUT"
  revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
  version = "v1.0.1"

[[projects]]
  digest = "1:3e5fd795ebf6a9e13e67d644da76130af7a6003286531f9573f8074c228b66a3"
  name = "github.com/prometheus/client_golang"
  packages = ["prometheus"]
  pruneopts = "NUT"
  revision = "c5b7fccd204277076155f10851dad72b76a49317"
  version = "v0.8.0"

[[projects]]
  branch = "master"
  digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
  name = "github.com/prometheus/client_model"
  packages = ["go"]
  pruneopts = "NUT"
  revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"

[[projects]]
  branch = "master"
  digest = "1:fad5a35eea6a1a33d6c8f949fbc146f24275ca809ece854248187683f52cc30b"
  name = "github.com/prometheus/common"
  packages = [
    "expfmt",
    "internal/bitbucket.org/ww/goautoneg",
    "model",
  ]
  pruneopts = "NUT"
  revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"

[[projects]]
  branch = "master"
  digest = "1:26a2f5e891cc4d2321f18a0caa84c8e788663c17bed6a487f3cbe2c4295292d0"
  name = "github.com/prometheus/procfs"
  packages = [
    ".",
    "internal/util",
    "nfs",
    "xfs",
  ]
  pruneopts = "NUT"
  revision = "418d78d0b9a7b7de3a6bbc8a23def624cc977bb2"

[[projects]]
  digest = "1:53c3320ee307f01fd24a88e396a8d2239cd8346d1a085320209319f2d33f59cc"
  name = "github.com/robfig/cron"
  packages = ["."]
  pruneopts = "NUT"
  revision = "b41be1df696709bb6395fe435af20370037c0b4c"
  version = "v1.1"

[[projects]]
  digest = "1:01252cd79aac70f16cac02a72a1067dd136e0ad6d5b597d0129cf74c739fd8d1"
  name = "github.com/sirupsen/logrus"
  packages = ["."]
  pruneopts = "NUT"
  revision = "a67f783a3814b8729bd2dac5780b5f78f8dbd64d"
  version = "v1.1.0"

[[projects]]
  digest = "1:e3707aeaccd2adc89eba6c062fec72116fe1fc1ba71097da85b4d8ae1668a675"
  name = "github.com/spf13/pflag"
  packages = ["."]
  pruneopts = "NUT"
  revision = "9a97c102cda95a86cec2345a6f09f55a939babf5"
  version = "v1.0.2"

[[projects]]
  branch = "master"
  digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8"
  name = "golang.org/x/crypto"
  packages = ["ssh/terminal"]
  pruneopts = "NUT"
  revision = "5295e8364332db77d75fce11f1d19c053919a9c9"

[[projects]]
  branch = "master"
  digest = "1:4ea08b5c4c51887a54caee2954db97b620736ae510bdc8514f691d3a9375d93a"
  name = "golang.org/x/sys"
  packages = [
    "unix",
    "windows",
  ]
  pruneopts = "NUT"
  revision = "e4b3c5e9061176387e7cea65e4dc5853801f3fb7"

[[projects]]
  branch = "v2"
  digest = "1:1ab6db2d2bd353449c5d1e976ba7a92a0ece6e83aaab3e6674f8f2f1faebb85a"
  name = "gopkg.in/validator.v2"
  packages = ["."]
  pruneopts = "NUT"
  revision = "135c24b11c19e52befcae2ec3fca5d9b78c4e98e"

[[projects]]
  digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082"
  name = "gopkg.in/yaml.v2"
  packages = ["."]
  pruneopts = "NUT"
  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
  version = "v2.2.1"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  input-imports = [
    "github.com/Luzifer/go_helpers/str",
    "github.com/Luzifer/rconfig",
    "github.com/flosch/pongo2",
    "github.com/prometheus/client_golang/prometheus",
    "github.com/robfig/cron",
    "github.com/sirupsen/logrus",
  ]
  solver-name = "gps-cdcl"
  solver-version = 1

55  Gopkg.toml  
@@ -1,55 +0,0 @@
# Gopkg.toml example
#
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
#   name = "github.com/user/project"
#   version = "1.0.0"
#
# [[constraint]]
#   name = "github.com/user/project2"
#   branch = "dev"
#   source = "github.com/myfork/project2"
#
# [[override]]
#   name = "github.com/x/y"
#   version = "2.4.0"
#
# [prune]
#   non-go = false
#   go-tests = true
#   unused-packages = true


[[constraint]]
  name = "github.com/Luzifer/go_helpers"
  version = "2.5.0"

[[constraint]]
  name = "github.com/Luzifer/rconfig"
  version = "2.2.0"

[[constraint]]
  name = "github.com/flosch/pongo2"
  branch = "master" # Use v4 which is not yet properly tagged

[[constraint]]
  name = "github.com/prometheus/client_golang"
  version = "0.8.0"

[[constraint]]
  name = "github.com/robfig/cron"
  version = "1.1.0"

[[constraint]]
  name = "github.com/sirupsen/logrus"
  version = "1.0.5"

[prune]
  non-go = true
  go-tests = true
  unused-packages = true

202  vendor/github.com/Luzifer/go_helpers/LICENSE  generated vendored
@@ -1,202 +0,0 @@
[Full text of the standard Apache License, Version 2.0 (http://www.apache.org/licenses/), followed by the notice "Copyright 2016- Knut Ahlers <knut@ahlers.me>".]

21  vendor/github.com/Luzifer/go_helpers/str/slice.go  generated vendored
@@ -1,21 +0,0 @@
package str

// AppendIfMissing adds a string to a slice when it's not present yet
func AppendIfMissing(slice []string, s string) []string {
	for _, e := range slice {
		if e == s {
			return slice
		}
	}
	return append(slice, s)
}

// StringInSlice checks for the existence of a string in the slice
func StringInSlice(a string, list []string) bool {
	for _, b := range list {
		if b == a {
			return true
		}
	}
	return false
}

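For reference, a minimal usage sketch of the two str helpers removed above. The example program is illustrative and not part of this commit, but the import path matches the vendored package and is listed among the input-imports in the Gopkg.lock shown earlier.

package main

import (
	"fmt"

	"github.com/Luzifer/go_helpers/str"
)

func main() {
	hosts := []string{"example.com", "example.org"}

	// AppendIfMissing only appends when the value is not yet present,
	// so adding "example.com" again keeps the slice unchanged.
	hosts = str.AppendIfMissing(hosts, "example.com")
	hosts = str.AppendIfMissing(hosts, "example.net")

	fmt.Println(hosts)                                   // [example.com example.org example.net]
	fmt.Println(str.StringInSlice("example.org", hosts)) // true
	fmt.Println(str.StringInSlice("missing", hosts))     // false
}
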
202  vendor/github.com/Luzifer/rconfig/LICENSE  generated vendored
@@ -1,202 +0,0 @@
[Same standard Apache License, Version 2.0 text as in the go_helpers LICENSE above, followed by the notice "Copyright 2015- Knut Ahlers <knut@ahlers.me>".]

64  vendor/github.com/Luzifer/rconfig/autoenv.go  generated vendored
@@ -1,64 +0,0 @@
package rconfig

import "strings"

type characterClass [2]rune

func (c characterClass) Contains(r rune) bool {
	return c[0] <= r && c[1] >= r
}

type characterClasses []characterClass

func (c characterClasses) Contains(r rune) bool {
	for _, cc := range c {
		if cc.Contains(r) {
			return true
		}
	}
	return false
}

var (
	charGroupUpperLetter = characterClass{'A', 'Z'}
	charGroupLowerLetter = characterClass{'a', 'z'}
	charGroupNumber      = characterClass{'0', '9'}
	charGroupLowerNumber = characterClasses{charGroupLowerLetter, charGroupNumber}
)

func deriveEnvVarName(s string) string {
	var (
		words []string
		word  []rune
	)

	for _, l := range s {
		switch {
		case charGroupUpperLetter.Contains(l):
			if len(word) > 0 && charGroupLowerNumber.Contains(word[len(word)-1]) {
				words = append(words, string(word))
				word = []rune{}
			}
			word = append(word, l)

		case charGroupLowerLetter.Contains(l):
			if len(word) > 1 && charGroupUpperLetter.Contains(word[len(word)-1]) {
				words = append(words, string(word[0:len(word)-1]))
				word = word[len(word)-1:]
			}
			word = append(word, l)

		case charGroupNumber.Contains(l):
			word = append(word, l)

		default:
			if len(word) > 0 {
				words = append(words, string(word))
			}
			word = []rune{}
		}
	}
	words = append(words, string(word))

	return strings.ToUpper(strings.Join(words, "_"))
}

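A hypothetical test sketch illustrating what deriveEnvVarName produces; since the function is unexported it would have to live inside package rconfig. The first expectation is the documented example from the AutoEnv comment in config.go below, the other two follow from tracing the switch above.

package rconfig

import "testing"

func TestDeriveEnvVarName(t *testing.T) {
	cases := map[string]string{
		"MyFieldName": "MY_FIELD_NAME", // example documented on AutoEnv
		"ListenHTTP":  "LISTEN_HTTP",   // trailing acronym stays one word
		"HTTPTimeout": "HTTP_TIMEOUT",  // leading acronym is split off before the next word
	}

	for in, want := range cases {
		if got := deriveEnvVarName(in); got != want {
			t.Errorf("deriveEnvVarName(%q) = %q, want %q", in, got, want)
		}
	}
}
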
456  vendor/github.com/Luzifer/rconfig/config.go  generated vendored
@@ -1,456 +0,0 @@
// Package rconfig implements a CLI configuration reader with struct-embedded
// defaults, environment variables and posix compatible flag parsing using
// the pflag library.
package rconfig

import (
	"errors"
	"fmt"
	"os"
	"reflect"
	"strconv"
	"strings"
	"time"

	"github.com/spf13/pflag"
	validator "gopkg.in/validator.v2"
)

type afterFunc func() error

var (
	autoEnv          bool
	fs               *pflag.FlagSet
	variableDefaults map[string]string

	timeParserFormats = []string{
		// Default constants
		time.RFC3339Nano, time.RFC3339,
		time.RFC1123Z, time.RFC1123,
		time.RFC822Z, time.RFC822,
		time.RFC850, time.RubyDate, time.UnixDate, time.ANSIC,
		"2006-01-02 15:04:05.999999999 -0700 MST",
		// More uncommon time formats
		"2006-01-02 15:04:05", "2006-01-02 15:04:05Z07:00", // Simplified ISO time format
		"01/02/2006 15:04:05", "01/02/2006 15:04:05Z07:00", // US time format
		"02.01.2006 15:04:05", "02.01.2006 15:04:05Z07:00", // DE time format
	}
)

func init() {
	variableDefaults = make(map[string]string)
}

// Parse takes the pointer to a struct filled with variables which should be read
// from ENV, default or flag. The precedence in this is flag > ENV > default. So
// if a flag is specified on the CLI it will overwrite the ENV and otherwise ENV
// overwrites the default specified.
//
// For your configuration struct you can use the following struct-tags to control
// the behavior of rconfig:
//
// default: Set a default value
// vardefault: Read the default value from the variable defaults
// env: Read the value from this environment variable
// flag: Flag to read in format "long,short" (for example "listen,l")
// description: A help text for Usage output to guide your users
//
// The format you need to specify those values you can see in the example to this
// function.
//
func Parse(config interface{}) error {
	return parse(config, nil)
}

// ParseAndValidate works exactly like Parse but implements an additional run of
// the go-validator package on the configuration struct. Therefore additional struct
// tags are supported like described in the readme file of the go-validator package:
//
// https://github.com/go-validator/validator/tree/v2#usage
func ParseAndValidate(config interface{}) error {
	return parseAndValidate(config, nil)
}

// Args returns the non-flag command-line arguments.
func Args() []string {
	return fs.Args()
}

// AddTimeParserFormats adds custom formats to parse time.Time fields
func AddTimeParserFormats(f ...string) {
	timeParserFormats = append(timeParserFormats, f...)
}

// AutoEnv enables or disables automated env variable guessing. If no `env` struct
// tag was set and AutoEnv is enabled the env variable name is derived from the
// name of the field: `MyFieldName` will get `MY_FIELD_NAME`
func AutoEnv(enable bool) {
	autoEnv = enable
}

// Usage prints a basic usage with the corresponding defaults for the flags to
// os.Stdout. The defaults are derived from the `default` struct-tag and the ENV.
func Usage() {
	if fs != nil && fs.Parsed() {
		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
		fs.PrintDefaults()
	}
}

// SetVariableDefaults presets the parser with a map of default values to be used
// when specifying the vardefault tag
func SetVariableDefaults(defaults map[string]string) {
	variableDefaults = defaults
}

func parseAndValidate(in interface{}, args []string) error {
	if err := parse(in, args); err != nil {
		return err
	}

	return validator.Validate(in)
}

func parse(in interface{}, args []string) error {
	if args == nil {
		args = os.Args
	}

	fs = pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)
	afterFuncs, err := execTags(in, fs)
	if err != nil {
		return err
	}

	if err := fs.Parse(args); err != nil {
		return err
	}

	if afterFuncs != nil {
		for _, f := range afterFuncs {
			if err := f(); err != nil {
				return err
			}
		}
	}

	return nil
}

func execTags(in interface{}, fs *pflag.FlagSet) ([]afterFunc, error) {
	if reflect.TypeOf(in).Kind() != reflect.Ptr {
		return nil, errors.New("Calling parser with non-pointer")
	}

	if reflect.ValueOf(in).Elem().Kind() != reflect.Struct {
		return nil, errors.New("Calling parser with pointer to non-struct")
	}

	afterFuncs := []afterFunc{}

	st := reflect.ValueOf(in).Elem()
	for i := 0; i < st.NumField(); i++ {
		valField := st.Field(i)
		typeField := st.Type().Field(i)

		if typeField.Tag.Get("default") == "" && typeField.Tag.Get("env") == "" && typeField.Tag.Get("flag") == "" && typeField.Type.Kind() != reflect.Struct {
			// None of our supported tags is present and it's not a sub-struct
			continue
		}

		value := varDefault(typeField.Tag.Get("vardefault"), typeField.Tag.Get("default"))
		value = envDefault(typeField, value)
		parts := strings.Split(typeField.Tag.Get("flag"), ",")

		switch typeField.Type {
		case reflect.TypeOf(time.Duration(0)):
			v, err := time.ParseDuration(value)
			if err != nil {
				if value == "" {
					v = time.Duration(0)
				} else {
					return nil, err
				}
			}

			if typeField.Tag.Get("flag") != "" {
				if len(parts) == 1 {
					fs.DurationVar(valField.Addr().Interface().(*time.Duration), parts[0], v, typeField.Tag.Get("description"))
				} else {
					fs.DurationVarP(valField.Addr().Interface().(*time.Duration), parts[0], parts[1], v, typeField.Tag.Get("description"))
				}
			} else {
				valField.Set(reflect.ValueOf(v))
			}
			continue

		case reflect.TypeOf(time.Time{}):
			var sVar string

			if typeField.Tag.Get("flag") != "" {
				if len(parts) == 1 {
					fs.StringVar(&sVar, parts[0], value, typeField.Tag.Get("description"))
				} else {
					fs.StringVarP(&sVar, parts[0], parts[1], value, typeField.Tag.Get("description"))
				}
			} else {
				sVar = value
			}

			afterFuncs = append(afterFuncs, func(valField reflect.Value, sVar *string) func() error {
				return func() error {
					if *sVar == "" {
						// No time, no problem
						return nil
					}

					// Check whether we could have a timestamp
					if ts, err := strconv.ParseInt(*sVar, 10, 64); err == nil {
						t := time.Unix(ts, 0)
						valField.Set(reflect.ValueOf(t))
						return nil
					}

					// We haven't so lets walk through possible time formats
					matched := false
					for _, tf := range timeParserFormats {
						if t, err := time.Parse(tf, *sVar); err == nil {
							matched = true
							valField.Set(reflect.ValueOf(t))
							return nil
						}
					}

					if !matched {
						return fmt.Errorf("Value %q did not match expected time formats", *sVar)
					}

					return nil
				}
			}(valField, &sVar))

			continue
		}

		switch typeField.Type.Kind() {
		case reflect.String:
			if typeField.Tag.Get("flag") != "" {
				if len(parts) == 1 {
					fs.StringVar(valField.Addr().Interface().(*string), parts[0], value, typeField.Tag.Get("description"))
				} else {
					fs.StringVarP(valField.Addr().Interface().(*string), parts[0], parts[1], value, typeField.Tag.Get("description"))
				}
			} else {
				valField.SetString(value)
			}

		case reflect.Bool:
			v := value == "true"
			if typeField.Tag.Get("flag") != "" {
				if len(parts) == 1 {
					fs.BoolVar(valField.Addr().Interface().(*bool), parts[0], v, typeField.Tag.Get("description"))
				} else {
					fs.BoolVarP(valField.Addr().Interface().(*bool), parts[0], parts[1], v, typeField.Tag.Get("description"))
				}
			} else {
				valField.SetBool(v)
			}

		case reflect.Int, reflect.Int8, reflect.Int32, reflect.Int64:
			vt, err := strconv.ParseInt(value, 10, 64)
			if err != nil {
				if value == "" {
					vt = 0
				} else {
					return nil, err
				}
			}
			if typeField.Tag.Get("flag") != "" {
				registerFlagInt(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description"))
			} else {
				valField.SetInt(vt)
			}

		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
			vt, err := strconv.ParseUint(value, 10, 64)
			if err != nil {
				if value == "" {
					vt = 0
				} else {
					return nil, err
				}
			}
			if typeField.Tag.Get("flag") != "" {
				registerFlagUint(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description"))
			} else {
				valField.SetUint(vt)
			}

		case reflect.Float32, reflect.Float64:
			vt, err := strconv.ParseFloat(value, 64)
			if err != nil {
				if value == "" {
					vt = 0.0
				} else {
					return nil, err
				}
			}
			if typeField.Tag.Get("flag") != "" {
				registerFlagFloat(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description"))
			} else {
				valField.SetFloat(vt)
			}

		case reflect.Struct:
			afs, err := execTags(valField.Addr().Interface(), fs)
			if err != nil {
				return nil, err
			}
			afterFuncs = append(afterFuncs, afs...)

		case reflect.Slice:
			switch typeField.Type.Elem().Kind() {
			case reflect.Int:
				def := []int{}
				for _, v := range strings.Split(value, ",") {
					it, err := strconv.ParseInt(strings.TrimSpace(v), 10, 64)
					if err != nil {
						return nil, err
					}
					def = append(def, int(it))
				}
				if len(parts) == 1 {
					fs.IntSliceVar(valField.Addr().Interface().(*[]int), parts[0], def, typeField.Tag.Get("description"))
				} else {
					fs.IntSliceVarP(valField.Addr().Interface().(*[]int), parts[0], parts[1], def, typeField.Tag.Get("description"))
				}
			case reflect.String:
				del := typeField.Tag.Get("delimiter")
				if len(del) == 0 {
					del = ","
				}
				var def = []string{}
				if value != "" {
					def = strings.Split(value, del)
				}
				if len(parts) == 1 {
					fs.StringSliceVar(valField.Addr().Interface().(*[]string), parts[0], def, typeField.Tag.Get("description"))
				} else {
					fs.StringSliceVarP(valField.Addr().Interface().(*[]string), parts[0], parts[1], def, typeField.Tag.Get("description"))
				}
			}
		}
	}

	return afterFuncs, nil
}

func registerFlagFloat(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt float64, desc string) {
	switch t {
	case reflect.Float32:
		if len(parts) == 1 {
			fs.Float32Var(field.(*float32), parts[0], float32(vt), desc)
		} else {
			fs.Float32VarP(field.(*float32), parts[0], parts[1], float32(vt), desc)
		}
	case reflect.Float64:
		if len(parts) == 1 {
			fs.Float64Var(field.(*float64), parts[0], float64(vt), desc)
		} else {
			fs.Float64VarP(field.(*float64), parts[0], parts[1], float64(vt), desc)
		}
	}
}

func registerFlagInt(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt int64, desc string) {
	switch t {
	case reflect.Int:
		if len(parts) == 1 {
			fs.IntVar(field.(*int), parts[0], int(vt), desc)
		} else {
			fs.IntVarP(field.(*int), parts[0], parts[1], int(vt), desc)
		}
	case reflect.Int8:
		if len(parts) == 1 {
			fs.Int8Var(field.(*int8), parts[0], int8(vt), desc)
		} else {
			fs.Int8VarP(field.(*int8), parts[0], parts[1], int8(vt), desc)
		}
	case reflect.Int32:
		if len(parts) == 1 {
			fs.Int32Var(field.(*int32), parts[0], int32(vt), desc)
		} else {
			fs.Int32VarP(field.(*int32), parts[0], parts[1], int32(vt), desc)
		}
	case reflect.Int64:
		if len(parts) == 1 {
			fs.Int64Var(field.(*int64), parts[0], int64(vt), desc)
		} else {
			fs.Int64VarP(field.(*int64), parts[0], parts[1], int64(vt), desc)
		}
	}
}

func registerFlagUint(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt uint64, desc string) {
	switch t {
	case reflect.Uint:
		if len(parts) == 1 {
			fs.UintVar(field.(*uint), parts[0], uint(vt), desc)
		} else {
			fs.UintVarP(field.(*uint), parts[0], parts[1], uint(vt), desc)
		}
	case reflect.Uint8:
		if len(parts) == 1 {
			fs.Uint8Var(field.(*uint8), parts[0], uint8(vt), desc)
		} else {
			fs.Uint8VarP(field.(*uint8), parts[0], parts[1], uint8(vt), desc)
		}
	case reflect.Uint16:
		if len(parts) == 1 {
			fs.Uint16Var(field.(*uint16), parts[0], uint16(vt), desc)
		} else {
			fs.Uint16VarP(field.(*uint16), parts[0], parts[1], uint16(vt), desc)
		}
	case reflect.Uint32:
		if len(parts) == 1 {
			fs.Uint32Var(field.(*uint32), parts[0], uint32(vt), desc)
		} else {
			fs.Uint32VarP(field.(*uint32), parts[0], parts[1], uint32(vt), desc)
		}
	case reflect.Uint64:
		if len(parts) == 1 {
			fs.Uint64Var(field.(*uint64), parts[0], uint64(vt), desc)
		} else {
			fs.Uint64VarP(field.(*uint64), parts[0], parts[1], uint64(vt), desc)
		}
	}
}

func envDefault(field reflect.StructField, def string) string {
	value := def

	env := field.Tag.Get("env")
	if env == "" && autoEnv {
		env = deriveEnvVarName(field.Name)
	}

	if env != "" {
		if e := os.Getenv(env); e != "" {
			value = e
		}
	}

	return value
}

func varDefault(name, def string) string {
	value := def

	if name != "" {
		if v, ok := variableDefaults[name]; ok {
			value = v
		}
	}

	return value
}

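The Parse documentation above describes the supported struct tags (default, vardefault, env, flag, description) and the flag > ENV > default precedence. The following sketch shows how a caller would use them; the struct fields, flag names and defaults are made up for the example and not taken from promcertcheck.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/Luzifer/rconfig"
)

type config struct {
	Listen   string        `flag:"listen" default:":3000" description:"IP/Port to listen on"`
	LogLevel string        `flag:"log-level" env:"LOG_LEVEL" default:"info" description:"Log level"`
	Interval time.Duration `flag:"interval,i" default:"1h" description:"Check interval"`
}

func main() {
	cfg := config{}

	// Precedence is flag > ENV > default, as described in the Parse docs.
	if err := rconfig.Parse(&cfg); err != nil {
		log.Fatalf("Unable to parse commandline options: %s", err)
	}

	fmt.Printf("listening on %s, checking every %s\n", cfg.Listen, cfg.Interval)
}
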
27  vendor/github.com/Luzifer/rconfig/vardefault_providers.go  generated vendored
@@ -1,27 +0,0 @@
package rconfig

import (
	"io/ioutil"

	"gopkg.in/yaml.v2"
)

// VarDefaultsFromYAMLFile reads contents of a file and calls VarDefaultsFromYAML
func VarDefaultsFromYAMLFile(filename string) map[string]string {
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		return make(map[string]string)
	}

	return VarDefaultsFromYAML(data)
}

// VarDefaultsFromYAML creates a vardefaults map from YAML raw data
func VarDefaultsFromYAML(in []byte) map[string]string {
	out := make(map[string]string)
	err := yaml.Unmarshal(in, &out)
	if err != nil {
		return make(map[string]string)
	}
	return out
}

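An illustrative sketch of the vardefault mechanism these providers feed, combining SetVariableDefaults (from config.go above) with VarDefaultsFromYAMLFile. The file name and struct are hypothetical, not taken from this repository.

package main

import (
	"log"

	"github.com/Luzifer/rconfig"
)

type config struct {
	// The vardefault tag looks up "listen" in the map passed to
	// SetVariableDefaults before falling back to the default tag.
	Listen string `flag:"listen" vardefault:"listen" default:":3000" description:"IP/Port to listen on"`
}

func main() {
	// config_defaults.yaml is a hypothetical file mapping names to
	// default values, e.g. `listen: ":8080"`.
	rconfig.SetVariableDefaults(rconfig.VarDefaultsFromYAMLFile("config_defaults.yaml"))

	cfg := config{}
	if err := rconfig.Parse(&cfg); err != nil {
		log.Fatalf("Unable to parse commandline options: %s", err)
	}

	log.Printf("listen address: %s", cfg.Listen)
}
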
20  vendor/github.com/beorn7/perks/LICENSE  generated vendored
@@ -1,20 +0,0 @@
Copyright (C) 2013 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

316  vendor/github.com/beorn7/perks/quantile/stream.go  generated vendored
@@ -1,316 +0,0 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile

import (
	"math"
	"sort"
)

// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
	Value float64 `json:",string"`
	Width float64 `json:",string"`
	Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int           { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

type invariant func(s *stream, r float64) float64

// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * r
	}
	return newStream(ƒ)
}

// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * (s.n - r)
	}
	return newStream(ƒ)
}

// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targetMap map[float64]float64) *Stream {
	// Convert map to slice to avoid slow iterations on a map.
	// ƒ is called on the hot path, so converting the map to a slice
	// beforehand results in significant CPU savings.
	targets := targetMapToSlice(targetMap)

	ƒ := func(s *stream, r float64) float64 {
		var m = math.MaxFloat64
		var f float64
		for _, t := range targets {
			if t.quantile*s.n <= r {
				f = (2 * t.epsilon * r) / t.quantile
			} else {
				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
			}
			if f < m {
				m = f
			}
		}
		return m
	}
	return newStream(ƒ)
}

type target struct {
	quantile float64
	epsilon  float64
}

func targetMapToSlice(targetMap map[float64]float64) []target {
	targets := make([]target, 0, len(targetMap))

	for quantile, epsilon := range targetMap {
		t := target{
			quantile: quantile,
			epsilon:  epsilon,
		}
		targets = append(targets, t)
	}

	return targets
}

// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream
	b      Samples
	sorted bool
}

func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}

// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		i := int(math.Ceil(float64(l) * q))
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	s.flush()
	return s.stream.query(q)
}

// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}

func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}

type stream struct {
	n float64
	l []Sample
	ƒ invariant
}

func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}

func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64
	i := 0
	for _, sample := range samples {
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
|
||||
i++
|
||||
inserted:
|
||||
s.n += sample.Width
|
||||
r += sample.Width
|
||||
}
|
||||
s.compress()
|
||||
}
|
||||
|
||||
func (s *stream) count() int {
|
||||
return int(s.n)
|
||||
}
|
||||
|
||||
func (s *stream) query(q float64) float64 {
|
||||
t := math.Ceil(q * s.n)
|
||||
t += math.Ceil(s.ƒ(s, t) / 2)
|
||||
p := s.l[0]
|
||||
var r float64
|
||||
for _, c := range s.l[1:] {
|
||||
r += p.Width
|
||||
if r+c.Width+c.Delta > t {
|
||||
return p.Value
|
||||
}
|
||||
p = c
|
||||
}
|
||||
return p.Value
|
||||
}
|
||||
|
||||
func (s *stream) compress() {
|
||||
if len(s.l) < 2 {
|
||||
return
|
||||
}
|
||||
x := s.l[len(s.l)-1]
|
||||
xi := len(s.l) - 1
|
||||
r := s.n - 1 - x.Width
|
||||
|
||||
for i := len(s.l) - 2; i >= 0; i-- {
|
||||
c := s.l[i]
|
||||
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
|
||||
x.Width += c.Width
|
||||
s.l[xi] = x
|
||||
// Remove element at i.
|
||||
copy(s.l[i:], s.l[i+1:])
|
||||
s.l = s.l[:len(s.l)-1]
|
||||
xi -= 1
|
||||
} else {
|
||||
x = c
|
||||
xi = i
|
||||
}
|
||||
r -= c.Width
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stream) samples() Samples {
|
||||
samples := make(Samples, len(s.l))
|
||||
copy(samples, s.l)
|
||||
return samples
|
||||
}
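For context, a minimal sketch of how the Stream API above is typically consumed, assuming the upstream import path github.com/beorn7/perks/quantile listed in Gopkg.lock; the target quantiles and random data are made up for illustration:

package main

import (
    "fmt"
    "math/rand"

    "github.com/beorn7/perks/quantile"
)

func main() {
    // Targets map desired quantiles to their allowed absolute error,
    // per the NewTargeted contract above.
    s := quantile.NewTargeted(map[float64]float64{
        0.50: 0.05,
        0.90: 0.01,
        0.99: 0.001,
    })

    // Feed observations into the stream one by one.
    for i := 0; i < 10000; i++ {
        s.Insert(rand.Float64())
    }

    fmt.Println("count:", s.Count())
    fmt.Println("p50:  ", s.Query(0.50))
    fmt.Println("p99:  ", s.Query(0.99))
}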
10
vendor/github.com/flosch/pongo2/AUTHORS
generated
vendored
@ -1,10 +0,0 @@
Main author and maintainer of pongo2:

* Florian Schlachter <flori@n-schlachter.de>

Contributors (in no specific order):

* @romanoaugusto88
* @vitalbh

Feel free to add yourself to the list or to modify your entry if you did a contribution.
20
vendor/github.com/flosch/pongo2/LICENSE
generated
vendored
@ -1,20 +0,0 @@
The MIT License (MIT)

Copyright (c) 2013-2014 Florian Schlachter

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
136
vendor/github.com/flosch/pongo2/context.go
generated
vendored
|
@ -1,136 +0,0 @@
|
|||
package pongo2
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
|
||||
"github.com/juju/errors"
|
||||
)
|
||||
|
||||
var reIdentifiers = regexp.MustCompile("^[a-zA-Z0-9_]+$")
|
||||
|
||||
var autoescape = true
|
||||
|
||||
func SetAutoescape(newValue bool) {
|
||||
autoescape = newValue
|
||||
}
|
||||
|
||||
// A Context type provides constants, variables, instances or functions to a template.
|
||||
//
|
||||
// pongo2 automatically provides meta-information or functions through the "pongo2"-key.
|
||||
// Currently, context["pongo2"] contains the following keys:
|
||||
// 1. version: returns the version string
|
||||
//
|
||||
// Template examples for accessing items from your context:
|
||||
// {{ myconstant }}
|
||||
// {{ myfunc("test", 42) }}
|
||||
// {{ user.name }}
|
||||
// {{ pongo2.version }}
|
||||
type Context map[string]interface{}
|
||||
|
||||
func (c Context) checkForValidIdentifiers() *Error {
|
||||
for k, v := range c {
|
||||
if !reIdentifiers.MatchString(k) {
|
||||
return &Error{
|
||||
Sender: "checkForValidIdentifiers",
|
||||
OrigError: errors.Errorf("context-key '%s' (value: '%+v') is not a valid identifier", k, v),
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update updates this context with the key/value-pairs from another context.
|
||||
func (c Context) Update(other Context) Context {
|
||||
for k, v := range other {
|
||||
c[k] = v
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// ExecutionContext contains all data important for the current rendering state.
|
||||
//
|
||||
// If you're writing a custom tag, your tag's Execute()-function will
|
||||
// have access to the ExecutionContext. This struct stores anything
|
||||
// about the current rendering process's Context including
|
||||
// the Context provided by the user (field Public).
|
||||
// You can safely use the Private context to provide data to the user's
|
||||
// template (like a 'forloop'-information). The Shared-context is used
|
||||
// to share data between tags. All ExecutionContexts share this context.
|
||||
//
|
||||
// Please be careful when accessing the Public data.
|
||||
// PLEASE DO NOT MODIFY THE PUBLIC CONTEXT (read-only).
|
||||
//
|
||||
// To create your own execution context within tags, use the
|
||||
// NewChildExecutionContext(parent) function.
|
||||
type ExecutionContext struct {
|
||||
template *Template
|
||||
|
||||
Autoescape bool
|
||||
Public Context
|
||||
Private Context
|
||||
Shared Context
|
||||
}
|
||||
|
||||
var pongo2MetaContext = Context{
|
||||
"version": Version,
|
||||
}
|
||||
|
||||
func newExecutionContext(tpl *Template, ctx Context) *ExecutionContext {
|
||||
privateCtx := make(Context)
|
||||
|
||||
// Make the pongo2-related funcs/vars available to the context
|
||||
privateCtx["pongo2"] = pongo2MetaContext
|
||||
|
||||
return &ExecutionContext{
|
||||
template: tpl,
|
||||
|
||||
Public: ctx,
|
||||
Private: privateCtx,
|
||||
Autoescape: autoescape,
|
||||
}
|
||||
}
|
||||
|
||||
func NewChildExecutionContext(parent *ExecutionContext) *ExecutionContext {
|
||||
newctx := &ExecutionContext{
|
||||
template: parent.template,
|
||||
|
||||
Public: parent.Public,
|
||||
Private: make(Context),
|
||||
Autoescape: parent.Autoescape,
|
||||
}
|
||||
newctx.Shared = parent.Shared
|
||||
|
||||
// Copy all existing private items
|
||||
newctx.Private.Update(parent.Private)
|
||||
|
||||
return newctx
|
||||
}
|
||||
|
||||
func (ctx *ExecutionContext) Error(msg string, token *Token) *Error {
|
||||
return ctx.OrigError(errors.New(msg), token)
|
||||
}
|
||||
|
||||
func (ctx *ExecutionContext) OrigError(err error, token *Token) *Error {
|
||||
filename := ctx.template.name
|
||||
var line, col int
|
||||
if token != nil {
|
||||
// No tokens available
|
||||
// TODO: Add location (from where?)
|
||||
filename = token.Filename
|
||||
line = token.Line
|
||||
col = token.Col
|
||||
}
|
||||
return &Error{
|
||||
Template: ctx.template,
|
||||
Filename: filename,
|
||||
Line: line,
|
||||
Column: col,
|
||||
Token: token,
|
||||
Sender: "execution",
|
||||
OrigError: err,
|
||||
}
|
||||
}
|
||||
|
||||
func (ctx *ExecutionContext) Logf(format string, args ...interface{}) {
|
||||
ctx.template.set.logf(format, args...)
|
||||
}
|
31
vendor/github.com/flosch/pongo2/doc.go
generated
vendored
@ -1,31 +0,0 @@
// A Django-syntax like template-engine
//
// Blog posts about pongo2 (including introduction and migration):
// https://www.florian-schlachter.de/?tag=pongo2
//
// Complete documentation on the template language:
// https://docs.djangoproject.com/en/dev/topics/templates/
//
// Try out pongo2 live in the pongo2 playground:
// https://www.florian-schlachter.de/pongo2/
//
// Make sure to read README.md in the repository as well.
//
// A tiny example with template strings:
//
// (Snippet on playground: https://www.florian-schlachter.de/pongo2/?id=1206546277)
//
//     // Compile the template first (i. e. creating the AST)
//     tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
//     if err != nil {
//         panic(err)
//     }
//     // Now you can render the template with the given
//     // pongo2.Context how often you want to.
//     out, err := tpl.Execute(pongo2.Context{"name": "fred"})
//     if err != nil {
//         panic(err)
//     }
//     fmt.Println(out) // Output: Hello Fred!
//
package pongo2
91
vendor/github.com/flosch/pongo2/error.go
generated
vendored
|
@ -1,91 +0,0 @@
|
|||
package pongo2
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// The Error type is being used to address an error during lexing, parsing or
|
||||
// execution. If you want to return an error object (for example in your own
|
||||
// tag or filter) fill this object with as much information as you have.
|
||||
// Make sure "Sender" is always given (if you're returning an error within
|
||||
// a filter, make Sender equals 'filter:yourfilter'; same goes for tags: 'tag:mytag').
|
||||
// It's okay if you only fill in ErrorMsg if you don't have any other details at hand.
|
||||
type Error struct {
|
||||
Template *Template
|
||||
Filename string
|
||||
Line int
|
||||
Column int
|
||||
Token *Token
|
||||
Sender string
|
||||
OrigError error
|
||||
}
|
||||
|
||||
func (e *Error) updateFromTokenIfNeeded(template *Template, t *Token) *Error {
|
||||
if e.Template == nil {
|
||||
e.Template = template
|
||||
}
|
||||
|
||||
if e.Token == nil {
|
||||
e.Token = t
|
||||
if e.Line <= 0 {
|
||||
e.Line = t.Line
|
||||
e.Column = t.Col
|
||||
}
|
||||
}
|
||||
|
||||
return e
|
||||
}
|
||||
|
||||
// Returns a nice formatted error string.
|
||||
func (e *Error) Error() string {
|
||||
s := "[Error"
|
||||
if e.Sender != "" {
|
||||
s += " (where: " + e.Sender + ")"
|
||||
}
|
||||
if e.Filename != "" {
|
||||
s += " in " + e.Filename
|
||||
}
|
||||
if e.Line > 0 {
|
||||
s += fmt.Sprintf(" | Line %d Col %d", e.Line, e.Column)
|
||||
if e.Token != nil {
|
||||
s += fmt.Sprintf(" near '%s'", e.Token.Val)
|
||||
}
|
||||
}
|
||||
s += "] "
|
||||
s += e.OrigError.Error()
|
||||
return s
|
||||
}
|
||||
|
||||
// RawLine returns the affected line from the original template, if available.
|
||||
func (e *Error) RawLine() (line string, available bool, outErr error) {
|
||||
if e.Line <= 0 || e.Filename == "<string>" {
|
||||
return "", false, nil
|
||||
}
|
||||
|
||||
filename := e.Filename
|
||||
if e.Template != nil {
|
||||
filename = e.Template.set.resolveFilename(e.Template, e.Filename)
|
||||
}
|
||||
file, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
defer func() {
|
||||
err := file.Close()
|
||||
if err != nil && outErr == nil {
|
||||
outErr = err
|
||||
}
|
||||
}()
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
l := 0
|
||||
for scanner.Scan() {
|
||||
l++
|
||||
if l == e.Line {
|
||||
return scanner.Text(), true, nil
|
||||
}
|
||||
}
|
||||
return "", false, nil
|
||||
}
|
143
vendor/github.com/flosch/pongo2/filters.go
generated
vendored
|
@ -1,143 +0,0 @@
|
|||
package pongo2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/juju/errors"
|
||||
)
|
||||
|
||||
// FilterFunction is the type filter functions must fulfil
|
||||
type FilterFunction func(in *Value, param *Value) (out *Value, err *Error)
|
||||
|
||||
var filters map[string]FilterFunction
|
||||
|
||||
func init() {
|
||||
filters = make(map[string]FilterFunction)
|
||||
}
|
||||
|
||||
// FilterExists returns true if the given filter is already registered
|
||||
func FilterExists(name string) bool {
|
||||
_, existing := filters[name]
|
||||
return existing
|
||||
}
|
||||
|
||||
// RegisterFilter registers a new filter. If there's already a filter with the same
|
||||
// name, RegisterFilter will panic. You usually want to call this
|
||||
// function in the filter's init() function:
|
||||
// http://golang.org/doc/effective_go.html#init
|
||||
//
|
||||
// See http://www.florian-schlachter.de/post/pongo2/ for more about
|
||||
// writing filters and tags.
|
||||
func RegisterFilter(name string, fn FilterFunction) error {
|
||||
if FilterExists(name) {
|
||||
return errors.Errorf("filter with name '%s' is already registered", name)
|
||||
}
|
||||
filters[name] = fn
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReplaceFilter replaces an already registered filter with a new implementation. Use this
|
||||
// function with caution since it allows you to change existing filter behaviour.
|
||||
func ReplaceFilter(name string, fn FilterFunction) error {
|
||||
if !FilterExists(name) {
|
||||
return errors.Errorf("filter with name '%s' does not exist (therefore cannot be overridden)", name)
|
||||
}
|
||||
filters[name] = fn
|
||||
return nil
|
||||
}
|
||||
|
||||
// MustApplyFilter behaves like ApplyFilter, but panics on an error.
|
||||
func MustApplyFilter(name string, value *Value, param *Value) *Value {
|
||||
val, err := ApplyFilter(name, value, param)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// ApplyFilter applies a filter to a given value using the given parameters.
|
||||
// Returns a *pongo2.Value or an error.
|
||||
func ApplyFilter(name string, value *Value, param *Value) (*Value, *Error) {
|
||||
fn, existing := filters[name]
|
||||
if !existing {
|
||||
return nil, &Error{
|
||||
Sender: "applyfilter",
|
||||
OrigError: errors.Errorf("Filter with name '%s' not found.", name),
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure param is a *Value
|
||||
if param == nil {
|
||||
param = AsValue(nil)
|
||||
}
|
||||
|
||||
return fn(value, param)
|
||||
}
|
||||
|
||||
type filterCall struct {
|
||||
token *Token
|
||||
|
||||
name string
|
||||
parameter IEvaluator
|
||||
|
||||
filterFunc FilterFunction
|
||||
}
|
||||
|
||||
func (fc *filterCall) Execute(v *Value, ctx *ExecutionContext) (*Value, *Error) {
|
||||
var param *Value
|
||||
var err *Error
|
||||
|
||||
if fc.parameter != nil {
|
||||
param, err = fc.parameter.Evaluate(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
param = AsValue(nil)
|
||||
}
|
||||
|
||||
filteredValue, err := fc.filterFunc(v, param)
|
||||
if err != nil {
|
||||
return nil, err.updateFromTokenIfNeeded(ctx.template, fc.token)
|
||||
}
|
||||
return filteredValue, nil
|
||||
}
|
||||
|
||||
// Filter = IDENT | IDENT ":" FilterArg | IDENT "|" Filter
|
||||
func (p *Parser) parseFilter() (*filterCall, *Error) {
|
||||
identToken := p.MatchType(TokenIdentifier)
|
||||
|
||||
// Check filter ident
|
||||
if identToken == nil {
|
||||
return nil, p.Error("Filter name must be an identifier.", nil)
|
||||
}
|
||||
|
||||
filter := &filterCall{
|
||||
token: identToken,
|
||||
name: identToken.Val,
|
||||
}
|
||||
|
||||
// Get the appropriate filter function and bind it
|
||||
filterFn, exists := filters[identToken.Val]
|
||||
if !exists {
|
||||
return nil, p.Error(fmt.Sprintf("Filter '%s' does not exist.", identToken.Val), identToken)
|
||||
}
|
||||
|
||||
filter.filterFunc = filterFn
|
||||
|
||||
// Check for filter-argument (2 tokens needed: ':' ARG)
|
||||
if p.Match(TokenSymbol, ":") != nil {
|
||||
if p.Peek(TokenSymbol, "}}") != nil {
|
||||
return nil, p.Error("Filter parameter required after ':'.", nil)
|
||||
}
|
||||
|
||||
// Get filter argument expression
|
||||
v, err := p.parseVariableOrLiteral()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
filter.parameter = v
|
||||
}
|
||||
|
||||
return filter, nil
|
||||
}
|
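As a usage note, here is a minimal sketch of registering and applying a custom filter through the RegisterFilter API shown above; the filter name "shout" and its behaviour are hypothetical, for illustration only:

package main

import (
    "fmt"
    "strings"

    "github.com/flosch/pongo2"
)

func main() {
    // Register a hypothetical "shout" filter using the FilterFunction signature above.
    if err := pongo2.RegisterFilter("shout", func(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {
        return pongo2.AsValue(strings.ToUpper(in.String())), nil
    }); err != nil {
        panic(err)
    }

    // Compile and render a template that uses the new filter.
    tpl, err := pongo2.FromString("Hello {{ name|shout }}!")
    if err != nil {
        panic(err)
    }
    out, err := tpl.Execute(pongo2.Context{"name": "world"})
    if err != nil {
        panic(err)
    }
    fmt.Println(out) // Hello WORLD!
}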
927
vendor/github.com/flosch/pongo2/filters_builtin.go
generated
vendored
|
@ -1,927 +0,0 @@
|
|||
package pongo2
|
||||
|
||||
/* Filters that are provided through github.com/flosch/pongo2-addons:
|
||||
------------------------------------------------------------------
|
||||
|
||||
filesizeformat
|
||||
slugify
|
||||
timesince
|
||||
timeuntil
|
||||
|
||||
Filters that won't be added:
|
||||
----------------------------
|
||||
|
||||
get_static_prefix (reason: web-framework specific)
|
||||
pprint (reason: python-specific)
|
||||
static (reason: web-framework specific)
|
||||
|
||||
Reconsideration (not implemented yet):
|
||||
--------------------------------------
|
||||
|
||||
force_escape (reason: not yet needed since this is the behaviour of pongo2's escape filter)
|
||||
safeseq (reason: same reason as `force_escape`)
|
||||
unordered_list (python-specific; not sure whether needed or not)
|
||||
dictsort (python-specific; maybe one could add a filter to sort a list of structs by a specific field name)
|
||||
dictsortreversed (see dictsort)
|
||||
*/
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/juju/errors"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().Unix())
|
||||
|
||||
RegisterFilter("escape", filterEscape)
|
||||
RegisterFilter("safe", filterSafe)
|
||||
RegisterFilter("escapejs", filterEscapejs)
|
||||
|
||||
RegisterFilter("add", filterAdd)
|
||||
RegisterFilter("addslashes", filterAddslashes)
|
||||
RegisterFilter("capfirst", filterCapfirst)
|
||||
RegisterFilter("center", filterCenter)
|
||||
RegisterFilter("cut", filterCut)
|
||||
RegisterFilter("date", filterDate)
|
||||
RegisterFilter("default", filterDefault)
|
||||
RegisterFilter("default_if_none", filterDefaultIfNone)
|
||||
RegisterFilter("divisibleby", filterDivisibleby)
|
||||
RegisterFilter("first", filterFirst)
|
||||
RegisterFilter("floatformat", filterFloatformat)
|
||||
RegisterFilter("get_digit", filterGetdigit)
|
||||
RegisterFilter("iriencode", filterIriencode)
|
||||
RegisterFilter("join", filterJoin)
|
||||
RegisterFilter("last", filterLast)
|
||||
RegisterFilter("length", filterLength)
|
||||
RegisterFilter("length_is", filterLengthis)
|
||||
RegisterFilter("linebreaks", filterLinebreaks)
|
||||
RegisterFilter("linebreaksbr", filterLinebreaksbr)
|
||||
RegisterFilter("linenumbers", filterLinenumbers)
|
||||
RegisterFilter("ljust", filterLjust)
|
||||
RegisterFilter("lower", filterLower)
|
||||
RegisterFilter("make_list", filterMakelist)
|
||||
RegisterFilter("phone2numeric", filterPhone2numeric)
|
||||
RegisterFilter("pluralize", filterPluralize)
|
||||
RegisterFilter("random", filterRandom)
|
||||
RegisterFilter("removetags", filterRemovetags)
|
||||
RegisterFilter("rjust", filterRjust)
|
||||
RegisterFilter("slice", filterSlice)
|
||||
RegisterFilter("split", filterSplit)
|
||||
RegisterFilter("stringformat", filterStringformat)
|
||||
RegisterFilter("striptags", filterStriptags)
|
||||
RegisterFilter("time", filterDate) // time uses filterDate (same golang-format)
|
||||
RegisterFilter("title", filterTitle)
|
||||
RegisterFilter("truncatechars", filterTruncatechars)
|
||||
RegisterFilter("truncatechars_html", filterTruncatecharsHTML)
|
||||
RegisterFilter("truncatewords", filterTruncatewords)
|
||||
RegisterFilter("truncatewords_html", filterTruncatewordsHTML)
|
||||
RegisterFilter("upper", filterUpper)
|
||||
RegisterFilter("urlencode", filterUrlencode)
|
||||
RegisterFilter("urlize", filterUrlize)
|
||||
RegisterFilter("urlizetrunc", filterUrlizetrunc)
|
||||
RegisterFilter("wordcount", filterWordcount)
|
||||
RegisterFilter("wordwrap", filterWordwrap)
|
||||
RegisterFilter("yesno", filterYesno)
|
||||
|
||||
RegisterFilter("float", filterFloat) // pongo-specific
|
||||
RegisterFilter("integer", filterInteger) // pongo-specific
|
||||
}
|
||||
|
||||
func filterTruncatecharsHelper(s string, newLen int) string {
|
||||
runes := []rune(s)
|
||||
if newLen < len(runes) {
|
||||
if newLen >= 3 {
|
||||
return fmt.Sprintf("%s...", string(runes[:newLen-3]))
|
||||
}
|
||||
// Not enough space for the ellipsis
|
||||
return string(runes[:newLen])
|
||||
}
|
||||
return string(runes)
|
||||
}
|
||||
|
||||
func filterTruncateHTMLHelper(value string, newOutput *bytes.Buffer, cond func() bool, fn func(c rune, s int, idx int) int, finalize func()) {
|
||||
vLen := len(value)
|
||||
var tagStack []string
|
||||
idx := 0
|
||||
|
||||
for idx < vLen && !cond() {
|
||||
c, s := utf8.DecodeRuneInString(value[idx:])
|
||||
if c == utf8.RuneError {
|
||||
idx += s
|
||||
continue
|
||||
}
|
||||
|
||||
if c == '<' {
|
||||
newOutput.WriteRune(c)
|
||||
idx += s // consume "<"
|
||||
|
||||
if idx+1 < vLen {
|
||||
if value[idx] == '/' {
|
||||
// Close tag
|
||||
|
||||
newOutput.WriteString("/")
|
||||
|
||||
tag := ""
|
||||
idx++ // consume "/"
|
||||
|
||||
for idx < vLen {
|
||||
c2, size2 := utf8.DecodeRuneInString(value[idx:])
|
||||
if c2 == utf8.RuneError {
|
||||
idx += size2
|
||||
continue
|
||||
}
|
||||
|
||||
// End of tag found
|
||||
if c2 == '>' {
|
||||
idx++ // consume ">"
|
||||
break
|
||||
}
|
||||
tag += string(c2)
|
||||
idx += size2
|
||||
}
|
||||
|
||||
if len(tagStack) > 0 {
|
||||
// Ideally, the close tag is TOP of tag stack
|
||||
// In malformed HTML, it must not be, so iterate through the stack and remove the tag
|
||||
for i := len(tagStack) - 1; i >= 0; i-- {
|
||||
if tagStack[i] == tag {
|
||||
// Found the tag
|
||||
tagStack[i] = tagStack[len(tagStack)-1]
|
||||
tagStack = tagStack[:len(tagStack)-1]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newOutput.WriteString(tag)
|
||||
newOutput.WriteString(">")
|
||||
} else {
|
||||
// Open tag
|
||||
|
||||
tag := ""
|
||||
|
||||
params := false
|
||||
for idx < vLen {
|
||||
c2, size2 := utf8.DecodeRuneInString(value[idx:])
|
||||
if c2 == utf8.RuneError {
|
||||
idx += size2
|
||||
continue
|
||||
}
|
||||
|
||||
newOutput.WriteRune(c2)
|
||||
|
||||
// End of tag found
|
||||
if c2 == '>' {
|
||||
idx++ // consume ">"
|
||||
break
|
||||
}
|
||||
|
||||
if !params {
|
||||
if c2 == ' ' {
|
||||
params = true
|
||||
} else {
|
||||
tag += string(c2)
|
||||
}
|
||||
}
|
||||
|
||||
idx += size2
|
||||
}
|
||||
|
||||
// Add tag to stack
|
||||
tagStack = append(tagStack, tag)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
idx = fn(c, s, idx)
|
||||
}
|
||||
}
|
||||
|
||||
finalize()
|
||||
|
||||
for i := len(tagStack) - 1; i >= 0; i-- {
|
||||
tag := tagStack[i]
|
||||
// Close everything from the regular tag stack
|
||||
newOutput.WriteString(fmt.Sprintf("</%s>", tag))
|
||||
}
|
||||
}
|
||||
|
||||
func filterTruncatechars(in *Value, param *Value) (*Value, *Error) {
|
||||
s := in.String()
|
||||
newLen := param.Integer()
|
||||
return AsValue(filterTruncatecharsHelper(s, newLen)), nil
|
||||
}
|
||||
|
||||
func filterTruncatecharsHTML(in *Value, param *Value) (*Value, *Error) {
|
||||
value := in.String()
|
||||
newLen := max(param.Integer()-3, 0)
|
||||
|
||||
newOutput := bytes.NewBuffer(nil)
|
||||
|
||||
textcounter := 0
|
||||
|
||||
filterTruncateHTMLHelper(value, newOutput, func() bool {
|
||||
return textcounter >= newLen
|
||||
}, func(c rune, s int, idx int) int {
|
||||
textcounter++
|
||||
newOutput.WriteRune(c)
|
||||
|
||||
return idx + s
|
||||
}, func() {
|
||||
if textcounter >= newLen && textcounter < len(value) {
|
||||
newOutput.WriteString("...")
|
||||
}
|
||||
})
|
||||
|
||||
return AsSafeValue(newOutput.String()), nil
|
||||
}
|
||||
|
||||
func filterTruncatewords(in *Value, param *Value) (*Value, *Error) {
|
||||
words := strings.Fields(in.String())
|
||||
n := param.Integer()
|
||||
if n <= 0 {
|
||||
return AsValue(""), nil
|
||||
}
|
||||
nlen := min(len(words), n)
|
||||
out := make([]string, 0, nlen)
|
||||
for i := 0; i < nlen; i++ {
|
||||
out = append(out, words[i])
|
||||
}
|
||||
|
||||
if n < len(words) {
|
||||
out = append(out, "...")
|
||||
}
|
||||
|
||||
return AsValue(strings.Join(out, " ")), nil
|
||||
}
|
||||
|
||||
func filterTruncatewordsHTML(in *Value, param *Value) (*Value, *Error) {
|
||||
value := in.String()
|
||||
newLen := max(param.Integer(), 0)
|
||||
|
||||
newOutput := bytes.NewBuffer(nil)
|
||||
|
||||
wordcounter := 0
|
||||
|
||||
filterTruncateHTMLHelper(value, newOutput, func() bool {
|
||||
return wordcounter >= newLen
|
||||
}, func(_ rune, _ int, idx int) int {
|
||||
// Get next word
|
||||
wordFound := false
|
||||
|
||||
for idx < len(value) {
|
||||
c2, size2 := utf8.DecodeRuneInString(value[idx:])
|
||||
if c2 == utf8.RuneError {
|
||||
idx += size2
|
||||
continue
|
||||
}
|
||||
|
||||
if c2 == '<' {
|
||||
// HTML tag start, don't consume it
|
||||
return idx
|
||||
}
|
||||
|
||||
newOutput.WriteRune(c2)
|
||||
idx += size2
|
||||
|
||||
if c2 == ' ' || c2 == '.' || c2 == ',' || c2 == ';' {
|
||||
// Word ends here, stop capturing it now
|
||||
break
|
||||
} else {
|
||||
wordFound = true
|
||||
}
|
||||
}
|
||||
|
||||
if wordFound {
|
||||
wordcounter++
|
||||
}
|
||||
|
||||
return idx
|
||||
}, func() {
|
||||
if wordcounter >= newLen {
|
||||
newOutput.WriteString("...")
|
||||
}
|
||||
})
|
||||
|
||||
return AsSafeValue(newOutput.String()), nil
|
||||
}
|
||||
|
||||
func filterEscape(in *Value, param *Value) (*Value, *Error) {
|
||||
output := strings.Replace(in.String(), "&", "&", -1)
|
||||
output = strings.Replace(output, ">", ">", -1)
|
||||
output = strings.Replace(output, "<", "<", -1)
|
||||
output = strings.Replace(output, "\"", """, -1)
|
||||
output = strings.Replace(output, "'", "'", -1)
|
||||
return AsValue(output), nil
|
||||
}
|
||||
|
||||
func filterSafe(in *Value, param *Value) (*Value, *Error) {
|
||||
return in, nil // nothing to do here, just to keep track of the safe application
|
||||
}
|
||||
|
||||
func filterEscapejs(in *Value, param *Value) (*Value, *Error) {
|
||||
sin := in.String()
|
||||
|
||||
var b bytes.Buffer
|
||||
|
||||
idx := 0
|
||||
for idx < len(sin) {
|
||||
c, size := utf8.DecodeRuneInString(sin[idx:])
|
||||
if c == utf8.RuneError {
|
||||
idx += size
|
||||
continue
|
||||
}
|
||||
|
||||
if c == '\\' {
|
||||
// Escape seq?
|
||||
if idx+1 < len(sin) {
|
||||
switch sin[idx+1] {
|
||||
case 'r':
|
||||
b.WriteString(fmt.Sprintf(`\u%04X`, '\r'))
|
||||
idx += 2
|
||||
continue
|
||||
case 'n':
|
||||
b.WriteString(fmt.Sprintf(`\u%04X`, '\n'))
|
||||
idx += 2
|
||||
continue
|
||||
/*case '\'':
|
||||
b.WriteString(fmt.Sprintf(`\u%04X`, '\''))
|
||||
idx += 2
|
||||
continue
|
||||
case '"':
|
||||
b.WriteString(fmt.Sprintf(`\u%04X`, '"'))
|
||||
idx += 2
|
||||
continue*/
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == ' ' || c == '/' {
|
||||
b.WriteRune(c)
|
||||
} else {
|
||||
b.WriteString(fmt.Sprintf(`\u%04X`, c))
|
||||
}
|
||||
|
||||
idx += size
|
||||
}
|
||||
|
||||
return AsValue(b.String()), nil
|
||||
}
|
||||
|
||||
func filterAdd(in *Value, param *Value) (*Value, *Error) {
|
||||
if in.IsNumber() && param.IsNumber() {
|
||||
if in.IsFloat() || param.IsFloat() {
|
||||
return AsValue(in.Float() + param.Float()), nil
|
||||
}
|
||||
return AsValue(in.Integer() + param.Integer()), nil
|
||||
}
|
||||
// If in/param is not a number, we're relying on the
|
||||
// Value's String() conversion and just add them both together
|
||||
return AsValue(in.String() + param.String()), nil
|
||||
}
|
||||
|
||||
func filterAddslashes(in *Value, param *Value) (*Value, *Error) {
|
||||
output := strings.Replace(in.String(), "\\", "\\\\", -1)
|
||||
output = strings.Replace(output, "\"", "\\\"", -1)
|
||||
output = strings.Replace(output, "'", "\\'", -1)
|
||||
return AsValue(output), nil
|
||||
}
|
||||
|
||||
func filterCut(in *Value, param *Value) (*Value, *Error) {
|
||||
return AsValue(strings.Replace(in.String(), param.String(), "", -1)), nil
|
||||
}
|
||||
|
||||
func filterLength(in *Value, param *Value) (*Value, *Error) {
|
||||
return AsValue(in.Len()), nil
|
||||
}
|
||||
|
||||
func filterLengthis(in *Value, param *Value) (*Value, *Error) {
|
||||
return AsValue(in.Len() == param.Integer()), nil
|
||||
}
|
||||
|
||||
func filterDefault(in *Value, param *Value) (*Value, *Error) {
|
||||
if !in.IsTrue() {
|
||||
return param, nil
|
||||
}
|
||||
return in, nil
|
||||
}
|
||||
|
||||
func filterDefaultIfNone(in *Value, param *Value) (*Value, *Error) {
|
||||
if in.IsNil() {
|
||||
return param, nil
|
||||
}
|
||||
return in, nil
|
||||
}
|
||||
|
||||
func filterDivisibleby(in *Value, param *Value) (*Value, *Error) {
|
||||
if param.Integer() == 0 {
|
||||
return AsValue(false), nil
|
||||
}
|
||||
return AsValue(in.Integer()%param.Integer() == 0), nil
|
||||
}
|
||||
|
||||
func filterFirst(in *Value, param *Value) (*Value, *Error) {
|
||||
if in.CanSlice() && in.Len() > 0 {
|
||||
return in.Index(0), nil
|
||||
}
|
||||
return AsValue(""), nil
|
||||
}
|
||||
|
||||
func filterFloatformat(in *Value, param *Value) (*Value, *Error) {
|
||||
val := in.Float()
|
||||
|
||||
decimals := -1
|
||||
if !param.IsNil() {
|
||||
// Any argument provided?
|
||||
decimals = param.Integer()
|
||||
}
|
||||
|
||||
// if the argument is not a number (e. g. empty), the default
|
||||
// behaviour is trim the result
|
||||
trim := !param.IsNumber()
|
||||
|
||||
if decimals <= 0 {
|
||||
// argument is negative or zero, so we
|
||||
// want the output being trimmed
|
||||
decimals = -decimals
|
||||
trim = true
|
||||
}
|
||||
|
||||
if trim {
|
||||
// Remove zeroes
|
||||
if float64(int(val)) == val {
|
||||
return AsValue(in.Integer()), nil
|
||||
}
|
||||
}
|
||||
|
||||
return AsValue(strconv.FormatFloat(val, 'f', decimals, 64)), nil
|
||||
}
|
||||
|
||||
func filterGetdigit(in *Value, param *Value) (*Value, *Error) {
|
||||
i := param.Integer()
|
||||
l := len(in.String()) // do NOT use in.Len() here!
|
||||
if i <= 0 || i > l {
|
||||
return in, nil
|
||||
}
|
||||
return AsValue(in.String()[l-i] - 48), nil
|
||||
}
|
||||
|
||||
const filterIRIChars = "/#%[]=:;$&()+,!?*@'~"
|
||||
|
||||
func filterIriencode(in *Value, param *Value) (*Value, *Error) {
|
||||
var b bytes.Buffer
|
||||
|
||||
sin := in.String()
|
||||
for _, r := range sin {
|
||||
if strings.IndexRune(filterIRIChars, r) >= 0 {
|
||||
b.WriteRune(r)
|
||||
} else {
|
||||
b.WriteString(url.QueryEscape(string(r)))
|
||||
}
|
||||
}
|
||||
|
||||
return AsValue(b.String()), nil
|
||||
}
|
||||
|
||||
func filterJoin(in *Value, param *Value) (*Value, *Error) {
|
||||
if !in.CanSlice() {
|
||||
return in, nil
|
||||
}
|
||||
sep := param.String()
|
||||
sl := make([]string, 0, in.Len())
|
||||
for i := 0; i < in.Len(); i++ {
|
||||
sl = append(sl, in.Index(i).String())
|
||||
}
|
||||
return AsValue(strings.Join(sl, sep)), nil
|
||||
}
|
||||
|
||||
func filterLast(in *Value, param *Value) (*Value, *Error) {
|
||||
if in.CanSlice() && in.Len() > 0 {
|
||||
return in.Index(in.Len() - 1), nil
|
||||
}
|
||||
return AsValue(""), nil
|
||||
}
|
||||
|
||||
func filterUpper(in *Value, param *Value) (*Value, *Error) {
|
||||
return AsValue(strings.ToUpper(in.String())), nil
|
||||
}
|
||||
|
||||
func filterLower(in *Value, param *Value) (*Value, *Error) {
|
||||
return AsValue(strings.ToLower(in.String())), nil
|
||||
}
|
||||
|
||||
func filterMakelist(in *Value, param *Value) (*Value, *Error) {
|
||||
s := in.String()
|
||||
result := make([]string, 0, len(s))
|
||||
for _, c := range s {
|
||||
result = append(result, string(c))
|
||||
}
|
||||
return AsValue(result), nil
|
||||
}
|
||||
|
||||
func filterCapfirst(in *Value, param *Value) (*Value, *Error) {
|
||||
if in.Len() <= 0 {
|
||||
return AsValue(""), nil
|
||||
}
|
||||
t := in.String()
|
||||
r, size := utf8.DecodeRuneInString(t)
|
||||
return AsValue(strings.ToUpper(string(r)) + t[size:]), nil
|
||||
}
|
||||
|
||||
func filterCenter(in *Value, param *Value) (*Value, *Error) {
|
||||
width := param.Integer()
|
||||
slen := in.Len()
|
||||
if width <= slen {
|
||||
return in, nil
|
||||
}
|
||||
|
||||
spaces := width - slen
|
||||
left := spaces/2 + spaces%2
|
||||
right := spaces / 2
|
||||
|
||||
return AsValue(fmt.Sprintf("%s%s%s", strings.Repeat(" ", left),
|
||||
in.String(), strings.Repeat(" ", right))), nil
|
||||
}
|
||||
|
||||
func filterDate(in *Value, param *Value) (*Value, *Error) {
|
||||
t, isTime := in.Interface().(time.Time)
|
||||
if !isTime {
|
||||
return nil, &Error{
|
||||
Sender: "filter:date",
|
||||
OrigError: errors.New("filter input argument must be of type 'time.Time'"),
|
||||
}
|
||||
}
|
||||
return AsValue(t.Format(param.String())), nil
|
||||
}
|
||||
|
||||
func filterFloat(in *Value, param *Value) (*Value, *Error) {
|
||||
return AsValue(in.Float()), nil
|
||||
}
|
||||
|
||||
func filterInteger(in *Value, param *Value) (*Value, *Error) {
|
||||
return AsValue(in.Integer()), nil
|
||||
}
|
||||
|
||||
func filterLinebreaks(in *Value, param *Value) (*Value, *Error) {
|
||||
if in.Len() == 0 {
|
||||
return in, nil
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
|
||||
// Newline = <br />
|
||||
// Double newline = <p>...</p>
|
||||
lines := strings.Split(in.String(), "\n")
|
||||
lenlines := len(lines)
|
||||
|
||||
opened := false
|
||||
|
||||
for idx, line := range lines {
|
||||
|
||||
if !opened {
|
||||
b.WriteString("<p>")
|
||||
opened = true
|
||||
}
|
||||
|
||||
b.WriteString(line)
|
||||
|
||||
if idx < lenlines-1 && strings.TrimSpace(lines[idx]) != "" {
|
||||
// We've not reached the end
|
||||
if strings.TrimSpace(lines[idx+1]) == "" {
|
||||
// Next line is empty
|
||||
if opened {
|
||||
b.WriteString("</p>")
|
||||
opened = false
|
||||
}
|
||||
} else {
|
||||
b.WriteString("<br />")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if opened {
|
||||
b.WriteString("</p>")
|
||||
}
|
||||
|
||||
return AsValue(b.String()), nil
|
||||
}
|
||||
|
||||
func filterSplit(in *Value, param *Value) (*Value, *Error) {
|
||||
chunks := strings.Split(in.String(), param.String())
|
||||
|
||||
return AsValue(chunks), nil
|
||||
}
|
||||
|
||||
func filterLinebreaksbr(in *Value, param *Value) (*Value, *Error) {
|
||||
return AsValue(strings.Replace(in.String(), "\n", "<br />", -1)), nil
|
||||
}
|
||||
|
||||
func filterLinenumbers(in *Value, param *Value) (*Value, *Error) {
|
||||
lines := strings.Split(in.String(), "\n")
|
||||
output := make([]string, 0, len(lines))
|
||||
for idx, line := range lines {
|
||||
output = append(output, fmt.Sprintf("%d. %s", idx+1, line))
|
||||
}
|
||||
return AsValue(strings.Join(output, "\n")), nil
|
||||
}
|
||||
|
||||
func filterLjust(in *Value, param *Value) (*Value, *Error) {
|
||||
times := param.Integer() - in.Len()
|
||||
if times < 0 {
|
||||
times = 0
|
||||
}
|
||||
return AsValue(fmt.Sprintf("%s%s", in.String(), strings.Repeat(" ", times))), nil
|
||||
}
|
||||
|
||||
func filterUrlencode(in *Value, param *Value) (*Value, *Error) {
|
||||
return AsValue(url.QueryEscape(in.String())), nil
|
||||
}
|
||||
|
||||
// TODO: This regexp could do some work
|
||||
var filterUrlizeURLRegexp = regexp.MustCompile(`((((http|https)://)|www\.|((^|[ ])[0-9A-Za-z_\-]+(\.com|\.net|\.org|\.info|\.biz|\.de))))(?U:.*)([ ]+|$)`)
|
||||
var filterUrlizeEmailRegexp = regexp.MustCompile(`(\w+@\w+\.\w{2,4})`)
|
||||
|
||||
func filterUrlizeHelper(input string, autoescape bool, trunc int) (string, error) {
|
||||
var soutErr error
|
||||
sout := filterUrlizeURLRegexp.ReplaceAllStringFunc(input, func(raw_url string) string {
|
||||
var prefix string
|
||||
var suffix string
|
||||
if strings.HasPrefix(raw_url, " ") {
|
||||
prefix = " "
|
||||
}
|
||||
if strings.HasSuffix(raw_url, " ") {
|
||||
suffix = " "
|
||||
}
|
||||
|
||||
raw_url = strings.TrimSpace(raw_url)
|
||||
|
||||
t, err := ApplyFilter("iriencode", AsValue(raw_url), nil)
|
||||
if err != nil {
|
||||
soutErr = err
|
||||
return ""
|
||||
}
|
||||
url := t.String()
|
||||
|
||||
if !strings.HasPrefix(url, "http") {
|
||||
url = fmt.Sprintf("http://%s", url)
|
||||
}
|
||||
|
||||
title := raw_url
|
||||
|
||||
if trunc > 3 && len(title) > trunc {
|
||||
title = fmt.Sprintf("%s...", title[:trunc-3])
|
||||
}
|
||||
|
||||
if autoescape {
|
||||
t, err := ApplyFilter("escape", AsValue(title), nil)
|
||||
if err != nil {
|
||||
soutErr = err
|
||||
return ""
|
||||
}
|
||||
title = t.String()
|
||||
}
|
||||
|
||||
return fmt.Sprintf(`%s<a href="%s" rel="nofollow">%s</a>%s`, prefix, url, title, suffix)
|
||||
})
|
||||
if soutErr != nil {
|
||||
return "", soutErr
|
||||
}
|
||||
|
||||
sout = filterUrlizeEmailRegexp.ReplaceAllStringFunc(sout, func(mail string) string {
|
||||
title := mail
|
||||
|
||||
if trunc > 3 && len(title) > trunc {
|
||||
title = fmt.Sprintf("%s...", title[:trunc-3])
|
||||
}
|
||||
|
||||
return fmt.Sprintf(`<a href="mailto:%s">%s</a>`, mail, title)
|
||||
})
|
||||
|
||||
return sout, nil
|
||||
}
|
||||
|
||||
func filterUrlize(in *Value, param *Value) (*Value, *Error) {
|
||||
autoescape := true
|
||||
if param.IsBool() {
|
||||
autoescape = param.Bool()
|
||||
}
|
||||
|
||||
s, err := filterUrlizeHelper(in.String(), autoescape, -1)
|
||||
if err != nil {
|
||||
|
||||
}
|
||||
|
||||
return AsValue(s), nil
|
||||
}
|
||||
|
||||
func filterUrlizetrunc(in *Value, param *Value) (*Value, *Error) {
|
||||
s, err := filterUrlizeHelper(in.String(), true, param.Integer())
|
||||
if err != nil {
|
||||
return nil, &Error{
|
||||
Sender: "filter:urlizetrunc",
|
||||
OrigError: errors.New("you cannot pass more than 2 arguments to filter 'pluralize'"),
|
||||
}
|
||||
}
|
||||
return AsValue(s), nil
|
||||
}
|
||||
|
||||
func filterStringformat(in *Value, param *Value) (*Value, *Error) {
|
||||
return AsValue(fmt.Sprintf(param.String(), in.Interface())), nil
|
||||
}
|
||||
|
||||
var reStriptags = regexp.MustCompile("<[^>]*?>")
|
||||
|
||||
func filterStriptags(in *Value, param *Value) (*Value, *Error) {
|
||||
s := in.String()
|
||||
|
||||
// Strip all tags
|
||||
s = reStriptags.ReplaceAllString(s, "")
|
||||
|
||||
return AsValue(strings.TrimSpace(s)), nil
|
||||
}
|
||||
|
||||
// https://en.wikipedia.org/wiki/Phoneword
|
||||
var filterPhone2numericMap = map[string]string{
|
||||
"a": "2", "b": "2", "c": "2", "d": "3", "e": "3", "f": "3", "g": "4", "h": "4", "i": "4", "j": "5", "k": "5",
|
||||
"l": "5", "m": "6", "n": "6", "o": "6", "p": "7", "q": "7", "r": "7", "s": "7", "t": "8", "u": "8", "v": "8",
|
||||
"w": "9", "x": "9", "y": "9", "z": "9",
|
||||
}
|
||||
|
||||
func filterPhone2numeric(in *Value, param *Value) (*Value, *Error) {
|
||||
sin := in.String()
|
||||
for k, v := range filterPhone2numericMap {
|
||||
sin = strings.Replace(sin, k, v, -1)
|
||||
sin = strings.Replace(sin, strings.ToUpper(k), v, -1)
|
||||
}
|
||||
return AsValue(sin), nil
|
||||
}
|
||||
|
||||
func filterPluralize(in *Value, param *Value) (*Value, *Error) {
|
||||
if in.IsNumber() {
|
||||
// Works only on numbers
|
||||
if param.Len() > 0 {
|
||||
endings := strings.Split(param.String(), ",")
|
||||
if len(endings) > 2 {
|
||||
return nil, &Error{
|
||||
Sender: "filter:pluralize",
|
||||
OrigError: errors.New("you cannot pass more than 2 arguments to filter 'pluralize'"),
|
||||
}
|
||||
}
|
||||
if len(endings) == 1 {
|
||||
// 1 argument
|
||||
if in.Integer() != 1 {
|
||||
return AsValue(endings[0]), nil
|
||||
}
|
||||
} else {
|
||||
if in.Integer() != 1 {
|
||||
// 2 arguments
|
||||
return AsValue(endings[1]), nil
|
||||
}
|
||||
return AsValue(endings[0]), nil
|
||||
}
|
||||
} else {
|
||||
if in.Integer() != 1 {
|
||||
// return default 's'
|
||||
return AsValue("s"), nil
|
||||
}
|
||||
}
|
||||
|
||||
return AsValue(""), nil
|
||||
}
|
||||
return nil, &Error{
|
||||
Sender: "filter:pluralize",
|
||||
OrigError: errors.New("filter 'pluralize' does only work on numbers"),
|
||||
}
|
||||
}
|
||||
|
||||
func filterRandom(in *Value, param *Value) (*Value, *Error) {
|
||||
if !in.CanSlice() || in.Len() <= 0 {
|
||||
return in, nil
|
||||
}
|
||||
i := rand.Intn(in.Len())
|
||||
return in.Index(i), nil
|
||||
}
|
||||
|
||||
func filterRemovetags(in *Value, param *Value) (*Value, *Error) {
|
||||
s := in.String()
|
||||
tags := strings.Split(param.String(), ",")
|
||||
|
||||
// Strip only specific tags
|
||||
for _, tag := range tags {
|
||||
re := regexp.MustCompile(fmt.Sprintf("</?%s/?>", tag))
|
||||
s = re.ReplaceAllString(s, "")
|
||||
}
|
||||
|
||||
return AsValue(strings.TrimSpace(s)), nil
|
||||
}
|
||||
|
||||
func filterRjust(in *Value, param *Value) (*Value, *Error) {
|
||||
return AsValue(fmt.Sprintf(fmt.Sprintf("%%%ds", param.Integer()), in.String())), nil
|
||||
}
|
||||
|
||||
func filterSlice(in *Value, param *Value) (*Value, *Error) {
|
||||
comp := strings.Split(param.String(), ":")
|
||||
if len(comp) != 2 {
|
||||
return nil, &Error{
|
||||
Sender: "filter:slice",
|
||||
OrigError: errors.New("Slice string must have the format 'from:to' [from/to can be omitted, but the ':' is required]"),
|
||||
}
|
||||
}
|
||||
|
||||
if !in.CanSlice() {
|
||||
return in, nil
|
||||
}
|
||||
|
||||
from := AsValue(comp[0]).Integer()
|
||||
to := in.Len()
|
||||
|
||||
if from > to {
|
||||
from = to
|
||||
}
|
||||
|
||||
vto := AsValue(comp[1]).Integer()
|
||||
if vto >= from && vto <= in.Len() {
|
||||
to = vto
|
||||
}
|
||||
|
||||
return in.Slice(from, to), nil
|
||||
}
|
||||
|
||||
func filterTitle(in *Value, param *Value) (*Value, *Error) {
|
||||
if !in.IsString() {
|
||||
return AsValue(""), nil
|
||||
}
|
||||
return AsValue(strings.Title(strings.ToLower(in.String()))), nil
|
||||
}
|
||||
|
||||
func filterWordcount(in *Value, param *Value) (*Value, *Error) {
|
||||
return AsValue(len(strings.Fields(in.String()))), nil
|
||||
}
|
||||
|
||||
func filterWordwrap(in *Value, param *Value) (*Value, *Error) {
|
||||
words := strings.Fields(in.String())
|
||||
wordsLen := len(words)
|
||||
wrapAt := param.Integer()
|
||||
if wrapAt <= 0 {
|
||||
return in, nil
|
||||
}
|
||||
|
||||
linecount := wordsLen/wrapAt + wordsLen%wrapAt
|
||||
lines := make([]string, 0, linecount)
|
||||
for i := 0; i < linecount; i++ {
|
||||
lines = append(lines, strings.Join(words[wrapAt*i:min(wrapAt*(i+1), wordsLen)], " "))
|
||||
}
|
||||
return AsValue(strings.Join(lines, "\n")), nil
|
||||
}
|
||||
|
||||
func filterYesno(in *Value, param *Value) (*Value, *Error) {
|
||||
choices := map[int]string{
|
||||
0: "yes",
|
||||
1: "no",
|
||||
2: "maybe",
|
||||
}
|
||||
paramString := param.String()
|
||||
customChoices := strings.Split(paramString, ",")
|
||||
if len(paramString) > 0 {
|
||||
if len(customChoices) > 3 {
|
||||
return nil, &Error{
|
||||
Sender: "filter:yesno",
|
||||
OrigError: errors.Errorf("You cannot pass more than 3 options to the 'yesno'-filter (got: '%s').", paramString),
|
||||
}
|
||||
}
|
||||
if len(customChoices) < 2 {
|
||||
return nil, &Error{
|
||||
Sender: "filter:yesno",
|
||||
OrigError: errors.Errorf("You must pass either no or at least 2 arguments to the 'yesno'-filter (got: '%s').", paramString),
|
||||
}
|
||||
}
|
||||
|
||||
// Map to the options now
|
||||
choices[0] = customChoices[0]
|
||||
choices[1] = customChoices[1]
|
||||
if len(customChoices) == 3 {
|
||||
choices[2] = customChoices[2]
|
||||
}
|
||||
}
|
||||
|
||||
// maybe
|
||||
if in.IsNil() {
|
||||
return AsValue(choices[2]), nil
|
||||
}
|
||||
|
||||
// yes
|
||||
if in.IsTrue() {
|
||||
return AsValue(choices[0]), nil
|
||||
}
|
||||
|
||||
// no
|
||||
return AsValue(choices[1]), nil
|
||||
}
|
15
vendor/github.com/flosch/pongo2/helpers.go
generated
vendored
@ -1,15 +0,0 @@
package pongo2

func max(a, b int) int {
    if a > b {
        return a
    }
    return b
}

func min(a, b int) int {
    if a < b {
        return a
    }
    return b
}
432
vendor/github.com/flosch/pongo2/lexer.go
generated
vendored
|
@ -1,432 +0,0 @@
|
|||
package pongo2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/juju/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
TokenError = iota
|
||||
EOF
|
||||
|
||||
TokenHTML
|
||||
|
||||
TokenKeyword
|
||||
TokenIdentifier
|
||||
TokenString
|
||||
TokenNumber
|
||||
TokenSymbol
|
||||
)
|
||||
|
||||
var (
|
||||
tokenSpaceChars = " \n\r\t"
|
||||
tokenIdentifierChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"
|
||||
tokenIdentifierCharsWithDigits = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789"
|
||||
tokenDigits = "0123456789"
|
||||
|
||||
// Available symbols in pongo2 (within filters/tag)
|
||||
TokenSymbols = []string{
|
||||
// 3-Char symbols
|
||||
"{{-", "-}}", "{%-", "-%}",
|
||||
|
||||
// 2-Char symbols
|
||||
"==", ">=", "<=", "&&", "||", "{{", "}}", "{%", "%}", "!=", "<>",
|
||||
|
||||
// 1-Char symbol
|
||||
"(", ")", "+", "-", "*", "<", ">", "/", "^", ",", ".", "!", "|", ":", "=", "%",
|
||||
}
|
||||
|
||||
// Available keywords in pongo2
|
||||
TokenKeywords = []string{"in", "and", "or", "not", "true", "false", "as", "export"}
|
||||
)
|
||||
|
||||
type TokenType int
|
||||
type Token struct {
|
||||
Filename string
|
||||
Typ TokenType
|
||||
Val string
|
||||
Line int
|
||||
Col int
|
||||
TrimWhitespaces bool
|
||||
}
|
||||
|
||||
type lexerStateFn func() lexerStateFn
|
||||
type lexer struct {
|
||||
name string
|
||||
input string
|
||||
start int // start pos of the item
|
||||
pos int // current pos
|
||||
width int // width of last rune
|
||||
tokens []*Token
|
||||
errored bool
|
||||
startline int
|
||||
startcol int
|
||||
line int
|
||||
col int
|
||||
|
||||
inVerbatim bool
|
||||
verbatimName string
|
||||
}
|
||||
|
||||
func (t *Token) String() string {
|
||||
val := t.Val
|
||||
if len(val) > 1000 {
|
||||
val = fmt.Sprintf("%s...%s", val[:10], val[len(val)-5:len(val)])
|
||||
}
|
||||
|
||||
typ := ""
|
||||
switch t.Typ {
|
||||
case TokenHTML:
|
||||
typ = "HTML"
|
||||
case TokenError:
|
||||
typ = "Error"
|
||||
case TokenIdentifier:
|
||||
typ = "Identifier"
|
||||
case TokenKeyword:
|
||||
typ = "Keyword"
|
||||
case TokenNumber:
|
||||
typ = "Number"
|
||||
case TokenString:
|
||||
typ = "String"
|
||||
case TokenSymbol:
|
||||
typ = "Symbol"
|
||||
default:
|
||||
typ = "Unknown"
|
||||
}
|
||||
|
||||
return fmt.Sprintf("<Token Typ=%s (%d) Val='%s' Line=%d Col=%d, WT=%t>",
|
||||
typ, t.Typ, val, t.Line, t.Col, t.TrimWhitespaces)
|
||||
}
|
||||
|
||||
func lex(name string, input string) ([]*Token, *Error) {
|
||||
l := &lexer{
|
||||
name: name,
|
||||
input: input,
|
||||
tokens: make([]*Token, 0, 100),
|
||||
line: 1,
|
||||
col: 1,
|
||||
startline: 1,
|
||||
startcol: 1,
|
||||
}
|
||||
l.run()
|
||||
if l.errored {
|
||||
errtoken := l.tokens[len(l.tokens)-1]
|
||||
return nil, &Error{
|
||||
Filename: name,
|
||||
Line: errtoken.Line,
|
||||
Column: errtoken.Col,
|
||||
Sender: "lexer",
|
||||
OrigError: errors.New(errtoken.Val),
|
||||
}
|
||||
}
|
||||
return l.tokens, nil
|
||||
}
|
||||
|
||||
func (l *lexer) value() string {
|
||||
return l.input[l.start:l.pos]
|
||||
}
|
||||
|
||||
func (l *lexer) length() int {
|
||||
return l.pos - l.start
|
||||
}
|
||||
|
||||
func (l *lexer) emit(t TokenType) {
|
||||
tok := &Token{
|
||||
Filename: l.name,
|
||||
Typ: t,
|
||||
Val: l.value(),
|
||||
Line: l.startline,
|
||||
Col: l.startcol,
|
||||
}
|
||||
|
||||
if t == TokenString {
|
||||
// Escape sequence \" in strings
|
||||
tok.Val = strings.Replace(tok.Val, `\"`, `"`, -1)
|
||||
tok.Val = strings.Replace(tok.Val, `\\`, `\`, -1)
|
||||
}
|
||||
|
||||
if t == TokenSymbol && len(tok.Val) == 3 && (strings.HasSuffix(tok.Val, "-") || strings.HasPrefix(tok.Val, "-")) {
|
||||
tok.TrimWhitespaces = true
|
||||
tok.Val = strings.Replace(tok.Val, "-", "", -1)
|
||||
}
|
||||
|
||||
l.tokens = append(l.tokens, tok)
|
||||
l.start = l.pos
|
||||
l.startline = l.line
|
||||
l.startcol = l.col
|
||||
}
|
||||
|
||||
func (l *lexer) next() rune {
|
||||
if l.pos >= len(l.input) {
|
||||
l.width = 0
|
||||
return EOF
|
||||
}
|
||||
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
|
||||
l.width = w
|
||||
l.pos += l.width
|
||||
l.col += l.width
|
||||
return r
|
||||
}
|
||||
|
||||
func (l *lexer) backup() {
|
||||
l.pos -= l.width
|
||||
l.col -= l.width
|
||||
}
|
||||
|
||||
func (l *lexer) peek() rune {
|
||||
r := l.next()
|
||||
l.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
func (l *lexer) ignore() {
|
||||
l.start = l.pos
|
||||
l.startline = l.line
|
||||
l.startcol = l.col
|
||||
}
|
||||
|
||||
func (l *lexer) accept(what string) bool {
|
||||
if strings.IndexRune(what, l.next()) >= 0 {
|
||||
return true
|
||||
}
|
||||
l.backup()
|
||||
return false
|
||||
}
|
||||
|
||||
func (l *lexer) acceptRun(what string) {
|
||||
for strings.IndexRune(what, l.next()) >= 0 {
|
||||
}
|
||||
l.backup()
|
||||
}
|
||||
|
||||
func (l *lexer) errorf(format string, args ...interface{}) lexerStateFn {
|
||||
t := &Token{
|
||||
Filename: l.name,
|
||||
Typ: TokenError,
|
||||
Val: fmt.Sprintf(format, args...),
|
||||
Line: l.startline,
|
||||
Col: l.startcol,
|
||||
}
|
||||
l.tokens = append(l.tokens, t)
|
||||
l.errored = true
|
||||
l.startline = l.line
|
||||
l.startcol = l.col
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *lexer) eof() bool {
|
||||
return l.start >= len(l.input)-1
|
||||
}
|
||||
|
||||
func (l *lexer) run() {
	for {
		// TODO: Support verbatim tag names
		// https://docs.djangoproject.com/en/dev/ref/templates/builtins/#verbatim
		if l.inVerbatim {
			name := l.verbatimName
			if name != "" {
				name += " "
			}
			if strings.HasPrefix(l.input[l.pos:], fmt.Sprintf("{%% endverbatim %s%%}", name)) { // end verbatim
				if l.pos > l.start {
					l.emit(TokenHTML)
				}
				w := len("{% endverbatim %}")
				l.pos += w
				l.col += w
				l.ignore()
				l.inVerbatim = false
			}
		} else if strings.HasPrefix(l.input[l.pos:], "{% verbatim %}") { // tag
			if l.pos > l.start {
				l.emit(TokenHTML)
			}
			l.inVerbatim = true
			w := len("{% verbatim %}")
			l.pos += w
			l.col += w
			l.ignore()
		}

		if !l.inVerbatim {
			// Ignore single-line comments {# ... #}
			if strings.HasPrefix(l.input[l.pos:], "{#") {
				if l.pos > l.start {
					l.emit(TokenHTML)
				}

				l.pos += 2 // pass '{#'
				l.col += 2

				for {
					switch l.peek() {
					case EOF:
						l.errorf("Single-line comment not closed.")
						return
					case '\n':
						l.errorf("Newline not permitted in a single-line comment.")
						return
					}

					if strings.HasPrefix(l.input[l.pos:], "#}") {
						l.pos += 2 // pass '#}'
						l.col += 2
						break
					}

					l.next()
				}
				l.ignore() // ignore whole comment

				// Comment skipped
				continue // next token
			}

			if strings.HasPrefix(l.input[l.pos:], "{{") || // variable
				strings.HasPrefix(l.input[l.pos:], "{%") { // tag
				if l.pos > l.start {
					l.emit(TokenHTML)
				}
				l.tokenize()
				if l.errored {
					return
				}
				continue
			}
		}

		switch l.peek() {
		case '\n':
			l.line++
			l.col = 0
		}
		if l.next() == EOF {
			break
		}
	}

	if l.pos > l.start {
		l.emit(TokenHTML)
	}

	if l.inVerbatim {
		l.errorf("verbatim-tag not closed, got EOF.")
	}
}

func (l *lexer) tokenize() {
	for state := l.stateCode; state != nil; {
		state = state()
	}
}

func (l *lexer) stateCode() lexerStateFn {
outer_loop:
	for {
		switch {
		case l.accept(tokenSpaceChars):
			if l.value() == "\n" {
				return l.errorf("Newline not allowed within tag/variable.")
			}
			l.ignore()
			continue
		case l.accept(tokenIdentifierChars):
			return l.stateIdentifier
		case l.accept(tokenDigits):
			return l.stateNumber
		case l.accept(`"'`):
			return l.stateString
		}

		// Check for symbol
		for _, sym := range TokenSymbols {
			if strings.HasPrefix(l.input[l.start:], sym) {
				l.pos += len(sym)
				l.col += l.length()
				l.emit(TokenSymbol)

				if sym == "%}" || sym == "-%}" || sym == "}}" || sym == "-}}" {
					// Tag/variable end, return after emit
					return nil
				}

				continue outer_loop
			}
		}

		break
	}

	// Normal shut down
	return nil
}

func (l *lexer) stateIdentifier() lexerStateFn {
	l.acceptRun(tokenIdentifierChars)
	l.acceptRun(tokenIdentifierCharsWithDigits)
	for _, kw := range TokenKeywords {
		if kw == l.value() {
			l.emit(TokenKeyword)
			return l.stateCode
		}
	}
	l.emit(TokenIdentifier)
	return l.stateCode
}

func (l *lexer) stateNumber() lexerStateFn {
	l.acceptRun(tokenDigits)
	if l.accept(tokenIdentifierCharsWithDigits) {
		// This seems to be an identifier starting with a number.
		// See https://github.com/flosch/pongo2/issues/151
		return l.stateIdentifier()
	}
	/*
		Maybe context-sensitive number lexing?
		* comments.0.Text // first comment
		* usercomments.1.0 // second user, first comment
		* if (score >= 8.5) // 8.5 as a number

		if l.peek() == '.' {
			l.accept(".")
			if !l.accept(tokenDigits) {
				return l.errorf("Malformed number.")
			}
			l.acceptRun(tokenDigits)
		}
	*/
	l.emit(TokenNumber)
	return l.stateCode
}

func (l *lexer) stateString() lexerStateFn {
	quotationMark := l.value()
	l.ignore()
	l.startcol-- // we're starting the position at the first "
	for !l.accept(quotationMark) {
		switch l.next() {
		case '\\':
			// escape sequence
			switch l.peek() {
			case '"', '\\':
				l.next()
			default:
				return l.errorf("Unknown escape sequence: \\%c", l.peek())
			}
		case EOF:
			return l.errorf("Unexpected EOF, string not closed.")
		case '\n':
			return l.errorf("Newline in string is not allowed.")
		}
	}
	l.backup()
	l.emit(TokenString)

	l.next()
	l.ignore()

	return l.stateCode
}
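A minimal sketch (not from this repository) of how the lexer behaviour above surfaces through the upstream pongo2 API; FromString, Execute and Context are upstream pongo2 identifiers, and the output noted in the comment is an expectation rather than a verified result.

package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// {# ... #} is dropped by the lexer; {% verbatim %} passes its body through untouched.
	tpl, err := pongo2.FromString(`{# ignored #}{% verbatim %}{{ raw }}{% endverbatim %} {{ name }}`)
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(pongo2.Context{"name": "world"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // expected: "{{ raw }} world"
}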
16 vendor/github.com/flosch/pongo2/nodes.go (generated, vendored)
@@ -1,16 +0,0 @@
package pongo2

// The root document
type nodeDocument struct {
	Nodes []INode
}

func (doc *nodeDocument) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	for _, n := range doc.Nodes {
		err := n.Execute(ctx, writer)
		if err != nil {
			return err
		}
	}
	return nil
}
23 vendor/github.com/flosch/pongo2/nodes_html.go (generated, vendored)
@@ -1,23 +0,0 @@
package pongo2

import (
	"strings"
)

type nodeHTML struct {
	token     *Token
	trimLeft  bool
	trimRight bool
}

func (n *nodeHTML) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	res := n.token.Val
	if n.trimLeft {
		res = strings.TrimLeft(res, tokenSpaceChars)
	}
	if n.trimRight {
		res = strings.TrimRight(res, tokenSpaceChars)
	}
	writer.WriteString(res)
	return nil
}
16 vendor/github.com/flosch/pongo2/nodes_wrapper.go (generated, vendored)
@@ -1,16 +0,0 @@
package pongo2

type NodeWrapper struct {
	Endtag string
	nodes  []INode
}

func (wrapper *NodeWrapper) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	for _, n := range wrapper.nodes {
		err := n.Execute(ctx, writer)
		if err != nil {
			return err
		}
	}
	return nil
}
309 vendor/github.com/flosch/pongo2/parser.go (generated, vendored)
@@ -1,309 +0,0 @@
package pongo2

import (
	"fmt"
	"strings"

	"github.com/juju/errors"
)

type INode interface {
	Execute(*ExecutionContext, TemplateWriter) *Error
}

type IEvaluator interface {
	INode
	GetPositionToken() *Token
	Evaluate(*ExecutionContext) (*Value, *Error)
	FilterApplied(name string) bool
}

// The parser provides you a comprehensive and easy tool to
// work with the template document and arguments provided by
// the user for your custom tag.
//
// The parser works on a token list which will be provided by pongo2.
// A token is a unit you can work with. Tokens are either of type identifier,
// string, number, keyword, HTML or symbol.
//
// (See Token's documentation for more about tokens)
type Parser struct {
	name      string
	idx       int
	tokens    []*Token
	lastToken *Token

	// if the parser parses a template document, here will be
	// a reference to it (needed to access the template through Tags)
	template *Template
}

// Creates a new parser to parse tokens.
// Used inside pongo2 to parse documents and to provide an easy-to-use
// parser for tag authors
func newParser(name string, tokens []*Token, template *Template) *Parser {
	p := &Parser{
		name:     name,
		tokens:   tokens,
		template: template,
	}
	if len(tokens) > 0 {
		p.lastToken = tokens[len(tokens)-1]
	}
	return p
}

// Consume one token. It will be gone forever.
func (p *Parser) Consume() {
	p.ConsumeN(1)
}

// Consume N tokens. They will be gone forever.
func (p *Parser) ConsumeN(count int) {
	p.idx += count
}

// Returns the current token.
func (p *Parser) Current() *Token {
	return p.Get(p.idx)
}

// Returns the CURRENT token if the given type matches.
// Consumes this token on success.
func (p *Parser) MatchType(typ TokenType) *Token {
	if t := p.PeekType(typ); t != nil {
		p.Consume()
		return t
	}
	return nil
}

// Returns the CURRENT token if the given type AND value matches.
// Consumes this token on success.
func (p *Parser) Match(typ TokenType, val string) *Token {
	if t := p.Peek(typ, val); t != nil {
		p.Consume()
		return t
	}
	return nil
}

// Returns the CURRENT token if the given type AND *one* of
// the given values matches.
// Consumes this token on success.
func (p *Parser) MatchOne(typ TokenType, vals ...string) *Token {
	for _, val := range vals {
		if t := p.Peek(typ, val); t != nil {
			p.Consume()
			return t
		}
	}
	return nil
}

// Returns the CURRENT token if the given type matches.
// It DOES NOT consume the token.
func (p *Parser) PeekType(typ TokenType) *Token {
	return p.PeekTypeN(0, typ)
}

// Returns the CURRENT token if the given type AND value matches.
// It DOES NOT consume the token.
func (p *Parser) Peek(typ TokenType, val string) *Token {
	return p.PeekN(0, typ, val)
}

// Returns the CURRENT token if the given type AND *one* of
// the given values matches.
// It DOES NOT consume the token.
func (p *Parser) PeekOne(typ TokenType, vals ...string) *Token {
	for _, v := range vals {
		t := p.PeekN(0, typ, v)
		if t != nil {
			return t
		}
	}
	return nil
}

// Returns the tokens[current position + shift] token if the
// given type AND value matches for that token.
// DOES NOT consume the token.
func (p *Parser) PeekN(shift int, typ TokenType, val string) *Token {
	t := p.Get(p.idx + shift)
	if t != nil {
		if t.Typ == typ && t.Val == val {
			return t
		}
	}
	return nil
}

// Returns the tokens[current position + shift] token if the given type matches.
// DOES NOT consume the token for that token.
func (p *Parser) PeekTypeN(shift int, typ TokenType) *Token {
	t := p.Get(p.idx + shift)
	if t != nil {
		if t.Typ == typ {
			return t
		}
	}
	return nil
}

// Returns the UNCONSUMED token count.
func (p *Parser) Remaining() int {
	return len(p.tokens) - p.idx
}

// Returns the total token count.
func (p *Parser) Count() int {
	return len(p.tokens)
}

// Returns tokens[i] or NIL (if i >= len(tokens))
func (p *Parser) Get(i int) *Token {
	if i < len(p.tokens) && i >= 0 {
		return p.tokens[i]
	}
	return nil
}

// Returns tokens[current-position + shift] or NIL
// (if (current-position + i) >= len(tokens))
func (p *Parser) GetR(shift int) *Token {
	i := p.idx + shift
	return p.Get(i)
}

// Error produces a nice error message and returns an error-object.
// The 'token'-argument is optional. If provided, it will take
// the token's position information. If not provided, it will
// automatically use the CURRENT token's position information.
func (p *Parser) Error(msg string, token *Token) *Error {
	if token == nil {
		// Set current token
		token = p.Current()
		if token == nil {
			// Set to last token
			if len(p.tokens) > 0 {
				token = p.tokens[len(p.tokens)-1]
			}
		}
	}
	var line, col int
	if token != nil {
		line = token.Line
		col = token.Col
	}
	return &Error{
		Template:  p.template,
		Filename:  p.name,
		Sender:    "parser",
		Line:      line,
		Column:    col,
		Token:     token,
		OrigError: errors.New(msg),
	}
}

// Wraps all nodes between starting tag and "{% endtag %}" and provides
// one simple interface to execute the wrapped nodes.
// It returns a parser to process provided arguments to the tag.
func (p *Parser) WrapUntilTag(names ...string) (*NodeWrapper, *Parser, *Error) {
	wrapper := &NodeWrapper{}

	var tagArgs []*Token

	for p.Remaining() > 0 {
		// New tag, check whether we have to stop wrapping here
		if p.Peek(TokenSymbol, "{%") != nil {
			tagIdent := p.PeekTypeN(1, TokenIdentifier)

			if tagIdent != nil {
				// We've found a (!) end-tag

				found := false
				for _, n := range names {
					if tagIdent.Val == n {
						found = true
						break
					}
				}

				// We only process the tag if we've found an end tag
				if found {
					// Okay, endtag found.
					p.ConsumeN(2) // '{%' tagname

					for {
						if p.Match(TokenSymbol, "%}") != nil {
							// Okay, end the wrapping here
							wrapper.Endtag = tagIdent.Val
							return wrapper, newParser(p.template.name, tagArgs, p.template), nil
						}
						t := p.Current()
						p.Consume()
						if t == nil {
							return nil, nil, p.Error("Unexpected EOF.", p.lastToken)
						}
						tagArgs = append(tagArgs, t)
					}
				}
			}

		}

		// Otherwise process next element to be wrapped
		node, err := p.parseDocElement()
		if err != nil {
			return nil, nil, err
		}
		wrapper.nodes = append(wrapper.nodes, node)
	}

	return nil, nil, p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")),
		p.lastToken)
}

// Skips all nodes between starting tag and "{% endtag %}"
func (p *Parser) SkipUntilTag(names ...string) *Error {
	for p.Remaining() > 0 {
		// New tag, check whether we have to stop wrapping here
		if p.Peek(TokenSymbol, "{%") != nil {
			tagIdent := p.PeekTypeN(1, TokenIdentifier)

			if tagIdent != nil {
				// We've found a (!) end-tag

				found := false
				for _, n := range names {
					if tagIdent.Val == n {
						found = true
						break
					}
				}

				// We only process the tag if we've found an end tag
				if found {
					// Okay, endtag found.
					p.ConsumeN(2) // '{%' tagname

					for {
						if p.Match(TokenSymbol, "%}") != nil {
							// Done skipping, exit.
							return nil
						}
					}
				}
			}
		}
		t := p.Current()
		p.Consume()
		if t == nil {
			return p.Error("Unexpected EOF.", p.lastToken)
		}
	}

	return p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")), p.lastToken)
}
59 vendor/github.com/flosch/pongo2/parser_document.go (generated, vendored)
@@ -1,59 +0,0 @@
package pongo2

// Doc = { ( Filter | Tag | HTML ) }
func (p *Parser) parseDocElement() (INode, *Error) {
	t := p.Current()

	switch t.Typ {
	case TokenHTML:
		n := &nodeHTML{token: t}
		left := p.PeekTypeN(-1, TokenSymbol)
		right := p.PeekTypeN(1, TokenSymbol)
		n.trimLeft = left != nil && left.TrimWhitespaces
		n.trimRight = right != nil && right.TrimWhitespaces
		p.Consume() // consume HTML element
		return n, nil
	case TokenSymbol:
		switch t.Val {
		case "{{":
			// parse variable
			variable, err := p.parseVariableElement()
			if err != nil {
				return nil, err
			}
			return variable, nil
		case "{%":
			// parse tag
			tag, err := p.parseTagElement()
			if err != nil {
				return nil, err
			}
			return tag, nil
		}
	}
	return nil, p.Error("Unexpected token (only HTML/tags/filters in templates allowed)", t)
}

func (tpl *Template) parse() *Error {
	tpl.parser = newParser(tpl.name, tpl.tokens, tpl)
	doc, err := tpl.parser.parseDocument()
	if err != nil {
		return err
	}
	tpl.root = doc
	return nil
}

func (p *Parser) parseDocument() (*nodeDocument, *Error) {
	doc := &nodeDocument{}

	for p.Remaining() > 0 {
		node, err := p.parseDocElement()
		if err != nil {
			return nil, err
		}
		doc.Nodes = append(doc.Nodes, node)
	}

	return doc, nil
}
503 vendor/github.com/flosch/pongo2/parser_expression.go (generated, vendored)
@@ -1,503 +0,0 @@
package pongo2

import (
	"fmt"
	"math"
)

type Expression struct {
	// TODO: Add location token?
	expr1   IEvaluator
	expr2   IEvaluator
	opToken *Token
}

type relationalExpression struct {
	// TODO: Add location token?
	expr1   IEvaluator
	expr2   IEvaluator
	opToken *Token
}

type simpleExpression struct {
	negate       bool
	negativeSign bool
	term1        IEvaluator
	term2        IEvaluator
	opToken      *Token
}

type term struct {
	// TODO: Add location token?
	factor1 IEvaluator
	factor2 IEvaluator
	opToken *Token
}

type power struct {
	// TODO: Add location token?
	power1 IEvaluator
	power2 IEvaluator
}

func (expr *Expression) FilterApplied(name string) bool {
	return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
		(expr.expr2 != nil && expr.expr2.FilterApplied(name)))
}

func (expr *relationalExpression) FilterApplied(name string) bool {
	return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
		(expr.expr2 != nil && expr.expr2.FilterApplied(name)))
}

func (expr *simpleExpression) FilterApplied(name string) bool {
	return expr.term1.FilterApplied(name) && (expr.term2 == nil ||
		(expr.term2 != nil && expr.term2.FilterApplied(name)))
}

func (expr *term) FilterApplied(name string) bool {
	return expr.factor1.FilterApplied(name) && (expr.factor2 == nil ||
		(expr.factor2 != nil && expr.factor2.FilterApplied(name)))
}

func (expr *power) FilterApplied(name string) bool {
	return expr.power1.FilterApplied(name) && (expr.power2 == nil ||
		(expr.power2 != nil && expr.power2.FilterApplied(name)))
}

func (expr *Expression) GetPositionToken() *Token {
	return expr.expr1.GetPositionToken()
}

func (expr *relationalExpression) GetPositionToken() *Token {
	return expr.expr1.GetPositionToken()
}

func (expr *simpleExpression) GetPositionToken() *Token {
	return expr.term1.GetPositionToken()
}

func (expr *term) GetPositionToken() *Token {
	return expr.factor1.GetPositionToken()
}

func (expr *power) GetPositionToken() *Token {
	return expr.power1.GetPositionToken()
}

func (expr *Expression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	value, err := expr.Evaluate(ctx)
	if err != nil {
		return err
	}
	writer.WriteString(value.String())
	return nil
}

func (expr *relationalExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	value, err := expr.Evaluate(ctx)
	if err != nil {
		return err
	}
	writer.WriteString(value.String())
	return nil
}

func (expr *simpleExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	value, err := expr.Evaluate(ctx)
	if err != nil {
		return err
	}
	writer.WriteString(value.String())
	return nil
}

func (expr *term) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	value, err := expr.Evaluate(ctx)
	if err != nil {
		return err
	}
	writer.WriteString(value.String())
	return nil
}

func (expr *power) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	value, err := expr.Evaluate(ctx)
	if err != nil {
		return err
	}
	writer.WriteString(value.String())
	return nil
}

func (expr *Expression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
	v1, err := expr.expr1.Evaluate(ctx)
	if err != nil {
		return nil, err
	}
	if expr.expr2 != nil {
		switch expr.opToken.Val {
		case "and", "&&":
			if !v1.IsTrue() {
				return AsValue(false), nil
			} else {
				v2, err := expr.expr2.Evaluate(ctx)
				if err != nil {
					return nil, err
				}
				return AsValue(v2.IsTrue()), nil
			}
		case "or", "||":
			if v1.IsTrue() {
				return AsValue(true), nil
			} else {
				v2, err := expr.expr2.Evaluate(ctx)
				if err != nil {
					return nil, err
				}
				return AsValue(v2.IsTrue()), nil
			}
		default:
			return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken)
		}
	} else {
		return v1, nil
	}
}

func (expr *relationalExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
	v1, err := expr.expr1.Evaluate(ctx)
	if err != nil {
		return nil, err
	}
	if expr.expr2 != nil {
		v2, err := expr.expr2.Evaluate(ctx)
		if err != nil {
			return nil, err
		}
		switch expr.opToken.Val {
		case "<=":
			if v1.IsFloat() || v2.IsFloat() {
				return AsValue(v1.Float() <= v2.Float()), nil
			}
			return AsValue(v1.Integer() <= v2.Integer()), nil
		case ">=":
			if v1.IsFloat() || v2.IsFloat() {
				return AsValue(v1.Float() >= v2.Float()), nil
			}
			return AsValue(v1.Integer() >= v2.Integer()), nil
		case "==":
			return AsValue(v1.EqualValueTo(v2)), nil
		case ">":
			if v1.IsFloat() || v2.IsFloat() {
				return AsValue(v1.Float() > v2.Float()), nil
			}
			return AsValue(v1.Integer() > v2.Integer()), nil
		case "<":
			if v1.IsFloat() || v2.IsFloat() {
				return AsValue(v1.Float() < v2.Float()), nil
			}
			return AsValue(v1.Integer() < v2.Integer()), nil
		case "!=", "<>":
			return AsValue(!v1.EqualValueTo(v2)), nil
		case "in":
			return AsValue(v2.Contains(v1)), nil
		default:
			return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken)
		}
	} else {
		return v1, nil
	}
}

func (expr *simpleExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
	t1, err := expr.term1.Evaluate(ctx)
	if err != nil {
		return nil, err
	}
	result := t1

	if expr.negate {
		result = result.Negate()
	}

	if expr.negativeSign {
		if result.IsNumber() {
			switch {
			case result.IsFloat():
				result = AsValue(-1 * result.Float())
			case result.IsInteger():
				result = AsValue(-1 * result.Integer())
			default:
				return nil, ctx.Error("Operation between a number and a non-(float/integer) is not possible", nil)
			}
		} else {
			return nil, ctx.Error("Negative sign on a non-number expression", expr.GetPositionToken())
		}
	}

	if expr.term2 != nil {
		t2, err := expr.term2.Evaluate(ctx)
		if err != nil {
			return nil, err
		}
		switch expr.opToken.Val {
		case "+":
			if result.IsFloat() || t2.IsFloat() {
				// Result will be a float
				return AsValue(result.Float() + t2.Float()), nil
			}
			// Result will be an integer
			return AsValue(result.Integer() + t2.Integer()), nil
		case "-":
			if result.IsFloat() || t2.IsFloat() {
				// Result will be a float
				return AsValue(result.Float() - t2.Float()), nil
			}
			// Result will be an integer
			return AsValue(result.Integer() - t2.Integer()), nil
		default:
			return nil, ctx.Error("Unimplemented", expr.GetPositionToken())
		}
	}

	return result, nil
}

func (expr *term) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
	f1, err := expr.factor1.Evaluate(ctx)
	if err != nil {
		return nil, err
	}
	if expr.factor2 != nil {
		f2, err := expr.factor2.Evaluate(ctx)
		if err != nil {
			return nil, err
		}
		switch expr.opToken.Val {
		case "*":
			if f1.IsFloat() || f2.IsFloat() {
				// Result will be float
				return AsValue(f1.Float() * f2.Float()), nil
			}
			// Result will be int
			return AsValue(f1.Integer() * f2.Integer()), nil
		case "/":
			if f1.IsFloat() || f2.IsFloat() {
				// Result will be float
				return AsValue(f1.Float() / f2.Float()), nil
			}
			// Result will be int
			return AsValue(f1.Integer() / f2.Integer()), nil
		case "%":
			// Result will be int
			return AsValue(f1.Integer() % f2.Integer()), nil
		default:
			return nil, ctx.Error("unimplemented", expr.opToken)
		}
	} else {
		return f1, nil
	}
}

func (expr *power) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
	p1, err := expr.power1.Evaluate(ctx)
	if err != nil {
		return nil, err
	}
	if expr.power2 != nil {
		p2, err := expr.power2.Evaluate(ctx)
		if err != nil {
			return nil, err
		}
		return AsValue(math.Pow(p1.Float(), p2.Float())), nil
	}
	return p1, nil
}

func (p *Parser) parseFactor() (IEvaluator, *Error) {
	if p.Match(TokenSymbol, "(") != nil {
		expr, err := p.ParseExpression()
		if err != nil {
			return nil, err
		}
		if p.Match(TokenSymbol, ")") == nil {
			return nil, p.Error("Closing bracket expected after expression", nil)
		}
		return expr, nil
	}

	return p.parseVariableOrLiteralWithFilter()
}

func (p *Parser) parsePower() (IEvaluator, *Error) {
	pw := new(power)

	power1, err := p.parseFactor()
	if err != nil {
		return nil, err
	}
	pw.power1 = power1

	if p.Match(TokenSymbol, "^") != nil {
		power2, err := p.parsePower()
		if err != nil {
			return nil, err
		}
		pw.power2 = power2
	}

	if pw.power2 == nil {
		// Shortcut for faster evaluation
		return pw.power1, nil
	}

	return pw, nil
}

func (p *Parser) parseTerm() (IEvaluator, *Error) {
	returnTerm := new(term)

	factor1, err := p.parsePower()
	if err != nil {
		return nil, err
	}
	returnTerm.factor1 = factor1

	for p.PeekOne(TokenSymbol, "*", "/", "%") != nil {
		if returnTerm.opToken != nil {
			// Create new sub-term
			returnTerm = &term{
				factor1: returnTerm,
			}
		}

		op := p.Current()
		p.Consume()

		factor2, err := p.parsePower()
		if err != nil {
			return nil, err
		}

		returnTerm.opToken = op
		returnTerm.factor2 = factor2
	}

	if returnTerm.opToken == nil {
		// Shortcut for faster evaluation
		return returnTerm.factor1, nil
	}

	return returnTerm, nil
}

func (p *Parser) parseSimpleExpression() (IEvaluator, *Error) {
	expr := new(simpleExpression)

	if sign := p.MatchOne(TokenSymbol, "+", "-"); sign != nil {
		if sign.Val == "-" {
			expr.negativeSign = true
		}
	}

	if p.Match(TokenSymbol, "!") != nil || p.Match(TokenKeyword, "not") != nil {
		expr.negate = true
	}

	term1, err := p.parseTerm()
	if err != nil {
		return nil, err
	}
	expr.term1 = term1

	for p.PeekOne(TokenSymbol, "+", "-") != nil {
		if expr.opToken != nil {
			// New sub expr
			expr = &simpleExpression{
				term1: expr,
			}
		}

		op := p.Current()
		p.Consume()

		term2, err := p.parseTerm()
		if err != nil {
			return nil, err
		}

		expr.term2 = term2
		expr.opToken = op
	}

	if expr.negate == false && expr.negativeSign == false && expr.term2 == nil {
		// Shortcut for faster evaluation
		return expr.term1, nil
	}

	return expr, nil
}

func (p *Parser) parseRelationalExpression() (IEvaluator, *Error) {
	expr1, err := p.parseSimpleExpression()
	if err != nil {
		return nil, err
	}

	expr := &relationalExpression{
		expr1: expr1,
	}

	if t := p.MatchOne(TokenSymbol, "==", "<=", ">=", "!=", "<>", ">", "<"); t != nil {
		expr2, err := p.parseRelationalExpression()
		if err != nil {
			return nil, err
		}
		expr.opToken = t
		expr.expr2 = expr2
	} else if t := p.MatchOne(TokenKeyword, "in"); t != nil {
		expr2, err := p.parseSimpleExpression()
		if err != nil {
			return nil, err
		}
		expr.opToken = t
		expr.expr2 = expr2
	}

	if expr.expr2 == nil {
		// Shortcut for faster evaluation
		return expr.expr1, nil
	}

	return expr, nil
}

func (p *Parser) ParseExpression() (IEvaluator, *Error) {
	rexpr1, err := p.parseRelationalExpression()
	if err != nil {
		return nil, err
	}

	exp := &Expression{
		expr1: rexpr1,
	}

	if p.PeekOne(TokenSymbol, "&&", "||") != nil || p.PeekOne(TokenKeyword, "and", "or") != nil {
		op := p.Current()
		p.Consume()
		expr2, err := p.ParseExpression()
		if err != nil {
			return nil, err
		}
		exp.expr2 = expr2
		exp.opToken = op
	}

	if exp.expr2 == nil {
		// Shortcut for faster evaluation
		return exp.expr1, nil
	}

	return exp, nil
}
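A small sketch of the arithmetic rules implemented above, assuming the upstream pongo2 FromString/Execute API: two integer operands keep integer semantics, one float operand promotes the operation to float, and ^ always evaluates via math.Pow. The exact float formatting depends on Value.String() and is not asserted here.

package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl, err := pongo2.FromString("{{ 7 / 2 }} | {{ 7 / 2.0 }} | {{ 2^10 }}")
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(pongo2.Context{})
	if err != nil {
		panic(err)
	}
	// 7 / 2 is integer division (3); 7 / 2.0 is float division (3.5);
	// 2^10 goes through math.Pow and is therefore rendered as a float.
	fmt.Println(out)
}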
14 vendor/github.com/flosch/pongo2/pongo2.go (generated, vendored)
@@ -1,14 +0,0 @@
package pongo2

// Version string
const Version = "dev"

// Must panics, if a Template couldn't successfully parsed. This is how you
// would use it:
//     var baseTemplate = pongo2.Must(pongo2.FromFile("templates/base.html"))
func Must(tpl *Template, err error) *Template {
	if err != nil {
		panic(err)
	}
	return tpl
}
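A hedged sketch of the pattern the Must doc comment above describes; the package name and template path are illustrative only.

package web

import "github.com/flosch/pongo2"

// Parsed once at start-up; Must panics during initialisation if the
// template cannot be parsed, so a broken template is caught early
// rather than on the first request that renders it.
var baseTemplate = pongo2.Must(pongo2.FromFile("templates/base.html"))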
135 vendor/github.com/flosch/pongo2/tags.go (generated, vendored)
@@ -1,135 +0,0 @@
package pongo2

/* Incomplete:
   -----------

   verbatim (only the "name" argument is missing for verbatim)

   Reconsideration:
   ----------------

   debug (reason: not sure what to output yet)
   regroup / Grouping on other properties (reason: maybe too python-specific; not sure how useful this would be in Go)

   Following built-in tags wont be added:
   --------------------------------------

   csrf_token (reason: web-framework specific)
   load (reason: python-specific)
   url (reason: web-framework specific)
*/

import (
	"fmt"

	"github.com/juju/errors"
)

type INodeTag interface {
	INode
}

// This is the function signature of the tag's parser you will have
// to implement in order to create a new tag.
//
// 'doc' is providing access to the whole document while 'arguments'
// is providing access to the user's arguments to the tag:
//
//     {% your_tag_name some "arguments" 123 %}
//
// start_token will be the *Token with the tag's name in it (here: your_tag_name).
//
// Please see the Parser documentation on how to use the parser.
// See RegisterTag()'s documentation for more information about
// writing a tag as well.
type TagParser func(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error)

type tag struct {
	name   string
	parser TagParser
}

var tags map[string]*tag

func init() {
	tags = make(map[string]*tag)
}

// Registers a new tag. You usually want to call this
// function in the tag's init() function:
// http://golang.org/doc/effective_go.html#init
//
// See http://www.florian-schlachter.de/post/pongo2/ for more about
// writing filters and tags.
func RegisterTag(name string, parserFn TagParser) error {
	_, existing := tags[name]
	if existing {
		return errors.Errorf("tag with name '%s' is already registered", name)
	}
	tags[name] = &tag{
		name:   name,
		parser: parserFn,
	}
	return nil
}

// Replaces an already registered tag with a new implementation. Use this
// function with caution since it allows you to change existing tag behaviour.
func ReplaceTag(name string, parserFn TagParser) error {
	_, existing := tags[name]
	if !existing {
		return errors.Errorf("tag with name '%s' does not exist (therefore cannot be overridden)", name)
	}
	tags[name] = &tag{
		name:   name,
		parser: parserFn,
	}
	return nil
}

// Tag = "{%" IDENT ARGS "%}"
func (p *Parser) parseTagElement() (INodeTag, *Error) {
	p.Consume() // consume "{%"
	tokenName := p.MatchType(TokenIdentifier)

	// Check for identifier
	if tokenName == nil {
		return nil, p.Error("Tag name must be an identifier.", nil)
	}

	// Check for the existing tag
	tag, exists := tags[tokenName.Val]
	if !exists {
		// Does not exists
		return nil, p.Error(fmt.Sprintf("Tag '%s' not found (or beginning tag not provided)", tokenName.Val), tokenName)
	}

	// Check sandbox tag restriction
	if _, isBanned := p.template.set.bannedTags[tokenName.Val]; isBanned {
		return nil, p.Error(fmt.Sprintf("Usage of tag '%s' is not allowed (sandbox restriction active).", tokenName.Val), tokenName)
	}

	var argsToken []*Token
	for p.Peek(TokenSymbol, "%}") == nil && p.Remaining() > 0 {
		// Add token to args
		argsToken = append(argsToken, p.Current())
		p.Consume() // next token
	}

	// EOF?
	if p.Remaining() == 0 {
		return nil, p.Error("Unexpectedly reached EOF, no tag end found.", p.lastToken)
	}

	p.Match(TokenSymbol, "%}")

	argParser := newParser(p.name, argsToken, p.template)
	if len(argsToken) == 0 {
		// This is done to have nice EOF error messages
		argParser.lastToken = tokenName
	}

	p.template.level++
	defer func() { p.template.level-- }()
	return tag.parser(p, tokenName, argParser)
}
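A hedged sketch of the TagParser/RegisterTag contract described above: a made-up "shout" tag that evaluates its single argument and upper-cases it. The identifiers used (RegisterTag, Parser, Token, IEvaluator, INodeTag, ExecutionContext, TemplateWriter, Error) are the exported pongo2 API shown in the removed files; the tag itself is purely illustrative.

package shout

import (
	"strings"

	"github.com/flosch/pongo2"
)

// tagShoutNode renders the upper-cased value of its single argument.
type tagShoutNode struct {
	expr pongo2.IEvaluator
}

func (n *tagShoutNode) Execute(ctx *pongo2.ExecutionContext, w pongo2.TemplateWriter) *pongo2.Error {
	val, err := n.expr.Evaluate(ctx)
	if err != nil {
		return err
	}
	w.WriteString(strings.ToUpper(val.String()))
	return nil
}

// tagShoutParser parses {% shout <expression> %}.
func tagShoutParser(doc *pongo2.Parser, start *pongo2.Token, arguments *pongo2.Parser) (pongo2.INodeTag, *pongo2.Error) {
	expr, err := arguments.ParseExpression()
	if err != nil {
		return nil, err
	}
	if arguments.Remaining() > 0 {
		return nil, arguments.Error("shout takes exactly one argument.", nil)
	}
	return &tagShoutNode{expr: expr}, nil
}

func init() {
	// Registered once at import time, mirroring the built-in tags below.
	pongo2.RegisterTag("shout", tagShoutParser)
}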
52 vendor/github.com/flosch/pongo2/tags_autoescape.go (generated, vendored)
@@ -1,52 +0,0 @@
package pongo2

type tagAutoescapeNode struct {
	wrapper    *NodeWrapper
	autoescape bool
}

func (node *tagAutoescapeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	old := ctx.Autoescape
	ctx.Autoescape = node.autoescape

	err := node.wrapper.Execute(ctx, writer)
	if err != nil {
		return err
	}

	ctx.Autoescape = old

	return nil
}

func tagAutoescapeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	autoescapeNode := &tagAutoescapeNode{}

	wrapper, _, err := doc.WrapUntilTag("endautoescape")
	if err != nil {
		return nil, err
	}
	autoescapeNode.wrapper = wrapper

	modeToken := arguments.MatchType(TokenIdentifier)
	if modeToken == nil {
		return nil, arguments.Error("A mode is required for autoescape-tag.", nil)
	}
	if modeToken.Val == "on" {
		autoescapeNode.autoescape = true
	} else if modeToken.Val == "off" {
		autoescapeNode.autoescape = false
	} else {
		return nil, arguments.Error("Only 'on' or 'off' is valid as an autoescape-mode.", nil)
	}

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Malformed autoescape-tag arguments.", nil)
	}

	return autoescapeNode, nil
}

func init() {
	RegisterTag("autoescape", tagAutoescapeParser)
}
129 vendor/github.com/flosch/pongo2/tags_block.go (generated, vendored)
@@ -1,129 +0,0 @@
package pongo2

import (
	"bytes"
	"fmt"
)

type tagBlockNode struct {
	name string
}

func (node *tagBlockNode) getBlockWrappers(tpl *Template) []*NodeWrapper {
	nodeWrappers := make([]*NodeWrapper, 0)
	var t *NodeWrapper

	for tpl != nil {
		t = tpl.blocks[node.name]
		if t != nil {
			nodeWrappers = append(nodeWrappers, t)
		}
		tpl = tpl.child
	}

	return nodeWrappers
}

func (node *tagBlockNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	tpl := ctx.template
	if tpl == nil {
		panic("internal error: tpl == nil")
	}

	// Determine the block to execute
	blockWrappers := node.getBlockWrappers(tpl)
	lenBlockWrappers := len(blockWrappers)

	if lenBlockWrappers == 0 {
		return ctx.Error("internal error: len(block_wrappers) == 0 in tagBlockNode.Execute()", nil)
	}

	blockWrapper := blockWrappers[lenBlockWrappers-1]
	ctx.Private["block"] = tagBlockInformation{
		ctx:      ctx,
		wrappers: blockWrappers[0 : lenBlockWrappers-1],
	}
	err := blockWrapper.Execute(ctx, writer)
	if err != nil {
		return err
	}

	return nil
}

type tagBlockInformation struct {
	ctx      *ExecutionContext
	wrappers []*NodeWrapper
}

func (t tagBlockInformation) Super() string {
	lenWrappers := len(t.wrappers)

	if lenWrappers == 0 {
		return ""
	}

	superCtx := NewChildExecutionContext(t.ctx)
	superCtx.Private["block"] = tagBlockInformation{
		ctx:      t.ctx,
		wrappers: t.wrappers[0 : lenWrappers-1],
	}

	blockWrapper := t.wrappers[lenWrappers-1]
	buf := bytes.NewBufferString("")
	err := blockWrapper.Execute(superCtx, &templateWriter{buf})
	if err != nil {
		return ""
	}
	return buf.String()
}

func tagBlockParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	if arguments.Count() == 0 {
		return nil, arguments.Error("Tag 'block' requires an identifier.", nil)
	}

	nameToken := arguments.MatchType(TokenIdentifier)
	if nameToken == nil {
		return nil, arguments.Error("First argument for tag 'block' must be an identifier.", nil)
	}

	if arguments.Remaining() != 0 {
		return nil, arguments.Error("Tag 'block' takes exactly 1 argument (an identifier).", nil)
	}

	wrapper, endtagargs, err := doc.WrapUntilTag("endblock")
	if err != nil {
		return nil, err
	}
	if endtagargs.Remaining() > 0 {
		endtagnameToken := endtagargs.MatchType(TokenIdentifier)
		if endtagnameToken != nil {
			if endtagnameToken.Val != nameToken.Val {
				return nil, endtagargs.Error(fmt.Sprintf("Name for 'endblock' must equal to 'block'-tag's name ('%s' != '%s').",
					nameToken.Val, endtagnameToken.Val), nil)
			}
		}

		if endtagnameToken == nil || endtagargs.Remaining() > 0 {
			return nil, endtagargs.Error("Either no or only one argument (identifier) allowed for 'endblock'.", nil)
		}
	}

	tpl := doc.template
	if tpl == nil {
		panic("internal error: tpl == nil")
	}
	_, hasBlock := tpl.blocks[nameToken.Val]
	if !hasBlock {
		tpl.blocks[nameToken.Val] = wrapper
	} else {
		return nil, arguments.Error(fmt.Sprintf("Block named '%s' already defined", nameToken.Val), nil)
	}

	return &tagBlockNode{name: nameToken.Val}, nil
}

func init() {
	RegisterTag("block", tagBlockParser)
}
27 vendor/github.com/flosch/pongo2/tags_comment.go (generated, vendored)
@@ -1,27 +0,0 @@
package pongo2

type tagCommentNode struct{}

func (node *tagCommentNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	return nil
}

func tagCommentParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	commentNode := &tagCommentNode{}

	// TODO: Process the endtag's arguments (see django 'comment'-tag documentation)
	err := doc.SkipUntilTag("endcomment")
	if err != nil {
		return nil, err
	}

	if arguments.Count() != 0 {
		return nil, arguments.Error("Tag 'comment' does not take any argument.", nil)
	}

	return commentNode, nil
}

func init() {
	RegisterTag("comment", tagCommentParser)
}
106 vendor/github.com/flosch/pongo2/tags_cycle.go (generated, vendored)
@@ -1,106 +0,0 @@
package pongo2

type tagCycleValue struct {
	node  *tagCycleNode
	value *Value
}

type tagCycleNode struct {
	position *Token
	args     []IEvaluator
	idx      int
	asName   string
	silent   bool
}

func (cv *tagCycleValue) String() string {
	return cv.value.String()
}

func (node *tagCycleNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	item := node.args[node.idx%len(node.args)]
	node.idx++

	val, err := item.Evaluate(ctx)
	if err != nil {
		return err
	}

	if t, ok := val.Interface().(*tagCycleValue); ok {
		// {% cycle "test1" "test2"
		// {% cycle cycleitem %}

		// Update the cycle value with next value
		item := t.node.args[t.node.idx%len(t.node.args)]
		t.node.idx++

		val, err := item.Evaluate(ctx)
		if err != nil {
			return err
		}

		t.value = val

		if !t.node.silent {
			writer.WriteString(val.String())
		}
	} else {
		// Regular call

		cycleValue := &tagCycleValue{
			node:  node,
			value: val,
		}

		if node.asName != "" {
			ctx.Private[node.asName] = cycleValue
		}
		if !node.silent {
			writer.WriteString(val.String())
		}
	}

	return nil
}

// HINT: We're not supporting the old comma-separated list of expressions argument-style
func tagCycleParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	cycleNode := &tagCycleNode{
		position: start,
	}

	for arguments.Remaining() > 0 {
		node, err := arguments.ParseExpression()
		if err != nil {
			return nil, err
		}
		cycleNode.args = append(cycleNode.args, node)

		if arguments.MatchOne(TokenKeyword, "as") != nil {
			// as

			nameToken := arguments.MatchType(TokenIdentifier)
			if nameToken == nil {
				return nil, arguments.Error("Name (identifier) expected after 'as'.", nil)
			}
			cycleNode.asName = nameToken.Val

			if arguments.MatchOne(TokenIdentifier, "silent") != nil {
				cycleNode.silent = true
			}

			// Now we're finished
			break
		}
	}

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Malformed cycle-tag.", nil)
	}

	return cycleNode, nil
}

func init() {
	RegisterTag("cycle", tagCycleParser)
}
52 vendor/github.com/flosch/pongo2/tags_extends.go (generated, vendored)
@@ -1,52 +0,0 @@
package pongo2

type tagExtendsNode struct {
	filename string
}

func (node *tagExtendsNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	return nil
}

func tagExtendsParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	extendsNode := &tagExtendsNode{}

	if doc.template.level > 1 {
		return nil, arguments.Error("The 'extends' tag can only defined on root level.", start)
	}

	if doc.template.parent != nil {
		// Already one parent
		return nil, arguments.Error("This template has already one parent.", start)
	}

	if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
		// prepared, static template

		// Get parent's filename
		parentFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)

		// Parse the parent
		parentTemplate, err := doc.template.set.FromFile(parentFilename)
		if err != nil {
			return nil, err.(*Error)
		}

		// Keep track of things
		parentTemplate.child = doc.template
		doc.template.parent = parentTemplate
		extendsNode.filename = parentFilename
	} else {
		return nil, arguments.Error("Tag 'extends' requires a template filename as string.", nil)
	}

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Tag 'extends' does only take 1 argument.", nil)
	}

	return extendsNode, nil
}

func init() {
	RegisterTag("extends", tagExtendsParser)
}
95 vendor/github.com/flosch/pongo2/tags_filter.go (generated, vendored)
@@ -1,95 +0,0 @@
package pongo2

import (
	"bytes"
)

type nodeFilterCall struct {
	name      string
	paramExpr IEvaluator
}

type tagFilterNode struct {
	position    *Token
	bodyWrapper *NodeWrapper
	filterChain []*nodeFilterCall
}

func (node *tagFilterNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	temp := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB size

	err := node.bodyWrapper.Execute(ctx, temp)
	if err != nil {
		return err
	}

	value := AsValue(temp.String())

	for _, call := range node.filterChain {
		var param *Value
		if call.paramExpr != nil {
			param, err = call.paramExpr.Evaluate(ctx)
			if err != nil {
				return err
			}
		} else {
			param = AsValue(nil)
		}
		value, err = ApplyFilter(call.name, value, param)
		if err != nil {
			return ctx.Error(err.Error(), node.position)
		}
	}

	writer.WriteString(value.String())

	return nil
}

func tagFilterParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	filterNode := &tagFilterNode{
		position: start,
	}

	wrapper, _, err := doc.WrapUntilTag("endfilter")
	if err != nil {
		return nil, err
	}
	filterNode.bodyWrapper = wrapper

	for arguments.Remaining() > 0 {
		filterCall := &nodeFilterCall{}

		nameToken := arguments.MatchType(TokenIdentifier)
		if nameToken == nil {
			return nil, arguments.Error("Expected a filter name (identifier).", nil)
		}
		filterCall.name = nameToken.Val

		if arguments.MatchOne(TokenSymbol, ":") != nil {
			// Filter parameter
			// NOTICE: we can't use ParseExpression() here, because it would parse the next filter "|..." as well in the argument list
			expr, err := arguments.parseVariableOrLiteral()
			if err != nil {
				return nil, err
			}
			filterCall.paramExpr = expr
		}

		filterNode.filterChain = append(filterNode.filterChain, filterCall)

		if arguments.MatchOne(TokenSymbol, "|") == nil {
			break
		}
	}

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Malformed filter-tag arguments.", nil)
	}

	return filterNode, nil
}

func init() {
	RegisterTag("filter", tagFilterParser)
}
49 vendor/github.com/flosch/pongo2/tags_firstof.go (generated, vendored)
@@ -1,49 +0,0 @@
package pongo2

type tagFirstofNode struct {
	position *Token
	args     []IEvaluator
}

func (node *tagFirstofNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	for _, arg := range node.args {
		val, err := arg.Evaluate(ctx)
		if err != nil {
			return err
		}

		if val.IsTrue() {
			if ctx.Autoescape && !arg.FilterApplied("safe") {
				val, err = ApplyFilter("escape", val, nil)
				if err != nil {
					return err
				}
			}

			writer.WriteString(val.String())
			return nil
		}
	}

	return nil
}

func tagFirstofParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	firstofNode := &tagFirstofNode{
		position: start,
	}

	for arguments.Remaining() > 0 {
		node, err := arguments.ParseExpression()
		if err != nil {
			return nil, err
		}
		firstofNode.args = append(firstofNode.args, node)
	}

	return firstofNode, nil
}

func init() {
	RegisterTag("firstof", tagFirstofParser)
}
159 vendor/github.com/flosch/pongo2/tags_for.go (generated, vendored)
@@ -1,159 +0,0 @@
package pongo2

type tagForNode struct {
	key             string
	value           string // only for maps: for key, value in map
	objectEvaluator IEvaluator
	reversed        bool
	sorted          bool

	bodyWrapper  *NodeWrapper
	emptyWrapper *NodeWrapper
}

type tagForLoopInformation struct {
	Counter     int
	Counter0    int
	Revcounter  int
	Revcounter0 int
	First       bool
	Last        bool
	Parentloop  *tagForLoopInformation
}

func (node *tagForNode) Execute(ctx *ExecutionContext, writer TemplateWriter) (forError *Error) {
	// Backup forloop (as parentloop in public context), key-name and value-name
	forCtx := NewChildExecutionContext(ctx)
	parentloop := forCtx.Private["forloop"]

	// Create loop struct
	loopInfo := &tagForLoopInformation{
		First: true,
	}

	// Is it a loop in a loop?
	if parentloop != nil {
		loopInfo.Parentloop = parentloop.(*tagForLoopInformation)
	}

	// Register loopInfo in public context
	forCtx.Private["forloop"] = loopInfo

	obj, err := node.objectEvaluator.Evaluate(forCtx)
	if err != nil {
		return err
	}

	obj.IterateOrder(func(idx, count int, key, value *Value) bool {
		// There's something to iterate over (correct type and at least 1 item)

		// Update loop infos and public context
		forCtx.Private[node.key] = key
		if value != nil {
			forCtx.Private[node.value] = value
		}
		loopInfo.Counter = idx + 1
		loopInfo.Counter0 = idx
		if idx == 1 {
			loopInfo.First = false
		}
		if idx+1 == count {
			loopInfo.Last = true
		}
		loopInfo.Revcounter = count - idx        // TODO: Not sure about this, have to look it up
		loopInfo.Revcounter0 = count - (idx + 1) // TODO: Not sure about this, have to look it up

		// Render elements with updated context
		err := node.bodyWrapper.Execute(forCtx, writer)
		if err != nil {
			forError = err
			return false
		}
		return true
	}, func() {
		// Nothing to iterate over (maybe wrong type or no items)
		if node.emptyWrapper != nil {
			err := node.emptyWrapper.Execute(forCtx, writer)
			if err != nil {
				forError = err
			}
		}
	}, node.reversed, node.sorted)

	return forError
}

func tagForParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	forNode := &tagForNode{}

	// Arguments parsing
	var valueToken *Token
	keyToken := arguments.MatchType(TokenIdentifier)
	if keyToken == nil {
		return nil, arguments.Error("Expected an key identifier as first argument for 'for'-tag", nil)
	}

	if arguments.Match(TokenSymbol, ",") != nil {
		// Value name is provided
		valueToken = arguments.MatchType(TokenIdentifier)
		if valueToken == nil {
			return nil, arguments.Error("Value name must be an identifier.", nil)
		}
	}

	if arguments.Match(TokenKeyword, "in") == nil {
		return nil, arguments.Error("Expected keyword 'in'.", nil)
	}

	objectEvaluator, err := arguments.ParseExpression()
	if err != nil {
		return nil, err
	}
	forNode.objectEvaluator = objectEvaluator
	forNode.key = keyToken.Val
	if valueToken != nil {
		forNode.value = valueToken.Val
	}

	if arguments.MatchOne(TokenIdentifier, "reversed") != nil {
		forNode.reversed = true
	}

	if arguments.MatchOne(TokenIdentifier, "sorted") != nil {
		forNode.sorted = true
	}

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Malformed for-loop arguments.", nil)
	}

	// Body wrapping
	wrapper, endargs, err := doc.WrapUntilTag("empty", "endfor")
	if err != nil {
		return nil, err
	}
	forNode.bodyWrapper = wrapper

	if endargs.Count() > 0 {
		return nil, endargs.Error("Arguments not allowed here.", nil)
	}

	if wrapper.Endtag == "empty" {
		// if there's an else in the if-statement, we need the else-Block as well
		wrapper, endargs, err = doc.WrapUntilTag("endfor")
		if err != nil {
			return nil, err
		}
		forNode.emptyWrapper = wrapper

		if endargs.Count() > 0 {
			return nil, endargs.Error("Arguments not allowed here.", nil)
		}
	}

	return forNode, nil
}

func init() {
	RegisterTag("for", tagForParser)
}
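A short usage sketch for the for/empty tag implemented above, assuming the upstream FromString/Execute API; forloop.Counter is the exported field registered under "forloop" in the code above, and the expected output noted in the comment is an assumption.

package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl, err := pongo2.FromString("{% for name in names %}{{ forloop.Counter }}. {{ name }}\n{% empty %}nobody{% endfor %}")
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(pongo2.Context{"names": []string{"bob", "alice"}})
	if err != nil {
		panic(err)
	}
	fmt.Print(out) // expected: "1. bob\n2. alice\n"
}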
76 vendor/github.com/flosch/pongo2/tags_if.go (generated, vendored)
@@ -1,76 +0,0 @@
package pongo2

type tagIfNode struct {
	conditions []IEvaluator
	wrappers   []*NodeWrapper
}

func (node *tagIfNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	for i, condition := range node.conditions {
		result, err := condition.Evaluate(ctx)
		if err != nil {
			return err
		}

		if result.IsTrue() {
			return node.wrappers[i].Execute(ctx, writer)
		}
		// Last condition?
		if len(node.conditions) == i+1 && len(node.wrappers) > i+1 {
			return node.wrappers[i+1].Execute(ctx, writer)
		}
	}
	return nil
}

func tagIfParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	ifNode := &tagIfNode{}

	// Parse first and main IF condition
	condition, err := arguments.ParseExpression()
	if err != nil {
		return nil, err
	}
	ifNode.conditions = append(ifNode.conditions, condition)

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("If-condition is malformed.", nil)
	}

	// Check the rest
	for {
		wrapper, tagArgs, err := doc.WrapUntilTag("elif", "else", "endif")
		if err != nil {
			return nil, err
		}
		ifNode.wrappers = append(ifNode.wrappers, wrapper)

		if wrapper.Endtag == "elif" {
			// elif can take a condition
			condition, err = tagArgs.ParseExpression()
			if err != nil {
				return nil, err
			}
			ifNode.conditions = append(ifNode.conditions, condition)

			if tagArgs.Remaining() > 0 {
				return nil, tagArgs.Error("Elif-condition is malformed.", nil)
			}
		} else {
			if tagArgs.Count() > 0 {
				// else/endif can't take any conditions
				return nil, tagArgs.Error("Arguments not allowed here.", nil)
			}
		}

		if wrapper.Endtag == "endif" {
			break
		}
	}

	return ifNode, nil
}

func init() {
	RegisterTag("if", tagIfParser)
}
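A small sketch of the if/elif/else flow above, using the upstream FromString/Execute API; the relational operators follow the integer/float rules from parser_expression.go, and the expected output is an assumption.

package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl, err := pongo2.FromString("{% if score >= 90 %}A{% elif score >= 75 %}B{% else %}C{% endif %}")
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(pongo2.Context{"score": 82})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // expected: "B"
}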
116 vendor/github.com/flosch/pongo2/tags_ifchanged.go (generated, vendored)
@@ -1,116 +0,0 @@
package pongo2

import (
	"bytes"
)

type tagIfchangedNode struct {
	watchedExpr []IEvaluator
	lastValues  []*Value
	lastContent []byte
	thenWrapper *NodeWrapper
	elseWrapper *NodeWrapper
}

func (node *tagIfchangedNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	if len(node.watchedExpr) == 0 {
		// Check against own rendered body

		buf := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
		err := node.thenWrapper.Execute(ctx, buf)
		if err != nil {
			return err
		}

		bufBytes := buf.Bytes()
		if !bytes.Equal(node.lastContent, bufBytes) {
			// Rendered content changed, output it
			writer.Write(bufBytes)
			node.lastContent = bufBytes
		}
	} else {
		nowValues := make([]*Value, 0, len(node.watchedExpr))
		for _, expr := range node.watchedExpr {
			val, err := expr.Evaluate(ctx)
			if err != nil {
				return err
			}
			nowValues = append(nowValues, val)
		}

		// Compare old to new values now
		changed := len(node.lastValues) == 0

		for idx, oldVal := range node.lastValues {
			if !oldVal.EqualValueTo(nowValues[idx]) {
				changed = true
				break // we can stop here because ONE value changed
			}
		}

		node.lastValues = nowValues

		if changed {
			// Render thenWrapper
			err := node.thenWrapper.Execute(ctx, writer)
			if err != nil {
				return err
			}
		} else {
			// Render elseWrapper
			err := node.elseWrapper.Execute(ctx, writer)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

func tagIfchangedParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	ifchangedNode := &tagIfchangedNode{}

	for arguments.Remaining() > 0 {
		// Parse condition
		expr, err := arguments.ParseExpression()
		if err != nil {
			return nil, err
		}
		ifchangedNode.watchedExpr = append(ifchangedNode.watchedExpr, expr)
	}

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Ifchanged-arguments are malformed.", nil)
	}

	// Wrap then/else-blocks
	wrapper, endargs, err := doc.WrapUntilTag("else", "endifchanged")
	if err != nil {
		return nil, err
	}
	ifchangedNode.thenWrapper = wrapper

	if endargs.Count() > 0 {
		return nil, endargs.Error("Arguments not allowed here.", nil)
	}

	if wrapper.Endtag == "else" {
		// if there's an else in the if-statement, we need the else-Block as well
		wrapper, endargs, err = doc.WrapUntilTag("endifchanged")
		if err != nil {
			return nil, err
		}
		ifchangedNode.elseWrapper = wrapper

		if endargs.Count() > 0 {
			return nil, endargs.Error("Arguments not allowed here.", nil)
		}
	}

	return ifchangedNode, nil
}

func init() {
	RegisterTag("ifchanged", tagIfchangedParser)
}
78 vendor/github.com/flosch/pongo2/tags_ifequal.go (generated, vendored)
@@ -1,78 +0,0 @@
package pongo2

type tagIfEqualNode struct {
	var1, var2  IEvaluator
	thenWrapper *NodeWrapper
	elseWrapper *NodeWrapper
}

func (node *tagIfEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	r1, err := node.var1.Evaluate(ctx)
	if err != nil {
		return err
	}
	r2, err := node.var2.Evaluate(ctx)
	if err != nil {
		return err
	}

	result := r1.EqualValueTo(r2)

	if result {
		return node.thenWrapper.Execute(ctx, writer)
	}
	if node.elseWrapper != nil {
		return node.elseWrapper.Execute(ctx, writer)
	}
	return nil
}

func tagIfEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	ifequalNode := &tagIfEqualNode{}

	// Parse two expressions
	var1, err := arguments.ParseExpression()
	if err != nil {
		return nil, err
	}
	var2, err := arguments.ParseExpression()
	if err != nil {
		return nil, err
	}
	ifequalNode.var1 = var1
	ifequalNode.var2 = var2

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
	}

	// Wrap then/else-blocks
	wrapper, endargs, err := doc.WrapUntilTag("else", "endifequal")
	if err != nil {
		return nil, err
	}
	ifequalNode.thenWrapper = wrapper

	if endargs.Count() > 0 {
		return nil, endargs.Error("Arguments not allowed here.", nil)
	}

	if wrapper.Endtag == "else" {
		// if there's an else in the if-statement, we need the else-Block as well
		wrapper, endargs, err = doc.WrapUntilTag("endifequal")
		if err != nil {
			return nil, err
		}
		ifequalNode.elseWrapper = wrapper

		if endargs.Count() > 0 {
			return nil, endargs.Error("Arguments not allowed here.", nil)
		}
	}

	return ifequalNode, nil
}

func init() {
	RegisterTag("ifequal", tagIfEqualParser)
}
78 vendor/github.com/flosch/pongo2/tags_ifnotequal.go (generated, vendored)
@@ -1,78 +0,0 @@
package pongo2

type tagIfNotEqualNode struct {
	var1, var2  IEvaluator
	thenWrapper *NodeWrapper
	elseWrapper *NodeWrapper
}

func (node *tagIfNotEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	r1, err := node.var1.Evaluate(ctx)
	if err != nil {
		return err
	}
	r2, err := node.var2.Evaluate(ctx)
	if err != nil {
		return err
	}

	result := !r1.EqualValueTo(r2)

	if result {
		return node.thenWrapper.Execute(ctx, writer)
	}
	if node.elseWrapper != nil {
		return node.elseWrapper.Execute(ctx, writer)
	}
	return nil
}

func tagIfNotEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	ifnotequalNode := &tagIfNotEqualNode{}

	// Parse two expressions
	var1, err := arguments.ParseExpression()
	if err != nil {
		return nil, err
	}
	var2, err := arguments.ParseExpression()
	if err != nil {
		return nil, err
	}
	ifnotequalNode.var1 = var1
	ifnotequalNode.var2 = var2

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
	}

	// Wrap then/else-blocks
	wrapper, endargs, err := doc.WrapUntilTag("else", "endifnotequal")
	if err != nil {
		return nil, err
	}
	ifnotequalNode.thenWrapper = wrapper

	if endargs.Count() > 0 {
		return nil, endargs.Error("Arguments not allowed here.", nil)
	}

	if wrapper.Endtag == "else" {
		// if there's an else in the if-statement, we need the else-Block as well
		wrapper, endargs, err = doc.WrapUntilTag("endifnotequal")
		if err != nil {
			return nil, err
		}
		ifnotequalNode.elseWrapper = wrapper

		if endargs.Count() > 0 {
			return nil, endargs.Error("Arguments not allowed here.", nil)
		}
	}

	return ifnotequalNode, nil
}

func init() {
	RegisterTag("ifnotequal", tagIfNotEqualParser)
}
84 vendor/github.com/flosch/pongo2/tags_import.go (generated, vendored)
@@ -1,84 +0,0 @@
package pongo2

import (
	"fmt"
)

type tagImportNode struct {
	position *Token
	filename string
	macros   map[string]*tagMacroNode // alias/name -> macro instance
}

func (node *tagImportNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	for name, macro := range node.macros {
		func(name string, macro *tagMacroNode) {
			ctx.Private[name] = func(args ...*Value) *Value {
				return macro.call(ctx, args...)
			}
		}(name, macro)
	}
	return nil
}

func tagImportParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	importNode := &tagImportNode{
		position: start,
		macros:   make(map[string]*tagMacroNode),
	}

	filenameToken := arguments.MatchType(TokenString)
	if filenameToken == nil {
		return nil, arguments.Error("Import-tag needs a filename as string.", nil)
	}

	importNode.filename = doc.template.set.resolveFilename(doc.template, filenameToken.Val)

	if arguments.Remaining() == 0 {
		return nil, arguments.Error("You must at least specify one macro to import.", nil)
	}

	// Compile the given template
	tpl, err := doc.template.set.FromFile(importNode.filename)
	if err != nil {
		return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, start)
	}

	for arguments.Remaining() > 0 {
		macroNameToken := arguments.MatchType(TokenIdentifier)
		if macroNameToken == nil {
			return nil, arguments.Error("Expected macro name (identifier).", nil)
		}

		asName := macroNameToken.Val
		if arguments.Match(TokenKeyword, "as") != nil {
			aliasToken := arguments.MatchType(TokenIdentifier)
			if aliasToken == nil {
				return nil, arguments.Error("Expected macro alias name (identifier).", nil)
			}
			asName = aliasToken.Val
		}

		macroInstance, has := tpl.exportedMacros[macroNameToken.Val]
		if !has {
			return nil, arguments.Error(fmt.Sprintf("Macro '%s' not found (or not exported) in '%s'.", macroNameToken.Val,
				importNode.filename), macroNameToken)
		}

		importNode.macros[asName] = macroInstance

		if arguments.Remaining() == 0 {
			break
		}

		if arguments.Match(TokenSymbol, ",") == nil {
			return nil, arguments.Error("Expected ','.", nil)
		}
	}

	return importNode, nil
}

func init() {
	RegisterTag("import", tagImportParser)
}
146 vendor/github.com/flosch/pongo2/tags_include.go (generated, vendored)
@@ -1,146 +0,0 @@
package pongo2

type tagIncludeNode struct {
	tpl               *Template
	filenameEvaluator IEvaluator
	lazy              bool
	only              bool
	filename          string
	withPairs         map[string]IEvaluator
	ifExists          bool
}

func (node *tagIncludeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	// Building the context for the template
	includeCtx := make(Context)

	// Fill the context with all data from the parent
	if !node.only {
		includeCtx.Update(ctx.Public)
		includeCtx.Update(ctx.Private)
	}

	// Put all custom with-pairs into the context
	for key, value := range node.withPairs {
		val, err := value.Evaluate(ctx)
		if err != nil {
			return err
		}
		includeCtx[key] = val
	}

	// Execute the template
	if node.lazy {
		// Evaluate the filename
		filename, err := node.filenameEvaluator.Evaluate(ctx)
		if err != nil {
			return err
		}

		if filename.String() == "" {
			return ctx.Error("Filename for 'include'-tag evaluated to an empty string.", nil)
		}

		// Get include-filename
		includedFilename := ctx.template.set.resolveFilename(ctx.template, filename.String())

		includedTpl, err2 := ctx.template.set.FromFile(includedFilename)
		if err2 != nil {
			// if this is ReadFile error, and "if_exists" flag is enabled
			if node.ifExists && err2.(*Error).Sender == "fromfile" {
				return nil
			}
			return err2.(*Error)
		}
		err2 = includedTpl.ExecuteWriter(includeCtx, writer)
		if err2 != nil {
			return err2.(*Error)
		}
		return nil
	}
	// Template is already parsed with static filename
	err := node.tpl.ExecuteWriter(includeCtx, writer)
	if err != nil {
		return err.(*Error)
	}
	return nil
}

type tagIncludeEmptyNode struct{}

func (node *tagIncludeEmptyNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	return nil
}

func tagIncludeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	includeNode := &tagIncludeNode{
		withPairs: make(map[string]IEvaluator),
	}

	if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
		// prepared, static template

		// "if_exists" flag
		ifExists := arguments.Match(TokenIdentifier, "if_exists") != nil

		// Get include-filename
		includedFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)

		// Parse the parent
		includeNode.filename = includedFilename
		includedTpl, err := doc.template.set.FromFile(includedFilename)
		if err != nil {
			// if this is ReadFile error, and "if_exists" token presents we should create and empty node
			if err.(*Error).Sender == "fromfile" && ifExists {
				return &tagIncludeEmptyNode{}, nil
			}
			return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, filenameToken)
		}
		includeNode.tpl = includedTpl
	} else {
		// No String, then the user wants to use lazy-evaluation (slower, but possible)
		filenameEvaluator, err := arguments.ParseExpression()
		if err != nil {
			return nil, err.updateFromTokenIfNeeded(doc.template, filenameToken)
		}
		includeNode.filenameEvaluator = filenameEvaluator
		includeNode.lazy = true
		includeNode.ifExists = arguments.Match(TokenIdentifier, "if_exists") != nil // "if_exists" flag
	}

	// After having parsed the filename we're gonna parse the with+only options
	if arguments.Match(TokenIdentifier, "with") != nil {
		for arguments.Remaining() > 0 {
			// We have at least one key=expr pair (because of starting "with")
			keyToken := arguments.MatchType(TokenIdentifier)
			if keyToken == nil {
				return nil, arguments.Error("Expected an identifier", nil)
			}
			if arguments.Match(TokenSymbol, "=") == nil {
				return nil, arguments.Error("Expected '='.", nil)
			}
			valueExpr, err := arguments.ParseExpression()
			if err != nil {
				return nil, err.updateFromTokenIfNeeded(doc.template, keyToken)
			}

			includeNode.withPairs[keyToken.Val] = valueExpr

			// Only?
			if arguments.Match(TokenIdentifier, "only") != nil {
				includeNode.only = true
				break // stop parsing arguments because it's the last option
			}
		}
	}

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Malformed 'include'-tag arguments.", nil)
	}

	return includeNode, nil
}

func init() {
	RegisterTag("include", tagIncludeParser)
}
133 vendor/github.com/flosch/pongo2/tags_lorem.go (generated, vendored)
@@ -1,133 +0,0 @@
package pongo2
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/juju/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
tagLoremParagraphs = strings.Split(tagLoremText, "\n")
|
||||
tagLoremWords = strings.Fields(tagLoremText)
|
||||
)
|
||||
|
||||
type tagLoremNode struct {
|
||||
position *Token
|
||||
count int // number of paragraphs
|
||||
method string // w = words, p = HTML paragraphs, b = plain-text (default is b)
|
||||
random bool // does not use the default paragraph "Lorem ipsum dolor sit amet, ..."
|
||||
}
|
||||
|
||||
func (node *tagLoremNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
|
||||
switch node.method {
|
||||
case "b":
|
||||
if node.random {
|
||||
for i := 0; i < node.count; i++ {
|
||||
if i > 0 {
|
||||
writer.WriteString("\n")
|
||||
}
|
||||
par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
|
||||
writer.WriteString(par)
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < node.count; i++ {
|
||||
if i > 0 {
|
||||
writer.WriteString("\n")
|
||||
}
|
||||
par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
|
||||
writer.WriteString(par)
|
||||
}
|
||||
}
|
||||
case "w":
|
||||
if node.random {
|
||||
for i := 0; i < node.count; i++ {
|
||||
if i > 0 {
|
||||
writer.WriteString(" ")
|
||||
}
|
||||
word := tagLoremWords[rand.Intn(len(tagLoremWords))]
|
||||
writer.WriteString(word)
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < node.count; i++ {
|
||||
if i > 0 {
|
||||
writer.WriteString(" ")
|
||||
}
|
||||
word := tagLoremWords[i%len(tagLoremWords)]
|
||||
writer.WriteString(word)
|
||||
}
|
||||
}
|
||||
case "p":
|
||||
if node.random {
|
||||
for i := 0; i < node.count; i++ {
|
||||
if i > 0 {
|
||||
writer.WriteString("\n")
|
||||
}
|
||||
writer.WriteString("<p>")
|
||||
par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
|
||||
writer.WriteString(par)
|
||||
writer.WriteString("</p>")
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < node.count; i++ {
|
||||
if i > 0 {
|
||||
writer.WriteString("\n")
|
||||
}
|
||||
writer.WriteString("<p>")
|
||||
par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
|
||||
writer.WriteString(par)
|
||||
writer.WriteString("</p>")
|
||||
|
||||
}
|
||||
}
|
||||
default:
|
||||
return ctx.OrigError(errors.Errorf("unsupported method: %s", node.method), nil)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func tagLoremParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
|
||||
loremNode := &tagLoremNode{
|
||||
position: start,
|
||||
count: 1,
|
||||
method: "b",
|
||||
}
|
||||
|
||||
if countToken := arguments.MatchType(TokenNumber); countToken != nil {
|
||||
loremNode.count = AsValue(countToken.Val).Integer()
|
||||
}
|
||||
|
||||
if methodToken := arguments.MatchType(TokenIdentifier); methodToken != nil {
|
||||
if methodToken.Val != "w" && methodToken.Val != "p" && methodToken.Val != "b" {
|
||||
return nil, arguments.Error("lorem-method must be either 'w', 'p' or 'b'.", nil)
|
||||
}
|
||||
|
||||
loremNode.method = methodToken.Val
|
||||
}
|
||||
|
||||
if arguments.MatchOne(TokenIdentifier, "random") != nil {
|
||||
loremNode.random = true
|
||||
}
|
||||
|
||||
if arguments.Remaining() > 0 {
|
||||
return nil, arguments.Error("Malformed lorem-tag arguments.", nil)
|
||||
}
|
||||
|
||||
return loremNode, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().Unix())
|
||||
|
||||
RegisterTag("lorem", tagLoremParser)
|
||||
}
|
||||
|
||||
const tagLoremText = `Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
|
||||
Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat.
|
||||
Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi.
|
||||
Nam liber tempor cum soluta nobis eleifend option congue nihil imperdiet doming id quod mazim placerat facer possim assum. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.
|
||||
Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis.
|
||||
At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, At accusam aliquyam diam diam dolore dolores duo eirmod eos erat, et nonumy sed tempor et et invidunt justo labore Stet clita ea et gubergren, kasd magna no rebum. sanctus sea sed takimata ut vero voluptua. est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat.
|
||||
Consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.`
|
149 vendor/github.com/flosch/pongo2/tags_macro.go (generated, vendored)
@@ -1,149 +0,0 @@
package pongo2

import (
	"bytes"
	"fmt"
)

type tagMacroNode struct {
	position  *Token
	name      string
	argsOrder []string
	args      map[string]IEvaluator
	exported  bool

	wrapper *NodeWrapper
}

func (node *tagMacroNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	ctx.Private[node.name] = func(args ...*Value) *Value {
		return node.call(ctx, args...)
	}

	return nil
}

func (node *tagMacroNode) call(ctx *ExecutionContext, args ...*Value) *Value {
	argsCtx := make(Context)

	for k, v := range node.args {
		if v == nil {
			// User did not provided a default value
			argsCtx[k] = nil
		} else {
			// Evaluate the default value
			valueExpr, err := v.Evaluate(ctx)
			if err != nil {
				ctx.Logf(err.Error())
				return AsSafeValue(err.Error())
			}

			argsCtx[k] = valueExpr
		}
	}

	if len(args) > len(node.argsOrder) {
		// Too many arguments, we're ignoring them and just logging into debug mode.
		err := ctx.Error(fmt.Sprintf("Macro '%s' called with too many arguments (%d instead of %d).",
			node.name, len(args), len(node.argsOrder)), nil).updateFromTokenIfNeeded(ctx.template, node.position)

		ctx.Logf(err.Error()) // TODO: This is a workaround, because the error is not returned yet to the Execution()-methods
		return AsSafeValue(err.Error())
	}

	// Make a context for the macro execution
	macroCtx := NewChildExecutionContext(ctx)

	// Register all arguments in the private context
	macroCtx.Private.Update(argsCtx)

	for idx, argValue := range args {
		macroCtx.Private[node.argsOrder[idx]] = argValue.Interface()
	}

	var b bytes.Buffer
	err := node.wrapper.Execute(macroCtx, &b)
	if err != nil {
		return AsSafeValue(err.updateFromTokenIfNeeded(ctx.template, node.position).Error())
	}

	return AsSafeValue(b.String())
}

func tagMacroParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	macroNode := &tagMacroNode{
		position: start,
		args:     make(map[string]IEvaluator),
	}

	nameToken := arguments.MatchType(TokenIdentifier)
	if nameToken == nil {
		return nil, arguments.Error("Macro-tag needs at least an identifier as name.", nil)
	}
	macroNode.name = nameToken.Val

	if arguments.MatchOne(TokenSymbol, "(") == nil {
		return nil, arguments.Error("Expected '('.", nil)
	}

	for arguments.Match(TokenSymbol, ")") == nil {
		argNameToken := arguments.MatchType(TokenIdentifier)
		if argNameToken == nil {
			return nil, arguments.Error("Expected argument name as identifier.", nil)
		}
		macroNode.argsOrder = append(macroNode.argsOrder, argNameToken.Val)

		if arguments.Match(TokenSymbol, "=") != nil {
			// Default expression follows
			argDefaultExpr, err := arguments.ParseExpression()
			if err != nil {
				return nil, err
			}
			macroNode.args[argNameToken.Val] = argDefaultExpr
		} else {
			// No default expression
			macroNode.args[argNameToken.Val] = nil
		}

		if arguments.Match(TokenSymbol, ")") != nil {
			break
		}
		if arguments.Match(TokenSymbol, ",") == nil {
			return nil, arguments.Error("Expected ',' or ')'.", nil)
		}
	}

	if arguments.Match(TokenKeyword, "export") != nil {
		macroNode.exported = true
	}

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Malformed macro-tag.", nil)
	}

	// Body wrapping
	wrapper, endargs, err := doc.WrapUntilTag("endmacro")
	if err != nil {
		return nil, err
	}
	macroNode.wrapper = wrapper

	if endargs.Count() > 0 {
		return nil, endargs.Error("Arguments not allowed here.", nil)
	}

	if macroNode.exported {
		// Now register the macro if it wants to be exported
		_, has := doc.template.exportedMacros[macroNode.name]
		if has {
			return nil, doc.Error(fmt.Sprintf("another macro with name '%s' already exported", macroNode.name), start)
		}
		doc.template.exportedMacros[macroNode.name] = macroNode
	}

	return macroNode, nil
}

func init() {
	RegisterTag("macro", tagMacroParser)
}
50 vendor/github.com/flosch/pongo2/tags_now.go (generated, vendored)
@@ -1,50 +0,0 @@
package pongo2

import (
	"time"
)

type tagNowNode struct {
	position *Token
	format   string
	fake     bool
}

func (node *tagNowNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	var t time.Time
	if node.fake {
		t = time.Date(2014, time.February, 05, 18, 31, 45, 00, time.UTC)
	} else {
		t = time.Now()
	}

	writer.WriteString(t.Format(node.format))

	return nil
}

func tagNowParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	nowNode := &tagNowNode{
		position: start,
	}

	formatToken := arguments.MatchType(TokenString)
	if formatToken == nil {
		return nil, arguments.Error("Expected a format string.", nil)
	}
	nowNode.format = formatToken.Val

	if arguments.MatchOne(TokenIdentifier, "fake") != nil {
		nowNode.fake = true
	}

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Malformed now-tag arguments.", nil)
	}

	return nowNode, nil
}

func init() {
	RegisterTag("now", tagNowParser)
}
50 vendor/github.com/flosch/pongo2/tags_set.go (generated, vendored)
@@ -1,50 +0,0 @@
package pongo2

type tagSetNode struct {
	name       string
	expression IEvaluator
}

func (node *tagSetNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	// Evaluate expression
	value, err := node.expression.Evaluate(ctx)
	if err != nil {
		return err
	}

	ctx.Private[node.name] = value
	return nil
}

func tagSetParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	node := &tagSetNode{}

	// Parse variable name
	typeToken := arguments.MatchType(TokenIdentifier)
	if typeToken == nil {
		return nil, arguments.Error("Expected an identifier.", nil)
	}
	node.name = typeToken.Val

	if arguments.Match(TokenSymbol, "=") == nil {
		return nil, arguments.Error("Expected '='.", nil)
	}

	// Variable expression
	keyExpression, err := arguments.ParseExpression()
	if err != nil {
		return nil, err
	}
	node.expression = keyExpression

	// Remaining arguments
	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Malformed 'set'-tag arguments.", nil)
	}

	return node, nil
}

func init() {
	RegisterTag("set", tagSetParser)
}
54 vendor/github.com/flosch/pongo2/tags_spaceless.go (generated, vendored)
@@ -1,54 +0,0 @@
package pongo2

import (
	"bytes"
	"regexp"
)

type tagSpacelessNode struct {
	wrapper *NodeWrapper
}

var tagSpacelessRegexp = regexp.MustCompile(`(?U:(<.*>))([\t\n\v\f\r ]+)(?U:(<.*>))`)

func (node *tagSpacelessNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	b := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB

	err := node.wrapper.Execute(ctx, b)
	if err != nil {
		return err
	}

	s := b.String()
	// Repeat this recursively
	changed := true
	for changed {
		s2 := tagSpacelessRegexp.ReplaceAllString(s, "$1$3")
		changed = s != s2
		s = s2
	}

	writer.WriteString(s)

	return nil
}

func tagSpacelessParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	spacelessNode := &tagSpacelessNode{}

	wrapper, _, err := doc.WrapUntilTag("endspaceless")
	if err != nil {
		return nil, err
	}
	spacelessNode.wrapper = wrapper

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Malformed spaceless-tag arguments.", nil)
	}

	return spacelessNode, nil
}

func init() {
	RegisterTag("spaceless", tagSpacelessParser)
}
68 vendor/github.com/flosch/pongo2/tags_ssi.go (generated, vendored)
@@ -1,68 +0,0 @@
package pongo2

import (
	"io/ioutil"
)

type tagSSINode struct {
	filename string
	content  string
	template *Template
}

func (node *tagSSINode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	if node.template != nil {
		// Execute the template within the current context
		includeCtx := make(Context)
		includeCtx.Update(ctx.Public)
		includeCtx.Update(ctx.Private)

		err := node.template.execute(includeCtx, writer)
		if err != nil {
			return err.(*Error)
		}
	} else {
		// Just print out the content
		writer.WriteString(node.content)
	}
	return nil
}

func tagSSIParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	SSINode := &tagSSINode{}

	if fileToken := arguments.MatchType(TokenString); fileToken != nil {
		SSINode.filename = fileToken.Val

		if arguments.Match(TokenIdentifier, "parsed") != nil {
			// parsed
			temporaryTpl, err := doc.template.set.FromFile(doc.template.set.resolveFilename(doc.template, fileToken.Val))
			if err != nil {
				return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, fileToken)
			}
			SSINode.template = temporaryTpl
		} else {
			// plaintext
			buf, err := ioutil.ReadFile(doc.template.set.resolveFilename(doc.template, fileToken.Val))
			if err != nil {
				return nil, (&Error{
					Sender:    "tag:ssi",
					OrigError: err,
				}).updateFromTokenIfNeeded(doc.template, fileToken)
			}
			SSINode.content = string(buf)
		}
	} else {
		return nil, arguments.Error("First argument must be a string.", nil)
	}

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Malformed SSI-tag argument.", nil)
	}

	return SSINode, nil
}

func init() {
	RegisterTag("ssi", tagSSIParser)
}
45 vendor/github.com/flosch/pongo2/tags_templatetag.go (generated, vendored)
@@ -1,45 +0,0 @@
package pongo2

type tagTemplateTagNode struct {
	content string
}

var templateTagMapping = map[string]string{
	"openblock":     "{%",
	"closeblock":    "%}",
	"openvariable":  "{{",
	"closevariable": "}}",
	"openbrace":     "{",
	"closebrace":    "}",
	"opencomment":   "{#",
	"closecomment":  "#}",
}

func (node *tagTemplateTagNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	writer.WriteString(node.content)
	return nil
}

func tagTemplateTagParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	ttNode := &tagTemplateTagNode{}

	if argToken := arguments.MatchType(TokenIdentifier); argToken != nil {
		output, found := templateTagMapping[argToken.Val]
		if !found {
			return nil, arguments.Error("Argument not found", argToken)
		}
		ttNode.content = output
	} else {
		return nil, arguments.Error("Identifier expected.", nil)
	}

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Malformed templatetag-tag argument.", nil)
	}

	return ttNode, nil
}

func init() {
	RegisterTag("templatetag", tagTemplateTagParser)
}
83 vendor/github.com/flosch/pongo2/tags_widthratio.go (generated, vendored)
@@ -1,83 +0,0 @@
package pongo2

import (
	"fmt"
	"math"
)

type tagWidthratioNode struct {
	position     *Token
	current, max IEvaluator
	width        IEvaluator
	ctxName      string
}

func (node *tagWidthratioNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	current, err := node.current.Evaluate(ctx)
	if err != nil {
		return err
	}

	max, err := node.max.Evaluate(ctx)
	if err != nil {
		return err
	}

	width, err := node.width.Evaluate(ctx)
	if err != nil {
		return err
	}

	value := int(math.Ceil(current.Float()/max.Float()*width.Float() + 0.5))

	if node.ctxName == "" {
		writer.WriteString(fmt.Sprintf("%d", value))
	} else {
		ctx.Private[node.ctxName] = value
	}

	return nil
}

func tagWidthratioParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	widthratioNode := &tagWidthratioNode{
		position: start,
	}

	current, err := arguments.ParseExpression()
	if err != nil {
		return nil, err
	}
	widthratioNode.current = current

	max, err := arguments.ParseExpression()
	if err != nil {
		return nil, err
	}
	widthratioNode.max = max

	width, err := arguments.ParseExpression()
	if err != nil {
		return nil, err
	}
	widthratioNode.width = width

	if arguments.MatchOne(TokenKeyword, "as") != nil {
		// Name follows
		nameToken := arguments.MatchType(TokenIdentifier)
		if nameToken == nil {
			return nil, arguments.Error("Expected name (identifier).", nil)
		}
		widthratioNode.ctxName = nameToken.Val
	}

	if arguments.Remaining() > 0 {
		return nil, arguments.Error("Malformed widthratio-tag arguments.", nil)
	}

	return widthratioNode, nil
}

func init() {
	RegisterTag("widthratio", tagWidthratioParser)
}
88 vendor/github.com/flosch/pongo2/tags_with.go (generated, vendored)
@@ -1,88 +0,0 @@
package pongo2

type tagWithNode struct {
	withPairs map[string]IEvaluator
	wrapper   *NodeWrapper
}

func (node *tagWithNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
	//new context for block
	withctx := NewChildExecutionContext(ctx)

	// Put all custom with-pairs into the context
	for key, value := range node.withPairs {
		val, err := value.Evaluate(ctx)
		if err != nil {
			return err
		}
		withctx.Private[key] = val
	}

	return node.wrapper.Execute(withctx, writer)
}

func tagWithParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
	withNode := &tagWithNode{
		withPairs: make(map[string]IEvaluator),
	}

	if arguments.Count() == 0 {
		return nil, arguments.Error("Tag 'with' requires at least one argument.", nil)
	}

	wrapper, endargs, err := doc.WrapUntilTag("endwith")
	if err != nil {
		return nil, err
	}
	withNode.wrapper = wrapper

	if endargs.Count() > 0 {
		return nil, endargs.Error("Arguments not allowed here.", nil)
	}

	// Scan through all arguments to see which style the user uses (old or new style).
	// If we find any "as" keyword we will enforce old style; otherwise we will use new style.
	oldStyle := false // by default we're using the new_style
	for i := 0; i < arguments.Count(); i++ {
		if arguments.PeekN(i, TokenKeyword, "as") != nil {
			oldStyle = true
			break
		}
	}

	for arguments.Remaining() > 0 {
		if oldStyle {
			valueExpr, err := arguments.ParseExpression()
			if err != nil {
				return nil, err
			}
			if arguments.Match(TokenKeyword, "as") == nil {
				return nil, arguments.Error("Expected 'as' keyword.", nil)
			}
			keyToken := arguments.MatchType(TokenIdentifier)
			if keyToken == nil {
				return nil, arguments.Error("Expected an identifier", nil)
			}
			withNode.withPairs[keyToken.Val] = valueExpr
		} else {
			keyToken := arguments.MatchType(TokenIdentifier)
			if keyToken == nil {
				return nil, arguments.Error("Expected an identifier", nil)
			}
			if arguments.Match(TokenSymbol, "=") == nil {
				return nil, arguments.Error("Expected '='.", nil)
			}
			valueExpr, err := arguments.ParseExpression()
			if err != nil {
				return nil, err
			}
			withNode.withPairs[keyToken.Val] = valueExpr
		}
	}

	return withNode, nil
}

func init() {
	RegisterTag("with", tagWithParser)
}
194 vendor/github.com/flosch/pongo2/template.go (generated, vendored)
@@ -1,194 +0,0 @@
package pongo2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/juju/errors"
|
||||
)
|
||||
|
||||
type TemplateWriter interface {
|
||||
io.Writer
|
||||
WriteString(string) (int, error)
|
||||
}
|
||||
|
||||
type templateWriter struct {
|
||||
w io.Writer
|
||||
}
|
||||
|
||||
func (tw *templateWriter) WriteString(s string) (int, error) {
|
||||
return tw.w.Write([]byte(s))
|
||||
}
|
||||
|
||||
func (tw *templateWriter) Write(b []byte) (int, error) {
|
||||
return tw.w.Write(b)
|
||||
}
|
||||
|
||||
type Template struct {
|
||||
set *TemplateSet
|
||||
|
||||
// Input
|
||||
isTplString bool
|
||||
name string
|
||||
tpl string
|
||||
size int
|
||||
|
||||
// Calculation
|
||||
tokens []*Token
|
||||
parser *Parser
|
||||
|
||||
// first come, first serve (it's important to not override existing entries in here)
|
||||
level int
|
||||
parent *Template
|
||||
child *Template
|
||||
blocks map[string]*NodeWrapper
|
||||
exportedMacros map[string]*tagMacroNode
|
||||
|
||||
// Output
|
||||
root *nodeDocument
|
||||
}
|
||||
|
||||
func newTemplateString(set *TemplateSet, tpl []byte) (*Template, error) {
|
||||
return newTemplate(set, "<string>", true, tpl)
|
||||
}
|
||||
|
||||
func newTemplate(set *TemplateSet, name string, isTplString bool, tpl []byte) (*Template, error) {
|
||||
strTpl := string(tpl)
|
||||
|
||||
// Create the template
|
||||
t := &Template{
|
||||
set: set,
|
||||
isTplString: isTplString,
|
||||
name: name,
|
||||
tpl: strTpl,
|
||||
size: len(strTpl),
|
||||
blocks: make(map[string]*NodeWrapper),
|
||||
exportedMacros: make(map[string]*tagMacroNode),
|
||||
}
|
||||
|
||||
// Tokenize it
|
||||
tokens, err := lex(name, strTpl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t.tokens = tokens
|
||||
|
||||
// For debugging purposes, show all tokens:
|
||||
/*for i, t := range tokens {
|
||||
fmt.Printf("%3d. %s\n", i, t)
|
||||
}*/
|
||||
|
||||
// Parse it
|
||||
err = t.parse()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (tpl *Template) execute(context Context, writer TemplateWriter) error {
|
||||
// Determine the parent to be executed (for template inheritance)
|
||||
parent := tpl
|
||||
for parent.parent != nil {
|
||||
parent = parent.parent
|
||||
}
|
||||
|
||||
// Create context if none is given
|
||||
newContext := make(Context)
|
||||
newContext.Update(tpl.set.Globals)
|
||||
|
||||
if context != nil {
|
||||
newContext.Update(context)
|
||||
|
||||
if len(newContext) > 0 {
|
||||
// Check for context name syntax
|
||||
err := newContext.checkForValidIdentifiers()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check for clashes with macro names
|
||||
for k := range newContext {
|
||||
_, has := tpl.exportedMacros[k]
|
||||
if has {
|
||||
return &Error{
|
||||
Filename: tpl.name,
|
||||
Sender: "execution",
|
||||
OrigError: errors.Errorf("context key name '%s' clashes with macro '%s'", k, k),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create operational context
|
||||
ctx := newExecutionContext(parent, newContext)
|
||||
|
||||
// Run the selected document
|
||||
if err := parent.root.Execute(ctx, writer); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tpl *Template) newTemplateWriterAndExecute(context Context, writer io.Writer) error {
|
||||
return tpl.execute(context, &templateWriter{w: writer})
|
||||
}
|
||||
|
||||
func (tpl *Template) newBufferAndExecute(context Context) (*bytes.Buffer, error) {
|
||||
// Create output buffer
|
||||
// We assume that the rendered template will be 30% larger
|
||||
buffer := bytes.NewBuffer(make([]byte, 0, int(float64(tpl.size)*1.3)))
|
||||
if err := tpl.execute(context, buffer); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buffer, nil
|
||||
}
|
||||
|
||||
// Executes the template with the given context and writes to writer (io.Writer)
|
||||
// on success. Context can be nil. Nothing is written on error; instead the error
|
||||
// is being returned.
|
||||
func (tpl *Template) ExecuteWriter(context Context, writer io.Writer) error {
|
||||
buf, err := tpl.newBufferAndExecute(context)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = buf.WriteTo(writer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Same as ExecuteWriter. The only difference between both functions is that
|
||||
// this function might already have written parts of the generated template in the
|
||||
// case of an execution error because there's no intermediate buffer involved for
|
||||
// performance reasons. This is handy if you need high performance template
|
||||
// generation or if you want to manage your own pool of buffers.
|
||||
func (tpl *Template) ExecuteWriterUnbuffered(context Context, writer io.Writer) error {
|
||||
return tpl.newTemplateWriterAndExecute(context, writer)
|
||||
}
|
||||
|
||||
// Executes the template and returns the rendered template as a []byte
|
||||
func (tpl *Template) ExecuteBytes(context Context) ([]byte, error) {
|
||||
// Execute template
|
||||
buffer, err := tpl.newBufferAndExecute(context)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buffer.Bytes(), nil
|
||||
}
|
||||
|
||||
// Executes the template and returns the rendered template as a string
|
||||
func (tpl *Template) Execute(context Context) (string, error) {
|
||||
// Execute template
|
||||
buffer, err := tpl.newBufferAndExecute(context)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return buffer.String(), nil
|
||||
|
||||
}
|
157 vendor/github.com/flosch/pongo2/template_loader.go (generated, vendored)
@@ -1,157 +0,0 @@
package pongo2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/juju/errors"
|
||||
)
|
||||
|
||||
// LocalFilesystemLoader represents a local filesystem loader with basic
|
||||
// BaseDirectory capabilities. The access to the local filesystem is unrestricted.
|
||||
type LocalFilesystemLoader struct {
|
||||
baseDir string
|
||||
}
|
||||
|
||||
// MustNewLocalFileSystemLoader creates a new LocalFilesystemLoader instance
|
||||
// and panics if there's any error during instantiation. The parameters
|
||||
// are the same like NewLocalFileSystemLoader.
|
||||
func MustNewLocalFileSystemLoader(baseDir string) *LocalFilesystemLoader {
|
||||
fs, err := NewLocalFileSystemLoader(baseDir)
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
return fs
|
||||
}
|
||||
|
||||
// NewLocalFileSystemLoader creates a new LocalFilesystemLoader and allows
|
||||
// templatesto be loaded from disk (unrestricted). If any base directory
|
||||
// is given (or being set using SetBaseDir), this base directory is being used
|
||||
// for path calculation in template inclusions/imports. Otherwise the path
|
||||
// is calculated based relatively to the including template's path.
|
||||
func NewLocalFileSystemLoader(baseDir string) (*LocalFilesystemLoader, error) {
|
||||
fs := &LocalFilesystemLoader{}
|
||||
if baseDir != "" {
|
||||
if err := fs.SetBaseDir(baseDir); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
// SetBaseDir sets the template's base directory. This directory will
|
||||
// be used for any relative path in filters, tags and From*-functions to determine
|
||||
// your template. See the comment for NewLocalFileSystemLoader as well.
|
||||
func (fs *LocalFilesystemLoader) SetBaseDir(path string) error {
|
||||
// Make the path absolute
|
||||
if !filepath.IsAbs(path) {
|
||||
abs, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
path = abs
|
||||
}
|
||||
|
||||
// Check for existence
|
||||
fi, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !fi.IsDir() {
|
||||
return errors.Errorf("The given path '%s' is not a directory.", path)
|
||||
}
|
||||
|
||||
fs.baseDir = path
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get reads the path's content from your local filesystem.
|
||||
func (fs *LocalFilesystemLoader) Get(path string) (io.Reader, error) {
|
||||
buf, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bytes.NewReader(buf), nil
|
||||
}
|
||||
|
||||
// Abs resolves a filename relative to the base directory. Absolute paths are allowed.
|
||||
// When there's no base dir set, the absolute path to the filename
|
||||
// will be calculated based on either the provided base directory (which
|
||||
// might be a path of a template which includes another template) or
|
||||
// the current working directory.
|
||||
func (fs *LocalFilesystemLoader) Abs(base, name string) string {
|
||||
if filepath.IsAbs(name) {
|
||||
return name
|
||||
}
|
||||
|
||||
// Our own base dir has always priority; if there's none
|
||||
// we use the path provided in base.
|
||||
var err error
|
||||
if fs.baseDir == "" {
|
||||
if base == "" {
|
||||
base, err = os.Getwd()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return filepath.Join(base, name)
|
||||
}
|
||||
|
||||
return filepath.Join(filepath.Dir(base), name)
|
||||
}
|
||||
|
||||
return filepath.Join(fs.baseDir, name)
|
||||
}
|
||||
|
||||
// SandboxedFilesystemLoader is still WIP.
|
||||
type SandboxedFilesystemLoader struct {
|
||||
*LocalFilesystemLoader
|
||||
}
|
||||
|
||||
// NewSandboxedFilesystemLoader creates a new sandboxed local file system instance.
|
||||
func NewSandboxedFilesystemLoader(baseDir string) (*SandboxedFilesystemLoader, error) {
|
||||
fs, err := NewLocalFileSystemLoader(baseDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &SandboxedFilesystemLoader{
|
||||
LocalFilesystemLoader: fs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Move sandbox to a virtual fs
|
||||
|
||||
/*
|
||||
if len(set.SandboxDirectories) > 0 {
|
||||
defer func() {
|
||||
// Remove any ".." or other crap
|
||||
resolvedPath = filepath.Clean(resolvedPath)
|
||||
|
||||
// Make the path absolute
|
||||
absPath, err := filepath.Abs(resolvedPath)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
resolvedPath = absPath
|
||||
|
||||
// Check against the sandbox directories (once one pattern matches, we're done and can allow it)
|
||||
for _, pattern := range set.SandboxDirectories {
|
||||
matched, err := filepath.Match(pattern, resolvedPath)
|
||||
if err != nil {
|
||||
panic("Wrong sandbox directory match pattern (see http://golang.org/pkg/path/filepath/#Match).")
|
||||
}
|
||||
if matched {
|
||||
// OK!
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// No pattern matched, we have to log+deny the request
|
||||
set.logf("Access attempt outside of the sandbox directories (blocked): '%s'", resolvedPath)
|
||||
resolvedPath = ""
|
||||
}()
|
||||
}
|
||||
*/
|
274 vendor/github.com/flosch/pongo2/template_sets.go (generated, vendored)
@@ -1,274 +0,0 @@
package pongo2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/juju/errors"
|
||||
)
|
||||
|
||||
// TemplateLoader allows to implement a virtual file system.
|
||||
type TemplateLoader interface {
|
||||
// Abs calculates the path to a given template. Whenever a path must be resolved
|
||||
// due to an import from another template, the base equals the parent template's path.
|
||||
Abs(base, name string) string
|
||||
|
||||
// Get returns an io.Reader where the template's content can be read from.
|
||||
Get(path string) (io.Reader, error)
|
||||
}
|
||||
|
||||
// TemplateSet allows you to create your own group of templates with their own
|
||||
// global context (which is shared among all members of the set) and their own
|
||||
// configuration.
|
||||
// It's useful for a separation of different kind of templates
|
||||
// (e. g. web templates vs. mail templates).
|
||||
type TemplateSet struct {
|
||||
name string
|
||||
loader TemplateLoader
|
||||
|
||||
// Globals will be provided to all templates created within this template set
|
||||
Globals Context
|
||||
|
||||
// If debug is true (default false), ExecutionContext.Logf() will work and output
|
||||
// to STDOUT. Furthermore, FromCache() won't cache the templates.
|
||||
// Make sure to synchronize the access to it in case you're changing this
|
||||
// variable during program execution (and template compilation/execution).
|
||||
Debug bool
|
||||
|
||||
// Sandbox features
|
||||
// - Disallow access to specific tags and/or filters (using BanTag() and BanFilter())
|
||||
//
|
||||
// For efficiency reasons you can ban tags/filters only *before* you have
|
||||
// added your first template to the set (restrictions are statically checked).
|
||||
// After you added one, it's not possible anymore (for your personal security).
|
||||
firstTemplateCreated bool
|
||||
bannedTags map[string]bool
|
||||
bannedFilters map[string]bool
|
||||
|
||||
// Template cache (for FromCache())
|
||||
templateCache map[string]*Template
|
||||
templateCacheMutex sync.Mutex
|
||||
}
|
||||
|
||||
// NewSet can be used to create sets with different kind of templates
|
||||
// (e. g. web from mail templates), with different globals or
|
||||
// other configurations.
|
||||
func NewSet(name string, loader TemplateLoader) *TemplateSet {
|
||||
return &TemplateSet{
|
||||
name: name,
|
||||
loader: loader,
|
||||
Globals: make(Context),
|
||||
bannedTags: make(map[string]bool),
|
||||
bannedFilters: make(map[string]bool),
|
||||
templateCache: make(map[string]*Template),
|
||||
}
|
||||
}
|
||||
|
||||
func (set *TemplateSet) resolveFilename(tpl *Template, path string) string {
|
||||
name := ""
|
||||
if tpl != nil && tpl.isTplString {
|
||||
return path
|
||||
}
|
||||
if tpl != nil {
|
||||
name = tpl.name
|
||||
}
|
||||
return set.loader.Abs(name, path)
|
||||
}
|
||||
|
||||
// BanTag bans a specific tag for this template set. See more in the documentation for TemplateSet.
|
||||
func (set *TemplateSet) BanTag(name string) error {
|
||||
_, has := tags[name]
|
||||
if !has {
|
||||
return errors.Errorf("tag '%s' not found", name)
|
||||
}
|
||||
if set.firstTemplateCreated {
|
||||
return errors.New("you cannot ban any tags after you've added your first template to your template set")
|
||||
}
|
||||
_, has = set.bannedTags[name]
|
||||
if has {
|
||||
return errors.Errorf("tag '%s' is already banned", name)
|
||||
}
|
||||
set.bannedTags[name] = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// BanFilter bans a specific filter for this template set. See more in the documentation for TemplateSet.
|
||||
func (set *TemplateSet) BanFilter(name string) error {
|
||||
_, has := filters[name]
|
||||
if !has {
|
||||
return errors.Errorf("filter '%s' not found", name)
|
||||
}
|
||||
if set.firstTemplateCreated {
|
||||
return errors.New("you cannot ban any filters after you've added your first template to your template set")
|
||||
}
|
||||
_, has = set.bannedFilters[name]
|
||||
if has {
|
||||
return errors.Errorf("filter '%s' is already banned", name)
|
||||
}
|
||||
set.bannedFilters[name] = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CleanCache cleans the template cache. If filenames is not empty,
|
||||
// it will remove the template caches of those filenames.
|
||||
// Or it will empty the whole template cache. It is thread-safe.
|
||||
func (set *TemplateSet) CleanCache(filenames ...string) {
|
||||
set.templateCacheMutex.Lock()
|
||||
defer set.templateCacheMutex.Unlock()
|
||||
|
||||
if len(filenames) == 0 {
|
||||
set.templateCache = make(map[string]*Template, len(set.templateCache))
|
||||
}
|
||||
|
||||
for _, filename := range filenames {
|
||||
delete(set.templateCache, set.resolveFilename(nil, filename))
|
||||
}
|
||||
}
|
||||
|
||||
// FromCache is a convenient method to cache templates. It is thread-safe
|
||||
// and will only compile the template associated with a filename once.
|
||||
// If TemplateSet.Debug is true (for example during development phase),
|
||||
// FromCache() will not cache the template and instead recompile it on any
|
||||
// call (to make changes to a template live instantaneously).
|
||||
func (set *TemplateSet) FromCache(filename string) (*Template, error) {
|
||||
if set.Debug {
|
||||
// Recompile on any request
|
||||
return set.FromFile(filename)
|
||||
}
|
||||
// Cache the template
|
||||
cleanedFilename := set.resolveFilename(nil, filename)
|
||||
|
||||
set.templateCacheMutex.Lock()
|
||||
defer set.templateCacheMutex.Unlock()
|
||||
|
||||
tpl, has := set.templateCache[cleanedFilename]
|
||||
|
||||
// Cache miss
|
||||
if !has {
|
||||
tpl, err := set.FromFile(cleanedFilename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
set.templateCache[cleanedFilename] = tpl
|
||||
return tpl, nil
|
||||
}
|
||||
|
||||
// Cache hit
|
||||
return tpl, nil
|
||||
}
|
||||
|
||||
// FromString loads a template from string and returns a Template instance.
|
||||
func (set *TemplateSet) FromString(tpl string) (*Template, error) {
|
||||
set.firstTemplateCreated = true
|
||||
|
||||
return newTemplateString(set, []byte(tpl))
|
||||
}
|
||||
|
||||
// FromBytes loads a template from bytes and returns a Template instance.
|
||||
func (set *TemplateSet) FromBytes(tpl []byte) (*Template, error) {
|
||||
set.firstTemplateCreated = true
|
||||
|
||||
return newTemplateString(set, tpl)
|
||||
}
|
||||
|
||||
// FromFile loads a template from a filename and returns a Template instance.
|
||||
func (set *TemplateSet) FromFile(filename string) (*Template, error) {
|
||||
set.firstTemplateCreated = true
|
||||
|
||||
fd, err := set.loader.Get(set.resolveFilename(nil, filename))
|
||||
if err != nil {
|
||||
return nil, &Error{
|
||||
Filename: filename,
|
||||
Sender: "fromfile",
|
||||
OrigError: err,
|
||||
}
|
||||
}
|
||||
buf, err := ioutil.ReadAll(fd)
|
||||
if err != nil {
|
||||
return nil, &Error{
|
||||
Filename: filename,
|
||||
Sender: "fromfile",
|
||||
OrigError: err,
|
||||
}
|
||||
}
|
||||
|
||||
return newTemplate(set, filename, false, buf)
|
||||
}
|
||||
|
||||
// RenderTemplateString is a shortcut and renders a template string directly.
|
||||
func (set *TemplateSet) RenderTemplateString(s string, ctx Context) (string, error) {
|
||||
set.firstTemplateCreated = true
|
||||
|
||||
tpl := Must(set.FromString(s))
|
||||
result, err := tpl.Execute(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// RenderTemplateBytes is a shortcut and renders template bytes directly.
|
||||
func (set *TemplateSet) RenderTemplateBytes(b []byte, ctx Context) (string, error) {
|
||||
set.firstTemplateCreated = true
|
||||
|
||||
tpl := Must(set.FromBytes(b))
|
||||
result, err := tpl.Execute(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// RenderTemplateFile is a shortcut and renders a template file directly.
|
||||
func (set *TemplateSet) RenderTemplateFile(fn string, ctx Context) (string, error) {
|
||||
set.firstTemplateCreated = true
|
||||
|
||||
tpl := Must(set.FromFile(fn))
|
||||
result, err := tpl.Execute(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (set *TemplateSet) logf(format string, args ...interface{}) {
|
||||
if set.Debug {
|
||||
logger.Printf(fmt.Sprintf("[template set: %s] %s", set.name, format), args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Logging function (internally used)
|
||||
func logf(format string, items ...interface{}) {
|
||||
if debug {
|
||||
logger.Printf(format, items...)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
debug bool // internal debugging
|
||||
logger = log.New(os.Stdout, "[pongo2] ", log.LstdFlags|log.Lshortfile)
|
||||
|
||||
// DefaultLoader allows the default un-sandboxed access to the local file
|
||||
// system and is being used by the DefaultSet.
|
||||
DefaultLoader = MustNewLocalFileSystemLoader("")
|
||||
|
||||
// DefaultSet is a set created for you for convenience reasons.
|
||||
DefaultSet = NewSet("default", DefaultLoader)
|
||||
|
||||
// Methods on the default set
|
||||
FromString = DefaultSet.FromString
|
||||
FromBytes = DefaultSet.FromBytes
|
||||
FromFile = DefaultSet.FromFile
|
||||
FromCache = DefaultSet.FromCache
|
||||
RenderTemplateString = DefaultSet.RenderTemplateString
|
||||
RenderTemplateFile = DefaultSet.RenderTemplateFile
|
||||
|
||||
// Globals for the default set
|
||||
Globals = DefaultSet.Globals
|
||||
)
|
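For reference, a minimal sketch (not part of the removed file) of how the TemplateSet API above is typically used; the set name, loader path, template file and banned tag are hypothetical, and errors are simply panicked.

// Hypothetical usage sketch of the TemplateSet API removed above.
package main

import (
    "fmt"

    "github.com/flosch/pongo2"
)

func main() {
    // Create a dedicated set with its own loader (paths are hypothetical).
    set := pongo2.NewSet("web", pongo2.MustNewLocalFileSystemLoader("./templates"))

    // Sandbox restrictions must be applied before the first template is added.
    if err := set.BanTag("include"); err != nil {
        panic(err)
    }

    // FromCache compiles a file once; with set.Debug = true it recompiles on every call.
    tpl, err := set.FromCache("index.html")
    if err != nil {
        panic(err)
    }

    out, err := tpl.Execute(pongo2.Context{"name": "world"})
    if err != nil {
        panic(err)
    }
    fmt.Println(out)
}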
520 vendor/github.com/flosch/pongo2/value.go (generated, vendored)
@@ -1,520 +0,0 @@
package pongo2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Value struct {
|
||||
val reflect.Value
|
||||
safe bool // used to indicate whether a Value needs explicit escaping in the template
|
||||
}
|
||||
|
||||
// AsValue converts any given value to a pongo2.Value
|
||||
// Usually used within your own functions passed to a template
|
||||
// through a Context or within filter functions.
|
||||
//
|
||||
// Example:
|
||||
// AsValue("my string")
|
||||
func AsValue(i interface{}) *Value {
|
||||
return &Value{
|
||||
val: reflect.ValueOf(i),
|
||||
}
|
||||
}
|
||||
|
||||
// AsSafeValue works like AsValue, but does not apply the 'escape' filter.
|
||||
func AsSafeValue(i interface{}) *Value {
|
||||
return &Value{
|
||||
val: reflect.ValueOf(i),
|
||||
safe: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (v *Value) getResolvedValue() reflect.Value {
|
||||
if v.val.IsValid() && v.val.Kind() == reflect.Ptr {
|
||||
return v.val.Elem()
|
||||
}
|
||||
return v.val
|
||||
}
|
||||
|
||||
// IsString checks whether the underlying value is a string
|
||||
func (v *Value) IsString() bool {
|
||||
return v.getResolvedValue().Kind() == reflect.String
|
||||
}
|
||||
|
||||
// IsBool checks whether the underlying value is a bool
|
||||
func (v *Value) IsBool() bool {
|
||||
return v.getResolvedValue().Kind() == reflect.Bool
|
||||
}
|
||||
|
||||
// IsFloat checks whether the underlying value is a float
|
||||
func (v *Value) IsFloat() bool {
|
||||
return v.getResolvedValue().Kind() == reflect.Float32 ||
|
||||
v.getResolvedValue().Kind() == reflect.Float64
|
||||
}
|
||||
|
||||
// IsInteger checks whether the underlying value is an integer
|
||||
func (v *Value) IsInteger() bool {
|
||||
return v.getResolvedValue().Kind() == reflect.Int ||
|
||||
v.getResolvedValue().Kind() == reflect.Int8 ||
|
||||
v.getResolvedValue().Kind() == reflect.Int16 ||
|
||||
v.getResolvedValue().Kind() == reflect.Int32 ||
|
||||
v.getResolvedValue().Kind() == reflect.Int64 ||
|
||||
v.getResolvedValue().Kind() == reflect.Uint ||
|
||||
v.getResolvedValue().Kind() == reflect.Uint8 ||
|
||||
v.getResolvedValue().Kind() == reflect.Uint16 ||
|
||||
v.getResolvedValue().Kind() == reflect.Uint32 ||
|
||||
v.getResolvedValue().Kind() == reflect.Uint64
|
||||
}
|
||||
|
||||
// IsNumber checks whether the underlying value is either an integer
|
||||
// or a float.
|
||||
func (v *Value) IsNumber() bool {
|
||||
return v.IsInteger() || v.IsFloat()
|
||||
}
|
||||
|
||||
// IsNil checks whether the underlying value is NIL
|
||||
func (v *Value) IsNil() bool {
|
||||
//fmt.Printf("%+v\n", v.getResolvedValue().Type().String())
|
||||
return !v.getResolvedValue().IsValid()
|
||||
}
|
||||
|
||||
// String returns a string for the underlying value. If this value is not
|
||||
// of type string, pongo2 tries to convert it. Currently the following
|
||||
// types for underlying values are supported:
|
||||
//
|
||||
// 1. string
|
||||
// 2. int/uint (any size)
|
||||
// 3. float (any precision)
|
||||
// 4. bool
|
||||
// 5. time.Time
|
||||
// 6. String() will be called on the underlying value if provided
|
||||
//
|
||||
// NIL values will lead to an empty string. Unsupported types lead
|
||||
// to their respective type name.
|
||||
func (v *Value) String() string {
|
||||
if v.IsNil() {
|
||||
return ""
|
||||
}
|
||||
|
||||
switch v.getResolvedValue().Kind() {
|
||||
case reflect.String:
|
||||
return v.getResolvedValue().String()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return strconv.FormatInt(v.getResolvedValue().Int(), 10)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return strconv.FormatUint(v.getResolvedValue().Uint(), 10)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return fmt.Sprintf("%f", v.getResolvedValue().Float())
|
||||
case reflect.Bool:
|
||||
if v.Bool() {
|
||||
return "True"
|
||||
}
|
||||
return "False"
|
||||
case reflect.Struct:
|
||||
if t, ok := v.Interface().(fmt.Stringer); ok {
|
||||
return t.String()
|
||||
}
|
||||
}
|
||||
|
||||
logf("Value.String() not implemented for type: %s\n", v.getResolvedValue().Kind().String())
|
||||
return v.getResolvedValue().String()
|
||||
}
|
||||
|
||||
// Integer returns the underlying value as an integer (converts the underlying
|
||||
// value, if necessary). If it's not possible to convert the underlying value,
|
||||
// it will return 0.
|
||||
func (v *Value) Integer() int {
|
||||
switch v.getResolvedValue().Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return int(v.getResolvedValue().Int())
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return int(v.getResolvedValue().Uint())
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return int(v.getResolvedValue().Float())
|
||||
case reflect.String:
|
||||
// Try to convert from string to int (parsed as float64 base 10, then truncated)
|
||||
f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return int(f)
|
||||
default:
|
||||
logf("Value.Integer() not available for type: %s\n", v.getResolvedValue().Kind().String())
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// Float returns the underlying value as a float (converts the underlying
|
||||
// value, if necessary). If it's not possible to convert the underlying value,
|
||||
// it will return 0.0.
|
||||
func (v *Value) Float() float64 {
|
||||
switch v.getResolvedValue().Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return float64(v.getResolvedValue().Int())
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return float64(v.getResolvedValue().Uint())
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.getResolvedValue().Float()
|
||||
case reflect.String:
|
||||
// Try to convert from string to float64 (base 10)
|
||||
f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64)
|
||||
if err != nil {
|
||||
return 0.0
|
||||
}
|
||||
return f
|
||||
default:
|
||||
logf("Value.Float() not available for type: %s\n", v.getResolvedValue().Kind().String())
|
||||
return 0.0
|
||||
}
|
||||
}
|
||||
|
||||
// Bool returns the underlying value as bool. If the value is not bool, false
|
||||
// will always be returned. If you're looking for true/false-evaluation of the
|
||||
// underlying value, have a look at the IsTrue() function.
|
||||
func (v *Value) Bool() bool {
|
||||
switch v.getResolvedValue().Kind() {
|
||||
case reflect.Bool:
|
||||
return v.getResolvedValue().Bool()
|
||||
default:
|
||||
logf("Value.Bool() not available for type: %s\n", v.getResolvedValue().Kind().String())
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// IsTrue tries to evaluate the underlying value the Pythonic-way:
|
||||
//
|
||||
// Returns TRUE in one of the following cases:
|
||||
//
|
||||
// * int != 0
|
||||
// * uint != 0
|
||||
// * float != 0.0
|
||||
// * len(array/chan/map/slice/string) > 0
|
||||
// * bool == true
|
||||
// * underlying value is a struct
|
||||
//
|
||||
// Otherwise returns always FALSE.
|
||||
func (v *Value) IsTrue() bool {
|
||||
switch v.getResolvedValue().Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.getResolvedValue().Int() != 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return v.getResolvedValue().Uint() != 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.getResolvedValue().Float() != 0
|
||||
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.getResolvedValue().Len() > 0
|
||||
case reflect.Bool:
|
||||
return v.getResolvedValue().Bool()
|
||||
case reflect.Struct:
|
||||
return true // struct instance is always true
|
||||
default:
|
||||
logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String())
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Negate tries to negate the underlying value. It's mainly used for
|
||||
// the NOT-operator and in conjunction with a call to
|
||||
// return_value.IsTrue() afterwards.
|
||||
//
|
||||
// Example:
|
||||
// AsValue(1).Negate().IsTrue() == false
|
||||
func (v *Value) Negate() *Value {
|
||||
switch v.getResolvedValue().Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
if v.Integer() != 0 {
|
||||
return AsValue(0)
|
||||
}
|
||||
return AsValue(1)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
if v.Float() != 0.0 {
|
||||
return AsValue(float64(0.0))
|
||||
}
|
||||
return AsValue(float64(1.1))
|
||||
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
|
||||
return AsValue(v.getResolvedValue().Len() == 0)
|
||||
case reflect.Bool:
|
||||
return AsValue(!v.getResolvedValue().Bool())
|
||||
case reflect.Struct:
|
||||
return AsValue(false)
|
||||
default:
|
||||
logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String())
|
||||
return AsValue(true)
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the length for an array, chan, map, slice or string.
|
||||
// Otherwise it will return 0.
|
||||
func (v *Value) Len() int {
|
||||
switch v.getResolvedValue().Kind() {
|
||||
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
|
||||
return v.getResolvedValue().Len()
|
||||
case reflect.String:
|
||||
runes := []rune(v.getResolvedValue().String())
|
||||
return len(runes)
|
||||
default:
|
||||
logf("Value.Len() not available for type: %s\n", v.getResolvedValue().Kind().String())
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// Slice slices an array, slice or string. Otherwise it will
|
||||
// return an empty []int.
|
||||
func (v *Value) Slice(i, j int) *Value {
|
||||
switch v.getResolvedValue().Kind() {
|
||||
case reflect.Array, reflect.Slice:
|
||||
return AsValue(v.getResolvedValue().Slice(i, j).Interface())
|
||||
case reflect.String:
|
||||
runes := []rune(v.getResolvedValue().String())
|
||||
return AsValue(string(runes[i:j]))
|
||||
default:
|
||||
logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String())
|
||||
return AsValue([]int{})
|
||||
}
|
||||
}
|
||||
|
||||
// Index gets the i-th item of an array, slice or string. Otherwise
|
||||
// it will return NIL.
|
||||
func (v *Value) Index(i int) *Value {
|
||||
switch v.getResolvedValue().Kind() {
|
||||
case reflect.Array, reflect.Slice:
|
||||
if i >= v.Len() {
|
||||
return AsValue(nil)
|
||||
}
|
||||
return AsValue(v.getResolvedValue().Index(i).Interface())
|
||||
case reflect.String:
|
||||
//return AsValue(v.getResolvedValue().Slice(i, i+1).Interface())
|
||||
s := v.getResolvedValue().String()
|
||||
runes := []rune(s)
|
||||
if i < len(runes) {
|
||||
return AsValue(string(runes[i]))
|
||||
}
|
||||
return AsValue("")
|
||||
default:
|
||||
logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String())
|
||||
return AsValue([]int{})
|
||||
}
|
||||
}
|
||||
|
||||
// Contains checks whether the underlying value (which must be of type struct, map,
|
||||
// string, array or slice) contains another Value (e. g. used to check
|
||||
// whether a struct contains a specific field or a map contains a specific key).
|
||||
//
|
||||
// Example:
|
||||
// AsValue("Hello, World!").Contains(AsValue("World")) == true
|
||||
func (v *Value) Contains(other *Value) bool {
|
||||
switch v.getResolvedValue().Kind() {
|
||||
case reflect.Struct:
|
||||
fieldValue := v.getResolvedValue().FieldByName(other.String())
|
||||
return fieldValue.IsValid()
|
||||
case reflect.Map:
|
||||
var mapValue reflect.Value
|
||||
switch other.Interface().(type) {
|
||||
case int:
|
||||
mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue())
|
||||
case string:
|
||||
mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue())
|
||||
default:
|
||||
logf("Value.Contains() does not support lookup type '%s'\n", other.getResolvedValue().Kind().String())
|
||||
return false
|
||||
}
|
||||
|
||||
return mapValue.IsValid()
|
||||
case reflect.String:
|
||||
return strings.Contains(v.getResolvedValue().String(), other.String())
|
||||
|
||||
case reflect.Slice, reflect.Array:
|
||||
for i := 0; i < v.getResolvedValue().Len(); i++ {
|
||||
item := v.getResolvedValue().Index(i)
|
||||
if other.Interface() == item.Interface() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
|
||||
default:
|
||||
logf("Value.Contains() not available for type: %s\n", v.getResolvedValue().Kind().String())
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// CanSlice checks whether the underlying value is of type array, slice or string.
|
||||
// You normally would use CanSlice() before using the Slice() operation.
|
||||
func (v *Value) CanSlice() bool {
|
||||
switch v.getResolvedValue().Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.String:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Iterate iterates over a map, array, slice or a string. It calls the
|
||||
// function's first argument for every value with the following arguments:
|
||||
//
|
||||
// idx current 0-index
|
||||
// count total amount of items
|
||||
// key *Value for the key or item
|
||||
// value *Value (only for maps, the respective value for a specific key)
|
||||
//
|
||||
// If the underlying value has no items or is not one of the types above,
|
||||
// the empty function (function's second argument) will be called.
|
||||
func (v *Value) Iterate(fn func(idx, count int, key, value *Value) bool, empty func()) {
|
||||
v.IterateOrder(fn, empty, false, false)
|
||||
}
|
||||
|
||||
// IterateOrder behaves like Value.Iterate, but can iterate through an array/slice/string in reverse. Does
|
||||
// not affect the iteration through a map because maps don't have any particular order.
|
||||
// However, you can force an order using the `sorted` keyword (and even use `reversed sorted`).
|
||||
func (v *Value) IterateOrder(fn func(idx, count int, key, value *Value) bool, empty func(), reverse bool, sorted bool) {
|
||||
switch v.getResolvedValue().Kind() {
|
||||
case reflect.Map:
|
||||
keys := sortedKeys(v.getResolvedValue().MapKeys())
|
||||
if sorted {
|
||||
if reverse {
|
||||
sort.Sort(sort.Reverse(keys))
|
||||
} else {
|
||||
sort.Sort(keys)
|
||||
}
|
||||
}
|
||||
keyLen := len(keys)
|
||||
for idx, key := range keys {
|
||||
value := v.getResolvedValue().MapIndex(key)
|
||||
if !fn(idx, keyLen, &Value{val: key}, &Value{val: value}) {
|
||||
return
|
||||
}
|
||||
}
|
||||
if keyLen == 0 {
|
||||
empty()
|
||||
}
|
||||
return // done
|
||||
case reflect.Array, reflect.Slice:
|
||||
var items valuesList
|
||||
|
||||
itemCount := v.getResolvedValue().Len()
|
||||
for i := 0; i < itemCount; i++ {
|
||||
items = append(items, &Value{val: v.getResolvedValue().Index(i)})
|
||||
}
|
||||
|
||||
if sorted {
|
||||
if reverse {
|
||||
sort.Sort(sort.Reverse(items))
|
||||
} else {
|
||||
sort.Sort(items)
|
||||
}
|
||||
} else {
|
||||
if reverse {
|
||||
for i := 0; i < itemCount/2; i++ {
|
||||
items[i], items[itemCount-1-i] = items[itemCount-1-i], items[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(items) > 0 {
|
||||
for idx, item := range items {
|
||||
if !fn(idx, itemCount, item, nil) {
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
empty()
|
||||
}
|
||||
return // done
|
||||
case reflect.String:
|
||||
if sorted {
|
||||
// TODO(flosch): Handle sorted
|
||||
panic("TODO: handle sort for type string")
|
||||
}
|
||||
|
||||
// TODO(flosch): Not utf8-compatible (utf8-decoding necessary)
|
||||
charCount := v.getResolvedValue().Len()
|
||||
if charCount > 0 {
|
||||
if reverse {
|
||||
for i := charCount - 1; i >= 0; i-- {
|
||||
if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) {
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < charCount; i++ {
|
||||
if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
empty()
|
||||
}
|
||||
return // done
|
||||
default:
|
||||
logf("Value.Iterate() not available for type: %s\n", v.getResolvedValue().Kind().String())
|
||||
}
|
||||
empty()
|
||||
}
|
||||
|
||||
// Interface gives you access to the underlying value.
|
||||
func (v *Value) Interface() interface{} {
|
||||
if v.val.IsValid() {
|
||||
return v.val.Interface()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EqualValueTo checks whether two values contain the same value or object.
|
||||
func (v *Value) EqualValueTo(other *Value) bool {
|
||||
// comparison of uint with int fails using .Interface()-comparison (see issue #64)
|
||||
if v.IsInteger() && other.IsInteger() {
|
||||
return v.Integer() == other.Integer()
|
||||
}
|
||||
return v.Interface() == other.Interface()
|
||||
}
|
||||
|
||||
type sortedKeys []reflect.Value
|
||||
|
||||
func (sk sortedKeys) Len() int {
|
||||
return len(sk)
|
||||
}
|
||||
|
||||
func (sk sortedKeys) Less(i, j int) bool {
|
||||
vi := &Value{val: sk[i]}
|
||||
vj := &Value{val: sk[j]}
|
||||
switch {
|
||||
case vi.IsInteger() && vj.IsInteger():
|
||||
return vi.Integer() < vj.Integer()
|
||||
case vi.IsFloat() && vj.IsFloat():
|
||||
return vi.Float() < vj.Float()
|
||||
default:
|
||||
return vi.String() < vj.String()
|
||||
}
|
||||
}
|
||||
|
||||
func (sk sortedKeys) Swap(i, j int) {
|
||||
sk[i], sk[j] = sk[j], sk[i]
|
||||
}
|
||||
|
||||
type valuesList []*Value
|
||||
|
||||
func (vl valuesList) Len() int {
|
||||
return len(vl)
|
||||
}
|
||||
|
||||
func (vl valuesList) Less(i, j int) bool {
|
||||
vi := vl[i]
|
||||
vj := vl[j]
|
||||
switch {
|
||||
case vi.IsInteger() && vj.IsInteger():
|
||||
return vi.Integer() < vj.Integer()
|
||||
case vi.IsFloat() && vj.IsFloat():
|
||||
return vi.Float() < vj.Float()
|
||||
default:
|
||||
return vi.String() < vj.String()
|
||||
}
|
||||
}
|
||||
|
||||
func (vl valuesList) Swap(i, j int) {
|
||||
vl[i], vl[j] = vl[j], vl[i]
|
||||
}
|
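For reference, a small sketch (not part of the removed file) of the Value helpers above as they might be used from a custom filter or tag; the sample data is made up.

// Hypothetical usage sketch of the pongo2.Value helpers removed above.
package main

import (
    "fmt"

    "github.com/flosch/pongo2"
)

func main() {
    v := pongo2.AsValue([]string{"a", "b", "c"})

    fmt.Println(v.IsTrue())                      // true: non-empty slice
    fmt.Println(v.Len())                         // 3
    fmt.Println(v.Contains(pongo2.AsValue("b"))) // true

    // Iterate calls the first callback per item and the second one when empty.
    v.Iterate(func(idx, count int, item, _ *pongo2.Value) bool {
        fmt.Printf("%d/%d: %s\n", idx+1, count, item.String())
        return true // continue iterating
    }, func() {
        fmt.Println("empty")
    })
}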
695 vendor/github.com/flosch/pongo2/variable.go (generated, vendored)
@@ -1,695 +0,0 @@
package pongo2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/juju/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
varTypeInt = iota
|
||||
varTypeIdent
|
||||
)
|
||||
|
||||
var (
|
||||
typeOfValuePtr = reflect.TypeOf(new(Value))
|
||||
typeOfExecCtxPtr = reflect.TypeOf(new(ExecutionContext))
|
||||
)
|
||||
|
||||
type variablePart struct {
|
||||
typ int
|
||||
s string
|
||||
i int
|
||||
|
||||
isFunctionCall bool
|
||||
callingArgs []functionCallArgument // needed for a function call, represents all argument nodes (INode supports nested function calls)
|
||||
}
|
||||
|
||||
type functionCallArgument interface {
|
||||
Evaluate(*ExecutionContext) (*Value, *Error)
|
||||
}
|
||||
|
||||
// TODO: Add location tokens
|
||||
type stringResolver struct {
|
||||
locationToken *Token
|
||||
val string
|
||||
}
|
||||
|
||||
type intResolver struct {
|
||||
locationToken *Token
|
||||
val int
|
||||
}
|
||||
|
||||
type floatResolver struct {
|
||||
locationToken *Token
|
||||
val float64
|
||||
}
|
||||
|
||||
type boolResolver struct {
|
||||
locationToken *Token
|
||||
val bool
|
||||
}
|
||||
|
||||
type variableResolver struct {
|
||||
locationToken *Token
|
||||
|
||||
parts []*variablePart
|
||||
}
|
||||
|
||||
type nodeFilteredVariable struct {
|
||||
locationToken *Token
|
||||
|
||||
resolver IEvaluator
|
||||
filterChain []*filterCall
|
||||
}
|
||||
|
||||
type nodeVariable struct {
|
||||
locationToken *Token
|
||||
expr IEvaluator
|
||||
}
|
||||
|
||||
type executionCtxEval struct{}
|
||||
|
||||
func (v *nodeFilteredVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
|
||||
value, err := v.Evaluate(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writer.WriteString(value.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vr *variableResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
|
||||
value, err := vr.Evaluate(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writer.WriteString(value.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *stringResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
|
||||
value, err := s.Evaluate(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writer.WriteString(value.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *intResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
|
||||
value, err := i.Evaluate(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writer.WriteString(value.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *floatResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
|
||||
value, err := f.Evaluate(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writer.WriteString(value.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *boolResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
|
||||
value, err := b.Evaluate(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writer.WriteString(value.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *nodeFilteredVariable) GetPositionToken() *Token {
|
||||
return v.locationToken
|
||||
}
|
||||
|
||||
func (vr *variableResolver) GetPositionToken() *Token {
|
||||
return vr.locationToken
|
||||
}
|
||||
|
||||
func (s *stringResolver) GetPositionToken() *Token {
|
||||
return s.locationToken
|
||||
}
|
||||
|
||||
func (i *intResolver) GetPositionToken() *Token {
|
||||
return i.locationToken
|
||||
}
|
||||
|
||||
func (f *floatResolver) GetPositionToken() *Token {
|
||||
return f.locationToken
|
||||
}
|
||||
|
||||
func (b *boolResolver) GetPositionToken() *Token {
|
||||
return b.locationToken
|
||||
}
|
||||
|
||||
func (s *stringResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
|
||||
return AsValue(s.val), nil
|
||||
}
|
||||
|
||||
func (i *intResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
|
||||
return AsValue(i.val), nil
|
||||
}
|
||||
|
||||
func (f *floatResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
|
||||
return AsValue(f.val), nil
|
||||
}
|
||||
|
||||
func (b *boolResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
|
||||
return AsValue(b.val), nil
|
||||
}
|
||||
|
||||
func (s *stringResolver) FilterApplied(name string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (i *intResolver) FilterApplied(name string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (f *floatResolver) FilterApplied(name string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (b *boolResolver) FilterApplied(name string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (nv *nodeVariable) FilterApplied(name string) bool {
|
||||
return nv.expr.FilterApplied(name)
|
||||
}
|
||||
|
||||
func (nv *nodeVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
|
||||
value, err := nv.expr.Evaluate(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !nv.expr.FilterApplied("safe") && !value.safe && value.IsString() && ctx.Autoescape {
|
||||
// apply escape filter
|
||||
value, err = filters["escape"](value, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
writer.WriteString(value.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (executionCtxEval) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
|
||||
return AsValue(ctx), nil
|
||||
}
|
||||
|
||||
func (vr *variableResolver) FilterApplied(name string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (vr *variableResolver) String() string {
|
||||
parts := make([]string, 0, len(vr.parts))
|
||||
for _, p := range vr.parts {
|
||||
switch p.typ {
|
||||
case varTypeInt:
|
||||
parts = append(parts, strconv.Itoa(p.i))
|
||||
case varTypeIdent:
|
||||
parts = append(parts, p.s)
|
||||
default:
|
||||
panic("unimplemented")
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, ".")
|
||||
}
|
||||
|
||||
func (vr *variableResolver) resolve(ctx *ExecutionContext) (*Value, error) {
|
||||
var current reflect.Value
|
||||
var isSafe bool
|
||||
|
||||
for idx, part := range vr.parts {
|
||||
if idx == 0 {
|
||||
// We're looking up the first part of the variable.
|
||||
// First we have a look in our private
|
||||
// context (e. g. information provided by tags, like the forloop)
|
||||
val, inPrivate := ctx.Private[vr.parts[0].s]
|
||||
if !inPrivate {
|
||||
// Nothing found? Then have a final lookup in the public context
|
||||
val = ctx.Public[vr.parts[0].s]
|
||||
}
|
||||
current = reflect.ValueOf(val) // Get the initial value
|
||||
} else {
|
||||
// Next parts, resolve it from current
|
||||
|
||||
// Before resolving the pointer, let's see if we have a method to call
|
||||
// Problem with resolving the pointer is we're changing the receiver
|
||||
isFunc := false
|
||||
if part.typ == varTypeIdent {
|
||||
funcValue := current.MethodByName(part.s)
|
||||
if funcValue.IsValid() {
|
||||
current = funcValue
|
||||
isFunc = true
|
||||
}
|
||||
}
|
||||
|
||||
if !isFunc {
|
||||
// If current is a pointer, resolve it
|
||||
if current.Kind() == reflect.Ptr {
|
||||
current = current.Elem()
|
||||
if !current.IsValid() {
|
||||
// Value is not valid (anymore)
|
||||
return AsValue(nil), nil
|
||||
}
|
||||
}
|
||||
|
||||
// Look up which part must be called now
|
||||
switch part.typ {
|
||||
case varTypeInt:
|
||||
// Calling an index is only possible for:
|
||||
// * slices/arrays/strings
|
||||
switch current.Kind() {
|
||||
case reflect.String, reflect.Array, reflect.Slice:
|
||||
if part.i >= 0 && current.Len() > part.i {
|
||||
current = current.Index(part.i)
|
||||
} else {
|
||||
// In Django, exceeding the length of a list is just empty.
|
||||
return AsValue(nil), nil
|
||||
}
|
||||
default:
|
||||
return nil, errors.Errorf("Can't access an index on type %s (variable %s)",
|
||||
current.Kind().String(), vr.String())
|
||||
}
|
||||
case varTypeIdent:
|
||||
// debugging:
|
||||
// fmt.Printf("now = %s (kind: %s)\n", part.s, current.Kind().String())
|
||||
|
||||
// Calling a field or key
|
||||
switch current.Kind() {
|
||||
case reflect.Struct:
|
||||
current = current.FieldByName(part.s)
|
||||
case reflect.Map:
|
||||
current = current.MapIndex(reflect.ValueOf(part.s))
|
||||
default:
|
||||
return nil, errors.Errorf("Can't access a field by name on type %s (variable %s)",
|
||||
current.Kind().String(), vr.String())
|
||||
}
|
||||
default:
|
||||
panic("unimplemented")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !current.IsValid() {
|
||||
// Value is not valid (anymore)
|
||||
return AsValue(nil), nil
|
||||
}
|
||||
|
||||
// If current is a reflect.ValueOf(pongo2.Value), then unpack it
|
||||
// Happens in function calls (as a return value) or by injecting
|
||||
// into the execution context (e.g. in a for-loop)
|
||||
if current.Type() == typeOfValuePtr {
|
||||
tmpValue := current.Interface().(*Value)
|
||||
current = tmpValue.val
|
||||
isSafe = tmpValue.safe
|
||||
}
|
||||
|
||||
// Check whether this is an interface and resolve it where required
|
||||
if current.Kind() == reflect.Interface {
|
||||
current = reflect.ValueOf(current.Interface())
|
||||
}
|
||||
|
||||
// Check if the part is a function call
|
||||
if part.isFunctionCall || current.Kind() == reflect.Func {
|
||||
// Check for callable
|
||||
if current.Kind() != reflect.Func {
|
||||
return nil, errors.Errorf("'%s' is not a function (it is %s)", vr.String(), current.Kind().String())
|
||||
}
|
||||
|
||||
// Check for correct function syntax and types
|
||||
// func(*Value, ...) *Value
|
||||
t := current.Type()
|
||||
currArgs := part.callingArgs
|
||||
|
||||
// If an implicit ExecCtx is needed
|
||||
if t.NumIn() > 0 && t.In(0) == typeOfExecCtxPtr {
|
||||
currArgs = append([]functionCallArgument{executionCtxEval{}}, currArgs...)
|
||||
}
|
||||
|
||||
// Input arguments
|
||||
if len(currArgs) != t.NumIn() && !(len(currArgs) >= t.NumIn()-1 && t.IsVariadic()) {
|
||||
return nil,
|
||||
errors.Errorf("Function input argument count (%d) of '%s' must be equal to the calling argument count (%d).",
|
||||
t.NumIn(), vr.String(), len(currArgs))
|
||||
}
|
||||
|
||||
// Output arguments
|
||||
if t.NumOut() != 1 && t.NumOut() != 2 {
|
||||
return nil, errors.Errorf("'%s' must have exactly 1 or 2 output arguments, the second argument must be of type error", vr.String())
|
||||
}
|
||||
|
||||
// Evaluate all parameters
|
||||
var parameters []reflect.Value
|
||||
|
||||
numArgs := t.NumIn()
|
||||
isVariadic := t.IsVariadic()
|
||||
var fnArg reflect.Type
|
||||
|
||||
for idx, arg := range currArgs {
|
||||
pv, err := arg.Evaluate(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if isVariadic {
|
||||
if idx >= t.NumIn()-1 {
|
||||
fnArg = t.In(numArgs - 1).Elem()
|
||||
} else {
|
||||
fnArg = t.In(idx)
|
||||
}
|
||||
} else {
|
||||
fnArg = t.In(idx)
|
||||
}
|
||||
|
||||
if fnArg != typeOfValuePtr {
|
||||
// The function's argument is not a *pongo2.Value, so we have to check whether the input argument has the same type as the function's argument
|
||||
if !isVariadic {
|
||||
if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface {
|
||||
return nil, errors.Errorf("Function input argument %d of '%s' must be of type %s or *pongo2.Value (not %T).",
|
||||
idx, vr.String(), fnArg.String(), pv.Interface())
|
||||
}
|
||||
// Function's argument has another type, using the interface-value
|
||||
parameters = append(parameters, reflect.ValueOf(pv.Interface()))
|
||||
} else {
|
||||
if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface {
|
||||
return nil, errors.Errorf("Function variadic input argument of '%s' must be of type %s or *pongo2.Value (not %T).",
|
||||
vr.String(), fnArg.String(), pv.Interface())
|
||||
}
|
||||
// Function's argument has another type, using the interface-value
|
||||
parameters = append(parameters, reflect.ValueOf(pv.Interface()))
|
||||
}
|
||||
} else {
|
||||
// Function's argument is a *pongo2.Value
|
||||
parameters = append(parameters, reflect.ValueOf(pv))
|
||||
}
|
||||
}
|
||||
|
||||
// Check if any of the values are invalid
|
||||
for _, p := range parameters {
|
||||
if p.Kind() == reflect.Invalid {
|
||||
return nil, errors.Errorf("Calling a function using an invalid parameter")
|
||||
}
|
||||
}
|
||||
|
||||
// Call it and get first return parameter back
|
||||
values := current.Call(parameters)
|
||||
rv := values[0]
|
||||
if t.NumOut() == 2 {
|
||||
e := values[1].Interface()
|
||||
if e != nil {
|
||||
err, ok := e.(error)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("The second return value is not an error")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if rv.Type() != typeOfValuePtr {
|
||||
current = reflect.ValueOf(rv.Interface())
|
||||
} else {
|
||||
// Return the function call value
|
||||
current = rv.Interface().(*Value).val
|
||||
isSafe = rv.Interface().(*Value).safe
|
||||
}
|
||||
}
|
||||
|
||||
if !current.IsValid() {
|
||||
// Value is not valid (e. g. NIL value)
|
||||
return AsValue(nil), nil
|
||||
}
|
||||
}
|
||||
|
||||
return &Value{val: current, safe: isSafe}, nil
|
||||
}
|
||||
|
||||
func (vr *variableResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
|
||||
value, err := vr.resolve(ctx)
|
||||
if err != nil {
|
||||
return AsValue(nil), ctx.Error(err.Error(), vr.locationToken)
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
func (v *nodeFilteredVariable) FilterApplied(name string) bool {
|
||||
for _, filter := range v.filterChain {
|
||||
if filter.name == name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (v *nodeFilteredVariable) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
|
||||
value, err := v.resolver.Evaluate(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, filter := range v.filterChain {
|
||||
value, err = filter.Execute(value, ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// IDENT | IDENT.(IDENT|NUMBER)...
|
||||
func (p *Parser) parseVariableOrLiteral() (IEvaluator, *Error) {
|
||||
t := p.Current()
|
||||
|
||||
if t == nil {
|
||||
return nil, p.Error("Unexpected EOF, expected a number, string, keyword or identifier.", p.lastToken)
|
||||
}
|
||||
|
||||
// If the first part is a number or a string, there's nothing to resolve (we only need to return the value)
|
||||
switch t.Typ {
|
||||
case TokenNumber:
|
||||
p.Consume()
|
||||
|
||||
// One exception to the rule that we don't have float64 literals is at the beginning
|
||||
// of an expression (or a variable name). Since we know we started with an integer
|
||||
// which obviously can't be a variable name, we can check whether the first number
|
||||
// is followed by a dot (and then a number again). If so, we convert it to a float64.
|
||||
|
||||
if p.Match(TokenSymbol, ".") != nil {
|
||||
// float64
|
||||
t2 := p.MatchType(TokenNumber)
|
||||
if t2 == nil {
|
||||
return nil, p.Error("Expected a number after the '.'.", nil)
|
||||
}
|
||||
f, err := strconv.ParseFloat(fmt.Sprintf("%s.%s", t.Val, t2.Val), 64)
|
||||
if err != nil {
|
||||
return nil, p.Error(err.Error(), t)
|
||||
}
|
||||
fr := &floatResolver{
|
||||
locationToken: t,
|
||||
val: f,
|
||||
}
|
||||
return fr, nil
|
||||
}
|
||||
i, err := strconv.Atoi(t.Val)
|
||||
if err != nil {
|
||||
return nil, p.Error(err.Error(), t)
|
||||
}
|
||||
nr := &intResolver{
|
||||
locationToken: t,
|
||||
val: i,
|
||||
}
|
||||
return nr, nil
|
||||
|
||||
case TokenString:
|
||||
p.Consume()
|
||||
sr := &stringResolver{
|
||||
locationToken: t,
|
||||
val: t.Val,
|
||||
}
|
||||
return sr, nil
|
||||
case TokenKeyword:
|
||||
p.Consume()
|
||||
switch t.Val {
|
||||
case "true":
|
||||
br := &boolResolver{
|
||||
locationToken: t,
|
||||
val: true,
|
||||
}
|
||||
return br, nil
|
||||
case "false":
|
||||
br := &boolResolver{
|
||||
locationToken: t,
|
||||
val: false,
|
||||
}
|
||||
return br, nil
|
||||
default:
|
||||
return nil, p.Error("This keyword is not allowed here.", nil)
|
||||
}
|
||||
}
|
||||
|
||||
resolver := &variableResolver{
|
||||
locationToken: t,
|
||||
}
|
||||
|
||||
// First part of a variable MUST be an identifier
|
||||
if t.Typ != TokenIdentifier {
|
||||
return nil, p.Error("Expected either a number, string, keyword or identifier.", t)
|
||||
}
|
||||
|
||||
resolver.parts = append(resolver.parts, &variablePart{
|
||||
typ: varTypeIdent,
|
||||
s: t.Val,
|
||||
})
|
||||
|
||||
p.Consume() // we consumed the first identifier of the variable name
|
||||
|
||||
variableLoop:
|
||||
for p.Remaining() > 0 {
|
||||
t = p.Current()
|
||||
|
||||
if p.Match(TokenSymbol, ".") != nil {
|
||||
// Next variable part (can be either NUMBER or IDENT)
|
||||
t2 := p.Current()
|
||||
if t2 != nil {
|
||||
switch t2.Typ {
|
||||
case TokenIdentifier:
|
||||
resolver.parts = append(resolver.parts, &variablePart{
|
||||
typ: varTypeIdent,
|
||||
s: t2.Val,
|
||||
})
|
||||
p.Consume() // consume: IDENT
|
||||
continue variableLoop
|
||||
case TokenNumber:
|
||||
i, err := strconv.Atoi(t2.Val)
|
||||
if err != nil {
|
||||
return nil, p.Error(err.Error(), t2)
|
||||
}
|
||||
resolver.parts = append(resolver.parts, &variablePart{
|
||||
typ: varTypeInt,
|
||||
i: i,
|
||||
})
|
||||
p.Consume() // consume: NUMBER
|
||||
continue variableLoop
|
||||
default:
|
||||
return nil, p.Error("This token is not allowed within a variable name.", t2)
|
||||
}
|
||||
} else {
|
||||
// EOF
|
||||
return nil, p.Error("Unexpected EOF, expected either IDENTIFIER or NUMBER after DOT.",
|
||||
p.lastToken)
|
||||
}
|
||||
} else if p.Match(TokenSymbol, "(") != nil {
|
||||
// Function call
|
||||
// FunctionName '(' Comma-separated list of expressions ')'
|
||||
part := resolver.parts[len(resolver.parts)-1]
|
||||
part.isFunctionCall = true
|
||||
argumentLoop:
|
||||
for {
|
||||
if p.Remaining() == 0 {
|
||||
return nil, p.Error("Unexpected EOF, expected function call argument list.", p.lastToken)
|
||||
}
|
||||
|
||||
if p.Peek(TokenSymbol, ")") == nil {
|
||||
// No closing bracket, so we're parsing an expression
|
||||
exprArg, err := p.ParseExpression()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
part.callingArgs = append(part.callingArgs, exprArg)
|
||||
|
||||
if p.Match(TokenSymbol, ")") != nil {
|
||||
// If there's a closing bracket after an expression, we will stop parsing the arguments
|
||||
break argumentLoop
|
||||
} else {
|
||||
// If there's NO closing bracket, there MUST be a comma
|
||||
if p.Match(TokenSymbol, ",") == nil {
|
||||
return nil, p.Error("Missing comma or closing bracket after argument.", nil)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// We got a closing bracket, so stop parsing arguments
|
||||
p.Consume()
|
||||
break argumentLoop
|
||||
}
|
||||
|
||||
}
|
||||
// We're done parsing the function call, next variable part
|
||||
continue variableLoop
|
||||
}
|
||||
|
||||
// No dot or function call? Then we're done with the variable parsing
|
||||
break
|
||||
}
|
||||
|
||||
return resolver, nil
|
||||
}
|
||||
|
||||
func (p *Parser) parseVariableOrLiteralWithFilter() (*nodeFilteredVariable, *Error) {
|
||||
v := &nodeFilteredVariable{
|
||||
locationToken: p.Current(),
|
||||
}
|
||||
|
||||
// Parse the variable name
|
||||
resolver, err := p.parseVariableOrLiteral()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v.resolver = resolver
|
||||
|
||||
// Parse all the filters
|
||||
filterLoop:
|
||||
for p.Match(TokenSymbol, "|") != nil {
|
||||
// Parse one single filter
|
||||
filter, err := p.parseFilter()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check sandbox filter restriction
|
||||
if _, isBanned := p.template.set.bannedFilters[filter.name]; isBanned {
|
||||
return nil, p.Error(fmt.Sprintf("Usage of filter '%s' is not allowed (sandbox restriction active).", filter.name), nil)
|
||||
}
|
||||
|
||||
v.filterChain = append(v.filterChain, filter)
|
||||
|
||||
continue filterLoop
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (p *Parser) parseVariableElement() (INode, *Error) {
|
||||
node := &nodeVariable{
|
||||
locationToken: p.Current(),
|
||||
}
|
||||
|
||||
p.Consume() // consume '{{'
|
||||
|
||||
expr, err := p.ParseExpression()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
node.expr = expr
|
||||
|
||||
if p.Match(TokenSymbol, "}}") == nil {
|
||||
return nil, p.Error("'}}' expected", nil)
|
||||
}
|
||||
|
||||
return node, nil
|
||||
}
|
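For reference, a sketch (not part of the removed file) of the variable and filter syntax the resolver and parser above handle; the User type, context keys and template string are hypothetical.

// Hypothetical sketch of the template variable syntax handled by the resolver above.
package main

import (
    "fmt"

    "github.com/flosch/pongo2"
)

type User struct{ Name string }

func (u User) Greeting() string { return "Hello, " + u.Name }

func main() {
    // Dotted lookups cover struct fields, map keys, slice indexes and method
    // calls; '|' applies filters such as the built-in "upper".
    tpl := pongo2.Must(pongo2.FromString(
        "{{ user.Name }} / {{ user.Greeting() }} / {{ tags.0 }} / {{ user.Name|upper }}"))

    out, err := tpl.Execute(pongo2.Context{
        "user": User{Name: "Ada"},
        "tags": []string{"admin", "ops"},
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(out) // Ada / Hello, Ada / admin / ADA
}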
3 vendor/github.com/golang/protobuf/AUTHORS (generated, vendored)
@@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
|
||||
# The master list of authors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/AUTHORS.
|
3 vendor/github.com/golang/protobuf/CONTRIBUTORS (generated, vendored)
@@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
|
||||
# The master list of contributors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/CONTRIBUTORS.
|
28 vendor/github.com/golang/protobuf/LICENSE (generated, vendored)
@@ -1,28 +0,0 @@
Copyright 2010 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
253 vendor/github.com/golang/protobuf/proto/clone.go (generated, vendored)
@@ -1,253 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer deep copy and merge.
|
||||
// TODO: RawMessage.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Clone returns a deep copy of a protocol buffer.
|
||||
func Clone(src Message) Message {
|
||||
in := reflect.ValueOf(src)
|
||||
if in.IsNil() {
|
||||
return src
|
||||
}
|
||||
out := reflect.New(in.Type().Elem())
|
||||
dst := out.Interface().(Message)
|
||||
Merge(dst, src)
|
||||
return dst
|
||||
}
|
||||
|
||||
// Merger is the interface representing objects that can merge messages of the same type.
|
||||
type Merger interface {
|
||||
// Merge merges src into this message.
|
||||
// Required and optional fields that are set in src will be set to that value in dst.
|
||||
// Elements of repeated fields will be appended.
|
||||
//
|
||||
// Merge may panic if called with a different argument type than the receiver.
|
||||
Merge(src Message)
|
||||
}
|
||||
|
||||
// generatedMerger is the custom merge method that generated protos will have.
|
||||
// We must add this method since a generated Merge method will conflict with
|
||||
// many existing protos that have a Merge data field already defined.
|
||||
type generatedMerger interface {
|
||||
XXX_Merge(src Message)
|
||||
}
|
||||
|
||||
// Merge merges src into dst.
|
||||
// Required and optional fields that are set in src will be set to that value in dst.
|
||||
// Elements of repeated fields will be appended.
|
||||
// Merge panics if src and dst are not the same type, or if dst is nil.
|
||||
func Merge(dst, src Message) {
|
||||
if m, ok := dst.(Merger); ok {
|
||||
m.Merge(src)
|
||||
return
|
||||
}
|
||||
|
||||
in := reflect.ValueOf(src)
|
||||
out := reflect.ValueOf(dst)
|
||||
if out.IsNil() {
|
||||
panic("proto: nil destination")
|
||||
}
|
||||
if in.Type() != out.Type() {
|
||||
panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
|
||||
}
|
||||
if in.IsNil() {
|
||||
return // Merge from nil src is a noop
|
||||
}
|
||||
if m, ok := dst.(generatedMerger); ok {
|
||||
m.XXX_Merge(src)
|
||||
return
|
||||
}
|
||||
mergeStruct(out.Elem(), in.Elem())
|
||||
}
|
||||
|
||||
func mergeStruct(out, in reflect.Value) {
|
||||
sprop := GetProperties(in.Type())
|
||||
for i := 0; i < in.NumField(); i++ {
|
||||
f := in.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
|
||||
}
|
||||
|
||||
if emIn, err := extendable(in.Addr().Interface()); err == nil {
|
||||
emOut, _ := extendable(out.Addr().Interface())
|
||||
mIn, muIn := emIn.extensionsRead()
|
||||
if mIn != nil {
|
||||
mOut := emOut.extensionsWrite()
|
||||
muIn.Lock()
|
||||
mergeExtension(mOut, mIn)
|
||||
muIn.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
uf := in.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return
|
||||
}
|
||||
uin := uf.Bytes()
|
||||
if len(uin) > 0 {
|
||||
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
|
||||
}
|
||||
}
|
||||
|
||||
// mergeAny performs a merge between two values of the same type.
|
||||
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
|
||||
// prop is set if this is a struct field (it may be nil).
|
||||
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
|
||||
if in.Type() == protoMessageType {
|
||||
if !in.IsNil() {
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
|
||||
} else {
|
||||
Merge(out.Interface().(Message), in.Interface().(Message))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
switch in.Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
if !viaPtr && isProto3Zero(in) {
|
||||
return
|
||||
}
|
||||
out.Set(in)
|
||||
case reflect.Interface:
|
||||
// Probably a oneof field; copy non-nil values.
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
// Allocate destination if it is not set, or set to a different type.
|
||||
// Otherwise we will merge as normal.
|
||||
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
|
||||
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
|
||||
}
|
||||
mergeAny(out.Elem(), in.Elem(), false, nil)
|
||||
case reflect.Map:
|
||||
if in.Len() == 0 {
|
||||
return
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeMap(in.Type()))
|
||||
}
|
||||
// For maps with value types of *T or []byte we need to deep copy each value.
|
||||
elemKind := in.Type().Elem().Kind()
|
||||
for _, key := range in.MapKeys() {
|
||||
var val reflect.Value
|
||||
switch elemKind {
|
||||
case reflect.Ptr:
|
||||
val = reflect.New(in.Type().Elem().Elem())
|
||||
mergeAny(val, in.MapIndex(key), false, nil)
|
||||
case reflect.Slice:
|
||||
val = in.MapIndex(key)
|
||||
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
||||
default:
|
||||
val = in.MapIndex(key)
|
||||
}
|
||||
out.SetMapIndex(key, val)
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.New(in.Elem().Type()))
|
||||
}
|
||||
mergeAny(out.Elem(), in.Elem(), true, nil)
|
||||
case reflect.Slice:
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
if in.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// []byte is a scalar bytes field, not a repeated field.
|
||||
|
||||
// Edge case: if this is in a proto3 message, a zero length
|
||||
// bytes field is considered the zero value, and should not
|
||||
// be merged.
|
||||
if prop != nil && prop.proto3 && in.Len() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Make a deep copy.
|
||||
// Append to []byte{} instead of []byte(nil) so that we never end up
|
||||
// with a nil result.
|
||||
out.SetBytes(append([]byte{}, in.Bytes()...))
|
||||
return
|
||||
}
|
||||
n := in.Len()
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeSlice(in.Type(), 0, n))
|
||||
}
|
||||
switch in.Type().Elem().Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
out.Set(reflect.AppendSlice(out, in))
|
||||
default:
|
||||
for i := 0; i < n; i++ {
|
||||
x := reflect.Indirect(reflect.New(in.Type().Elem()))
|
||||
mergeAny(x, in.Index(i), false, nil)
|
||||
out.Set(reflect.Append(out, x))
|
||||
}
|
||||
}
|
||||
case reflect.Struct:
|
||||
mergeStruct(out, in)
|
||||
default:
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to copy %v", in)
|
||||
}
|
||||
}
|
||||
|
||||
func mergeExtension(out, in map[int32]Extension) {
|
||||
for extNum, eIn := range in {
|
||||
eOut := Extension{desc: eIn.desc}
|
||||
if eIn.value != nil {
|
||||
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
|
||||
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
|
||||
eOut.value = v.Interface()
|
||||
}
|
||||
if eIn.enc != nil {
|
||||
eOut.enc = make([]byte, len(eIn.enc))
|
||||
copy(eOut.enc, eIn.enc)
|
||||
}
|
||||
|
||||
out[extNum] = eOut
|
||||
}
|
||||
}
|
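For reference, a short sketch (not part of the removed file) of proto.Clone and proto.Merge; the wrappers package is used only to provide a concrete Message type for the example and is not part of this vendor tree.

// Hypothetical usage sketch of proto.Clone and proto.Merge removed above.
package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
    src := &wrappers.StringValue{Value: "hello"}

    // Clone returns a deep copy of the message.
    dst := proto.Clone(src).(*wrappers.StringValue)
    fmt.Println(dst.Value) // hello

    // Merge copies set fields from src into an existing destination.
    other := &wrappers.StringValue{}
    proto.Merge(other, src)
    fmt.Println(other.Value) // hello
}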
428 vendor/github.com/golang/protobuf/proto/decode.go (generated, vendored)
@@ -1,428 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for decoding protocol buffer data to construct in-memory representations.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// errOverflow is returned when an integer is too large to be represented.
|
||||
var errOverflow = errors.New("proto: integer overflow")
|
||||
|
||||
// ErrInternalBadWireType is returned by generated code when an incorrect
|
||||
// wire type is encountered. It does not get returned to user code.
|
||||
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the slice.
|
||||
// It returns the integer and the number of bytes consumed, or
|
||||
// zero if there is not enough.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func DecodeVarint(buf []byte) (x uint64, n int) {
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if n >= len(buf) {
|
||||
return 0, 0
|
||||
}
|
||||
b := uint64(buf[n])
|
||||
n++
|
||||
x |= (b & 0x7F) << shift
|
||||
if (b & 0x80) == 0 {
|
||||
return x, n
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
return 0, 0
|
||||
}
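A quick round trip through the package-level varint helpers, a minimal sketch using only EncodeVarint (defined in encode.go below) and DecodeVarint:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 = 0b1_0010_1100, so it encodes to two varint bytes: 0xac 0x02.
	enc := proto.EncodeVarint(300)
	fmt.Printf("% x\n", enc) // ac 02

	// DecodeVarint returns the value and the number of bytes consumed;
	// n == 0 signals a truncated or oversized varint.
	v, n := proto.DecodeVarint(enc)
	fmt.Println(v, n) // 300 2
}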
|
||||
|
||||
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
|
||||
i := p.index
|
||||
l := len(p.buf)
|
||||
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if i >= l {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
b := p.buf[i]
|
||||
i++
|
||||
x |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
p.index = i
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
err = errOverflow
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||
i := p.index
|
||||
buf := p.buf
|
||||
|
||||
if i >= len(buf) {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
} else if buf[i] < 0x80 {
|
||||
p.index++
|
||||
return uint64(buf[i]), nil
|
||||
} else if len(buf)-i < 10 {
|
||||
return p.decodeVarintSlow()
|
||||
}
|
||||
|
||||
var b uint64
|
||||
// we already checked the first byte
|
||||
x = uint64(buf[i]) - 0x80
|
||||
i++
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 7
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 7
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 14
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 14
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 21
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 21
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 28
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 28
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 35
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 35
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 42
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 42
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 49
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 49
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 56
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 56
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 63
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
// x -= 0x80 << 63 // Always zero.
|
||||
|
||||
return 0, errOverflow
|
||||
|
||||
done:
|
||||
p.index = i
|
||||
return x, nil
|
||||
}
|
||||
|
||||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed64, sfixed64, and double protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 8
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-8])
|
||||
x |= uint64(p.buf[i-7]) << 8
|
||||
x |= uint64(p.buf[i-6]) << 16
|
||||
x |= uint64(p.buf[i-5]) << 24
|
||||
x |= uint64(p.buf[i-4]) << 32
|
||||
x |= uint64(p.buf[i-3]) << 40
|
||||
x |= uint64(p.buf[i-2]) << 48
|
||||
x |= uint64(p.buf[i-1]) << 56
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeFixed32 reads a 32-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed32, sfixed32, and float protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 4
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-4])
|
||||
x |= uint64(p.buf[i-3]) << 8
|
||||
x |= uint64(p.buf[i-2]) << 16
|
||||
x |= uint64(p.buf[i-1]) << 24
|
||||
return
|
||||
}
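A short sketch of the fixed-width decoders in use, feeding a Buffer (constructed with NewBuffer from this package) a hand-built little-endian byte slice:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer([]byte{
		0x01, 0x00, 0x00, 0x00, // fixed32: 1
		0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // fixed64: 258
	})

	v32, _ := b.DecodeFixed32()
	v64, _ := b.DecodeFixed64()
	fmt.Println(v32, v64) // 1 258
}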
|
||||
|
||||
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint64 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint32 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
|
||||
return
|
||||
}
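Zigzag encoding interleaves negative and positive values (0→0, -1→1, 1→2, -2→3, ...), so small magnitudes stay small on the wire. A minimal sketch of the decode side, reusing EncodeVarint to build the input:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// The varint 5 is the zigzag encoding of the sint32 value -3:
	// (5 >> 1) ^ -(5 & 1) == 2 ^ 0xffffffff == -3 as an int32.
	b := proto.NewBuffer(proto.EncodeVarint(5))
	v, err := b.DecodeZigzag32()
	if err != nil {
		panic(err)
	}
	fmt.Println(int32(v)) // -3
}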
|
||||
|
||||
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
||||
// This is the format used for the bytes protocol buffer
|
||||
// type and for embedded messages.
|
||||
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
|
||||
n, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nb := int(n)
|
||||
if nb < 0 {
|
||||
return nil, fmt.Errorf("proto: bad byte length %d", nb)
|
||||
}
|
||||
end := p.index + nb
|
||||
if end < p.index || end > len(p.buf) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
if !alloc {
|
||||
// TODO: check whether we can get more uses of alloc=false
|
||||
buf = p.buf[p.index:end]
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
buf = make([]byte, nb)
|
||||
copy(buf, p.buf[p.index:])
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeStringBytes reads an encoded string from the Buffer.
|
||||
// This is the format used for the proto2 string type.
|
||||
func (p *Buffer) DecodeStringBytes() (s string, err error) {
|
||||
buf, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The argument points to data that may be
|
||||
// overwritten, so implementations should not keep references to the
|
||||
// buffer.
|
||||
// Unmarshal implementations should not clear the receiver.
|
||||
// Any unmarshaled data should be merged into the receiver.
|
||||
// Callers of Unmarshal that do not want to retain existing data
|
||||
// should Reset the receiver before calling Unmarshal.
|
||||
type Unmarshaler interface {
|
||||
Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// newUnmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The semantics are identical to Unmarshaler.
|
||||
//
|
||||
// This exists to support protoc-gen-go generated messages.
|
||||
// The proto package will stop type-asserting to this interface in the future.
|
||||
//
|
||||
// DO NOT DEPEND ON THIS.
|
||||
type newUnmarshaler interface {
|
||||
XXX_Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in buf and places the
|
||||
// decoded result in pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// Unmarshal resets pb before starting to unmarshal, so any
|
||||
// existing data in pb is always removed. Use UnmarshalMerge
|
||||
// to preserve and append to existing data.
|
||||
func Unmarshal(buf []byte, pb Message) error {
|
||||
pb.Reset()
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
return u.XXX_Unmarshal(buf)
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// UnmarshalMerge parses the protocol buffer representation in buf and
|
||||
// writes the decoded result to pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// UnmarshalMerge merges into existing data in pb.
|
||||
// Most code should use Unmarshal instead.
|
||||
func UnmarshalMerge(buf []byte, pb Message) error {
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
return u.XXX_Unmarshal(buf)
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
// NOTE: The history of proto has unfortunately been inconsistent
|
||||
// whether Unmarshaler should or should not implicitly clear itself.
|
||||
// Some implementations do, most do not.
|
||||
// Thus, calling this here may or may not do what people want.
|
||||
//
|
||||
// See https://github.com/golang/protobuf/issues/424
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
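A minimal sketch of the difference between the two entry points above. Counts is a hand-written stand-in for generated code (the hand-coded message path that Buffer.Unmarshal describes further down), and the wire bytes are built by hand:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Counts is a hand-written stand-in for a generated message with a single
// repeated varint field (field number 1).
type Counts struct {
	Values []int64 `protobuf:"varint,1,rep,name=values"`
}

func (m *Counts) Reset()         { *m = Counts{} }
func (m *Counts) String() string { return fmt.Sprintf("%v", m.Values) }
func (*Counts) ProtoMessage()    {}

func main() {
	// 0x08 is the key for field 1, wire type 0 (varint).
	first := []byte{0x08, 0x01}  // values: 1
	second := []byte{0x08, 0x02} // values: 2

	m := new(Counts)
	if err := proto.Unmarshal(first, m); err != nil { // resets, then decodes
		panic(err)
	}
	if err := proto.UnmarshalMerge(second, m); err != nil { // appends
		panic(err)
	}
	fmt.Println(m.Values) // [1 2]

	if err := proto.Unmarshal(second, m); err != nil { // Reset() runs first
		panic(err)
	}
	fmt.Println(m.Values) // [2]
}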
|
||||
|
||||
// DecodeMessage reads a count-delimited message from the Buffer.
|
||||
func (p *Buffer) DecodeMessage(pb Message) error {
|
||||
enc, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return NewBuffer(enc).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// DecodeGroup reads a tag-delimited group from the Buffer.
|
||||
// StartGroup tag is already consumed. This function consumes
|
||||
// EndGroup tag.
|
||||
func (p *Buffer) DecodeGroup(pb Message) error {
|
||||
b := p.buf[p.index:]
|
||||
x, y := findEndGroup(b)
|
||||
if x < 0 {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
err := Unmarshal(b[:x], pb)
|
||||
p.index += y
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in the
|
||||
// Buffer and places the decoded result in pb. If the struct
|
||||
// underlying pb does not match the data in the buffer, the results can be
|
||||
// unpredictable.
|
||||
//
|
||||
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
|
||||
func (p *Buffer) Unmarshal(pb Message) error {
|
||||
// If the object can unmarshal itself, let it.
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
err := u.XXX_Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
// NOTE: The history of proto has unfortunately been inconsistent
|
||||
// whether Unmarshaler should or should not implicitly clear itself.
|
||||
// Some implementations do, most do not.
|
||||
// Thus, calling this here may or may not do what people want.
|
||||
//
|
||||
// See https://github.com/golang/protobuf/issues/424
|
||||
err := u.Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
|
||||
// Slow workaround for messages that aren't Unmarshalers.
|
||||
// This includes some hand-coded .pb.go files and
|
||||
// bootstrap protos.
|
||||
// TODO: fix all of those and then add Unmarshal to
|
||||
// the Message interface. Then:
|
||||
// The cast above and code below can be deleted.
|
||||
// The old unmarshaler can be deleted.
|
||||
// Clients can call Unmarshal directly (can already do that, actually).
|
||||
var info InternalMessageInfo
|
||||
err := info.Unmarshal(pb, p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
350	vendor/github.com/golang/protobuf/proto/discard.go (generated, vendored)
@@ -1,350 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type generatedDiscarder interface {
|
||||
XXX_DiscardUnknown()
|
||||
}
|
||||
|
||||
// DiscardUnknown recursively discards all unknown fields from this message
|
||||
// and all embedded messages.
|
||||
//
|
||||
// When unmarshaling a message with unrecognized fields, the tags and values
|
||||
// of such fields are preserved in the Message. This allows a later call to
|
||||
// marshal to be able to produce a message that continues to have those
|
||||
// unrecognized fields. To avoid this, DiscardUnknown is used to
|
||||
// explicitly clear the unknown fields after unmarshaling.
|
||||
//
|
||||
// For proto2 messages, the unknown fields of message extensions are only
|
||||
// discarded from messages that have been accessed via GetExtension.
|
||||
func DiscardUnknown(m Message) {
|
||||
if m, ok := m.(generatedDiscarder); ok {
|
||||
m.XXX_DiscardUnknown()
|
||||
return
|
||||
}
|
||||
// TODO: Dynamically populate an InternalMessageInfo for legacy messages,
|
||||
// but the master branch has no implementation for InternalMessageInfo,
|
||||
// so it would be more work to replicate that approach.
|
||||
discardLegacy(m)
|
||||
}
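A minimal sketch of DiscardUnknown on the legacy path above. Empty is a hand-written stand-in with no known fields, so every decoded field is preserved in XXX_unrecognized until it is discarded:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Empty is a hand-written stand-in with no known fields; anything decoded
// into it lands in XXX_unrecognized.
type Empty struct {
	XXX_unrecognized []byte `json:"-"`
}

func (m *Empty) Reset()         { *m = Empty{} }
func (m *Empty) String() string { return fmt.Sprintf("%x", m.XXX_unrecognized) }
func (*Empty) ProtoMessage()    {}

func main() {
	m := new(Empty)
	// Field 42, wire type 0, value 7: unknown to Empty, so kept verbatim.
	if err := proto.Unmarshal([]byte{0xd0, 0x02, 0x07}, m); err != nil {
		panic(err)
	}
	fmt.Println(len(m.XXX_unrecognized)) // 3

	proto.DiscardUnknown(m) // discardLegacy clears XXX_unrecognized
	fmt.Println(len(m.XXX_unrecognized)) // 0
}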
|
||||
|
||||
// DiscardUnknown recursively discards all unknown fields.
|
||||
func (a *InternalMessageInfo) DiscardUnknown(m Message) {
|
||||
di := atomicLoadDiscardInfo(&a.discard)
|
||||
if di == nil {
|
||||
di = getDiscardInfo(reflect.TypeOf(m).Elem())
|
||||
atomicStoreDiscardInfo(&a.discard, di)
|
||||
}
|
||||
di.discard(toPointer(&m))
|
||||
}
|
||||
|
||||
type discardInfo struct {
|
||||
typ reflect.Type
|
||||
|
||||
initialized int32 // 0: only typ is valid, 1: everything is valid
|
||||
lock sync.Mutex
|
||||
|
||||
fields []discardFieldInfo
|
||||
unrecognized field
|
||||
}
|
||||
|
||||
type discardFieldInfo struct {
|
||||
field field // Offset of field, guaranteed to be valid
|
||||
discard func(src pointer)
|
||||
}
|
||||
|
||||
var (
|
||||
discardInfoMap = map[reflect.Type]*discardInfo{}
|
||||
discardInfoLock sync.Mutex
|
||||
)
|
||||
|
||||
func getDiscardInfo(t reflect.Type) *discardInfo {
|
||||
discardInfoLock.Lock()
|
||||
defer discardInfoLock.Unlock()
|
||||
di := discardInfoMap[t]
|
||||
if di == nil {
|
||||
di = &discardInfo{typ: t}
|
||||
discardInfoMap[t] = di
|
||||
}
|
||||
return di
|
||||
}
|
||||
|
||||
func (di *discardInfo) discard(src pointer) {
|
||||
if src.isNil() {
|
||||
return // Nothing to do.
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&di.initialized) == 0 {
|
||||
di.computeDiscardInfo()
|
||||
}
|
||||
|
||||
for _, fi := range di.fields {
|
||||
sfp := src.offset(fi.field)
|
||||
fi.discard(sfp)
|
||||
}
|
||||
|
||||
// For proto2 messages, only discard unknown fields in message extensions
|
||||
// that have been accessed via GetExtension.
|
||||
if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
|
||||
// Ignore lock since DiscardUnknown is not concurrency safe.
|
||||
emm, _ := em.extensionsRead()
|
||||
for _, mx := range emm {
|
||||
if m, ok := mx.value.(Message); ok {
|
||||
DiscardUnknown(m)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if di.unrecognized.IsValid() {
|
||||
*src.offset(di.unrecognized).toBytes() = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (di *discardInfo) computeDiscardInfo() {
|
||||
di.lock.Lock()
|
||||
defer di.lock.Unlock()
|
||||
if di.initialized != 0 {
|
||||
return
|
||||
}
|
||||
t := di.typ
|
||||
n := t.NumField()
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
|
||||
dfi := discardFieldInfo{field: toField(&f)}
|
||||
tf := f.Type
|
||||
|
||||
// Unwrap tf to get its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
di := getDiscardInfo(tf)
|
||||
dfi.discard = func(src pointer) {
|
||||
sps := src.getPointerSlice()
|
||||
for _, sp := range sps {
|
||||
if !sp.isNil() {
|
||||
di.discard(sp)
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
di := getDiscardInfo(tf)
|
||||
dfi.discard = func(src pointer) {
|
||||
sp := src.getPointer()
|
||||
if !sp.isNil() {
|
||||
di.discard(sp)
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
|
||||
default: // E.g., map[K]V
|
||||
if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
|
||||
dfi.discard = func(src pointer) {
|
||||
sm := src.asPointerTo(tf).Elem()
|
||||
if sm.Len() == 0 {
|
||||
return
|
||||
}
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
DiscardUnknown(val.Interface().(Message))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
dfi.discard = func(pointer) {} // Noop
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
|
||||
default: // E.g., interface{}
|
||||
// TODO: Make this faster?
|
||||
dfi.discard = func(src pointer) {
|
||||
su := src.asPointerTo(tf).Elem()
|
||||
if !su.IsNil() {
|
||||
sv := su.Elem().Elem().Field(0)
|
||||
if sv.Kind() == reflect.Ptr && sv.IsNil() {
|
||||
return
|
||||
}
|
||||
switch sv.Type().Kind() {
|
||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||
DiscardUnknown(sv.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
continue
|
||||
}
|
||||
di.fields = append(di.fields, dfi)
|
||||
}
|
||||
|
||||
di.unrecognized = invalidField
|
||||
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
|
||||
if f.Type != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
di.unrecognized = toField(&f)
|
||||
}
|
||||
|
||||
atomic.StoreInt32(&di.initialized, 1)
|
||||
}
|
||||
|
||||
func discardLegacy(m Message) {
|
||||
v := reflect.ValueOf(m)
|
||||
if v.Kind() != reflect.Ptr || v.IsNil() {
|
||||
return
|
||||
}
|
||||
v = v.Elem()
|
||||
if v.Kind() != reflect.Struct {
|
||||
return
|
||||
}
|
||||
t := v.Type()
|
||||
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
vf := v.Field(i)
|
||||
tf := f.Type
|
||||
|
||||
// Unwrap tf to get its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
for j := 0; j < vf.Len(); j++ {
|
||||
discardLegacy(vf.Index(j).Interface().(Message))
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
discardLegacy(vf.Interface().(Message))
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
|
||||
default: // E.g., map[K]V
|
||||
tv := vf.Type().Elem()
|
||||
if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
|
||||
for _, key := range vf.MapKeys() {
|
||||
val := vf.MapIndex(key)
|
||||
discardLegacy(val.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
|
||||
default: // E.g., test_proto.isCommunique_Union interface
|
||||
if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
|
||||
vf = vf.Elem() // E.g., *test_proto.Communique_Msg
|
||||
if !vf.IsNil() {
|
||||
vf = vf.Elem() // E.g., test_proto.Communique_Msg
|
||||
vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
|
||||
if vf.Kind() == reflect.Ptr {
|
||||
discardLegacy(vf.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
|
||||
if vf.Type() != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
vf.Set(reflect.ValueOf([]byte(nil)))
|
||||
}
|
||||
|
||||
// For proto2 messages, only discard unknown fields in message extensions
|
||||
// that have been accessed via GetExtension.
|
||||
if em, err := extendable(m); err == nil {
|
||||
// Ignore lock since discardLegacy is not concurrency safe.
|
||||
emm, _ := em.extensionsRead()
|
||||
for _, mx := range emm {
|
||||
if m, ok := mx.value.(Message); ok {
|
||||
discardLegacy(m)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
203	vendor/github.com/golang/protobuf/proto/encode.go (generated, vendored)
@@ -1,203 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for encoding data into the wire format for protocol buffers.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
var (
|
||||
// errRepeatedHasNil is the error returned if Marshal is called with
|
||||
// a struct with a repeated field containing a nil element.
|
||||
errRepeatedHasNil = errors.New("proto: repeated field has nil element")
|
||||
|
||||
// errOneofHasNil is the error returned if Marshal is called with
|
||||
// a struct with a oneof field containing a nil element.
|
||||
errOneofHasNil = errors.New("proto: oneof field has nil value")
|
||||
|
||||
// ErrNil is the error returned if Marshal is called with nil.
|
||||
ErrNil = errors.New("proto: Marshal called with nil")
|
||||
|
||||
// ErrTooLarge is the error returned if Marshal is called with a
|
||||
// message that encodes to >2GB.
|
||||
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
|
||||
)
|
||||
|
||||
// The fundamental encoders that put bytes on the wire.
|
||||
// Those that take integer types all accept uint64 and are
|
||||
// therefore of type valueEncoder.
|
||||
|
||||
const maxVarintBytes = 10 // maximum length of a varint
|
||||
|
||||
// EncodeVarint returns the varint encoding of x.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
// Not used by the package itself, but helpful to clients
|
||||
// wishing to use the same encoding.
|
||||
func EncodeVarint(x uint64) []byte {
|
||||
var buf [maxVarintBytes]byte
|
||||
var n int
|
||||
for n = 0; x > 127; n++ {
|
||||
buf[n] = 0x80 | uint8(x&0x7F)
|
||||
x >>= 7
|
||||
}
|
||||
buf[n] = uint8(x)
|
||||
n++
|
||||
return buf[0:n]
|
||||
}
|
||||
|
||||
// EncodeVarint writes a varint-encoded integer to the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) EncodeVarint(x uint64) error {
|
||||
for x >= 1<<7 {
|
||||
p.buf = append(p.buf, uint8(x&0x7f|0x80))
|
||||
x >>= 7
|
||||
}
|
||||
p.buf = append(p.buf, uint8(x))
|
||||
return nil
|
||||
}
|
||||
|
||||
// SizeVarint returns the varint encoding size of an integer.
|
||||
func SizeVarint(x uint64) int {
|
||||
switch {
|
||||
case x < 1<<7:
|
||||
return 1
|
||||
case x < 1<<14:
|
||||
return 2
|
||||
case x < 1<<21:
|
||||
return 3
|
||||
case x < 1<<28:
|
||||
return 4
|
||||
case x < 1<<35:
|
||||
return 5
|
||||
case x < 1<<42:
|
||||
return 6
|
||||
case x < 1<<49:
|
||||
return 7
|
||||
case x < 1<<56:
|
||||
return 8
|
||||
case x < 1<<63:
|
||||
return 9
|
||||
}
|
||||
return 10
|
||||
}
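A quick sanity check that SizeVarint agrees with the length of EncodeVarint's output at the 7-bit bucket boundaries above (a minimal sketch):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Sizes step up at each 7-bit boundary, up to 10 bytes when the top
	// bit of the uint64 is set.
	for _, x := range []uint64{0, 127, 128, 16383, 16384, 1<<63 - 1, 1 << 63} {
		fmt.Println(x, proto.SizeVarint(x), len(proto.EncodeVarint(x)))
	}
}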
|
||||
|
||||
// EncodeFixed64 writes a 64-bit integer to the Buffer.
|
||||
// This is the format for the
|
||||
// fixed64, sfixed64, and double protocol buffer types.
|
||||
func (p *Buffer) EncodeFixed64(x uint64) error {
|
||||
p.buf = append(p.buf,
|
||||
uint8(x),
|
||||
uint8(x>>8),
|
||||
uint8(x>>16),
|
||||
uint8(x>>24),
|
||||
uint8(x>>32),
|
||||
uint8(x>>40),
|
||||
uint8(x>>48),
|
||||
uint8(x>>56))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeFixed32 writes a 32-bit integer to the Buffer.
|
||||
// This is the format for the
|
||||
// fixed32, sfixed32, and float protocol buffer types.
|
||||
func (p *Buffer) EncodeFixed32(x uint64) error {
|
||||
p.buf = append(p.buf,
|
||||
uint8(x),
|
||||
uint8(x>>8),
|
||||
uint8(x>>16),
|
||||
uint8(x>>24))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
|
||||
// to the Buffer.
|
||||
// This is the format used for the sint64 protocol buffer type.
|
||||
func (p *Buffer) EncodeZigzag64(x uint64) error {
|
||||
// use signed number to get arithmetic right shift.
|
||||
return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
|
||||
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
|
||||
// to the Buffer.
|
||||
// This is the format used for the sint32 protocol buffer type.
|
||||
func (p *Buffer) EncodeZigzag32(x uint64) error {
|
||||
// use signed number to get arithmetic right shift.
|
||||
return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
|
||||
}
|
||||
|
||||
// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
|
||||
// This is the format used for the bytes protocol buffer
|
||||
// type and for embedded messages.
|
||||
func (p *Buffer) EncodeRawBytes(b []byte) error {
|
||||
p.EncodeVarint(uint64(len(b)))
|
||||
p.buf = append(p.buf, b...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeStringBytes writes an encoded string to the Buffer.
|
||||
// This is the format used for the proto2 string type.
|
||||
func (p *Buffer) EncodeStringBytes(s string) error {
|
||||
p.EncodeVarint(uint64(len(s)))
|
||||
p.buf = append(p.buf, s...)
|
||||
return nil
|
||||
}
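A sketch of how the Buffer primitives above compose into one complete wire-format field (field 1, wire type 2), which is what generated marshaling code emits for a proto2 string. Bytes() is defined elsewhere in this package (lib.go), not in this hunk:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)

	// Key for field 1, wire type 2 (length-delimited): 1<<3 | 2 = 0x0a.
	b.EncodeVarint(uint64(1<<3 | 2))
	// EncodeStringBytes writes the length prefix and then the bytes.
	b.EncodeStringBytes("hi")

	fmt.Printf("% x\n", b.Bytes()) // 0a 02 68 69
}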
|
||||
|
||||
// Marshaler is the interface representing objects that can marshal themselves.
|
||||
type Marshaler interface {
|
||||
Marshal() ([]byte, error)
|
||||
}
|
||||
|
||||
// EncodeMessage writes the protocol buffer to the Buffer,
|
||||
// prefixed by a varint-encoded length.
|
||||
func (p *Buffer) EncodeMessage(pb Message) error {
|
||||
siz := Size(pb)
|
||||
p.EncodeVarint(uint64(siz))
|
||||
return p.Marshal(pb)
|
||||
}
|
||||
|
||||
// All protocol buffer fields are nillable, but be careful.
|
||||
func isNil(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return v.IsNil()
|
||||
}
|
||||
return false
|
||||
}
|
300	vendor/github.com/golang/protobuf/proto/equal.go (generated, vendored)
@@ -1,300 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer comparison.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
/*
|
||||
Equal returns true iff protocol buffers a and b are equal.
|
||||
The arguments must both be pointers to protocol buffer structs.
|
||||
|
||||
Equality is defined in this way:
|
||||
- Two messages are equal iff they are the same type,
|
||||
corresponding fields are equal, unknown field sets
|
||||
are equal, and extensions sets are equal.
|
||||
- Two set scalar fields are equal iff their values are equal.
|
||||
If the fields are of a floating-point type, remember that
|
||||
NaN != x for all x, including NaN. If the message is defined
|
||||
in a proto3 .proto file, fields are not "set"; specifically,
|
||||
zero length proto3 "bytes" fields are equal (nil == {}).
|
||||
- Two repeated fields are equal iff their lengths are the same,
|
||||
and their corresponding elements are equal. Note a "bytes" field,
|
||||
although represented by []byte, is not a repeated field and the
|
||||
rule for the scalar fields described above applies.
|
||||
- Two unset fields are equal.
|
||||
- Two unknown field sets are equal if their current
|
||||
encoded state is equal.
|
||||
- Two extension sets are equal iff they have corresponding
|
||||
elements that are pairwise equal.
|
||||
- Two map fields are equal iff their lengths are the same,
|
||||
and they contain the same set of elements. Zero-length map
|
||||
fields are equal.
|
||||
- Every other combination of things is not equal.
|
||||
|
||||
The return value is undefined if a and b are not protocol buffers.
|
||||
*/
|
||||
func Equal(a, b Message) bool {
|
||||
if a == nil || b == nil {
|
||||
return a == b
|
||||
}
|
||||
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
|
||||
if v1.Type() != v2.Type() {
|
||||
return false
|
||||
}
|
||||
if v1.Kind() == reflect.Ptr {
|
||||
if v1.IsNil() {
|
||||
return v2.IsNil()
|
||||
}
|
||||
if v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
v1, v2 = v1.Elem(), v2.Elem()
|
||||
}
|
||||
if v1.Kind() != reflect.Struct {
|
||||
return false
|
||||
}
|
||||
return equalStruct(v1, v2)
|
||||
}
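A minimal sketch of the equality rules documented above, exercising the set/unset distinction for proto2 pointer fields. Pair is a hand-written stand-in for generated code:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Pair is a hand-written stand-in for a generated proto2 message.
type Pair struct {
	Name  *string `protobuf:"bytes,1,opt,name=name"`
	Count *int32  `protobuf:"varint,2,opt,name=count"`
}

func (m *Pair) Reset()         { *m = Pair{} }
func (m *Pair) String() string { return fmt.Sprintf("%+v", *m) }
func (*Pair) ProtoMessage()    {}

func main() {
	name := "a"
	count := int32(1)

	a := &Pair{Name: &name, Count: &count}
	b := &Pair{Name: &name, Count: &count}
	c := &Pair{Name: &name} // Count unset

	fmt.Println(proto.Equal(a, b)) // true: same type, fields pairwise equal
	fmt.Println(proto.Equal(a, c)) // false: set/unset mismatch on Count
}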
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
func equalStruct(v1, v2 reflect.Value) bool {
|
||||
sprop := GetProperties(v1.Type())
|
||||
for i := 0; i < v1.NumField(); i++ {
|
||||
f := v1.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
f1, f2 := v1.Field(i), v2.Field(i)
|
||||
if f.Type.Kind() == reflect.Ptr {
|
||||
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
|
||||
// both unset
|
||||
continue
|
||||
} else if n1 != n2 {
|
||||
// set/unset mismatch
|
||||
return false
|
||||
}
|
||||
f1, f2 = f1.Elem(), f2.Elem()
|
||||
}
|
||||
if !equalAny(f1, f2, sprop.Prop[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_InternalExtensions")
|
||||
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_extensions")
|
||||
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
uf := v1.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return true
|
||||
}
|
||||
|
||||
u1 := uf.Bytes()
|
||||
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
|
||||
return bytes.Equal(u1, u2)
|
||||
}
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
// prop may be nil.
|
||||
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
|
||||
if v1.Type() == protoMessageType {
|
||||
m1, _ := v1.Interface().(Message)
|
||||
m2, _ := v2.Interface().(Message)
|
||||
return Equal(m1, m2)
|
||||
}
|
||||
switch v1.Kind() {
|
||||
case reflect.Bool:
|
||||
return v1.Bool() == v2.Bool()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v1.Float() == v2.Float()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v1.Int() == v2.Int()
|
||||
case reflect.Interface:
|
||||
// Probably a oneof field; compare the inner values.
|
||||
n1, n2 := v1.IsNil(), v2.IsNil()
|
||||
if n1 || n2 {
|
||||
return n1 == n2
|
||||
}
|
||||
e1, e2 := v1.Elem(), v2.Elem()
|
||||
if e1.Type() != e2.Type() {
|
||||
return false
|
||||
}
|
||||
return equalAny(e1, e2, nil)
|
||||
case reflect.Map:
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for _, key := range v1.MapKeys() {
|
||||
val2 := v2.MapIndex(key)
|
||||
if !val2.IsValid() {
|
||||
// This key was not found in the second map.
|
||||
return false
|
||||
}
|
||||
if !equalAny(v1.MapIndex(key), val2, nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Ptr:
|
||||
// Maps may have nil values in them, so check for nil.
|
||||
if v1.IsNil() && v2.IsNil() {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return equalAny(v1.Elem(), v2.Elem(), prop)
|
||||
case reflect.Slice:
|
||||
if v1.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// short circuit: []byte
|
||||
|
||||
// Edge case: if this is in a proto3 message, a zero length
|
||||
// bytes field is considered the zero value.
|
||||
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
|
||||
}
|
||||
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < v1.Len(); i++ {
|
||||
if !equalAny(v1.Index(i), v2.Index(i), prop) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.String:
|
||||
return v1.Interface().(string) == v2.Interface().(string)
|
||||
case reflect.Struct:
|
||||
return equalStruct(v1, v2)
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v1.Uint() == v2.Uint()
|
||||
}
|
||||
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to compare %v", v1)
|
||||
return false
|
||||
}
|
||||
|
||||
// base is the struct type that the extensions are based on.
|
||||
// x1 and x2 are InternalExtensions.
|
||||
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
|
||||
em1, _ := x1.extensionsRead()
|
||||
em2, _ := x2.extensionsRead()
|
||||
return equalExtMap(base, em1, em2)
|
||||
}
|
||||
|
||||
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
||||
if len(em1) != len(em2) {
|
||||
return false
|
||||
}
|
||||
|
||||
for extNum, e1 := range em1 {
|
||||
e2, ok := em2[extNum]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
m1, m2 := e1.value, e2.value
|
||||
|
||||
if m1 == nil && m2 == nil {
|
||||
// Both have only encoded form.
|
||||
if bytes.Equal(e1.enc, e2.enc) {
|
||||
continue
|
||||
}
|
||||
// The bytes are different, but the extensions might still be
|
||||
// equal. We need to decode them to compare.
|
||||
}
|
||||
|
||||
if m1 != nil && m2 != nil {
|
||||
// Both are unencoded.
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// At least one is encoded. To do a semantically correct comparison
|
||||
// we need to unmarshal them first.
|
||||
var desc *ExtensionDesc
|
||||
if m := extensionMaps[base]; m != nil {
|
||||
desc = m[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
// If both have only encoded form and the bytes are the same,
|
||||
// it is handled above. We get here when the bytes are different.
|
||||
// We don't know how to decode it, so just compare them as byte
|
||||
// slices.
|
||||
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
|
||||
return false
|
||||
}
|
||||
var err error
|
||||
if m1 == nil {
|
||||
m1, err = decodeExtension(e1.enc, desc)
|
||||
}
|
||||
if m2 == nil && err == nil {
|
||||
m2, err = decodeExtension(e2.enc, desc)
|
||||
}
|
||||
if err != nil {
|
||||
// The encoded form is invalid.
|
||||
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
|
||||
return false
|
||||
}
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
543	vendor/github.com/golang/protobuf/proto/extensions.go (generated, vendored)
@@ -1,543 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Types and routines for supporting protocol buffer extensions.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
|
||||
var ErrMissingExtension = errors.New("proto: missing extension")
|
||||
|
||||
// ExtensionRange represents a range of message extensions for a protocol buffer.
|
||||
// Used in code generated by the protocol compiler.
|
||||
type ExtensionRange struct {
|
||||
Start, End int32 // both inclusive
|
||||
}
|
||||
|
||||
// extendableProto is an interface implemented by any protocol buffer generated by the current
|
||||
// proto compiler that may be extended.
|
||||
type extendableProto interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
extensionsWrite() map[int32]Extension
|
||||
extensionsRead() (map[int32]Extension, sync.Locker)
|
||||
}
|
||||
|
||||
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
|
||||
// version of the proto compiler that may be extended.
|
||||
type extendableProtoV1 interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
ExtensionMap() map[int32]Extension
|
||||
}
|
||||
|
||||
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
|
||||
type extensionAdapter struct {
|
||||
extendableProtoV1
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
|
||||
return e.ExtensionMap()
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
return e.ExtensionMap(), notLocker{}
|
||||
}
|
||||
|
||||
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
|
||||
type notLocker struct{}
|
||||
|
||||
func (n notLocker) Lock() {}
|
||||
func (n notLocker) Unlock() {}
|
||||
|
||||
// extendable returns the extendableProto interface for the given generated proto message.
|
||||
// If the proto message has the old extension format, it returns a wrapper that implements
|
||||
// the extendableProto interface.
|
||||
func extendable(p interface{}) (extendableProto, error) {
|
||||
switch p := p.(type) {
|
||||
case extendableProto:
|
||||
if isNilPtr(p) {
|
||||
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
|
||||
}
|
||||
return p, nil
|
||||
case extendableProtoV1:
|
||||
if isNilPtr(p) {
|
||||
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
|
||||
}
|
||||
return extensionAdapter{p}, nil
|
||||
}
|
||||
// Don't allocate a specific error containing %T:
|
||||
// this is the hot path for Clone and MarshalText.
|
||||
return nil, errNotExtendable
|
||||
}
|
||||
|
||||
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
|
||||
|
||||
func isNilPtr(x interface{}) bool {
|
||||
v := reflect.ValueOf(x)
|
||||
return v.Kind() == reflect.Ptr && v.IsNil()
|
||||
}
|
||||
|
||||
// XXX_InternalExtensions is an internal representation of proto extensions.
|
||||
//
|
||||
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
|
||||
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
|
||||
//
|
||||
// The methods of XXX_InternalExtensions are not concurrency safe in general,
|
||||
// but calls to logically read-only methods such as has and get may be executed concurrently.
|
||||
type XXX_InternalExtensions struct {
|
||||
// The struct must be indirect so that if a user inadvertently copies a
|
||||
// generated message and its embedded XXX_InternalExtensions, they
|
||||
// avoid the mayhem of a copied mutex.
|
||||
//
|
||||
// The mutex serializes all logically read-only operations to p.extensionMap.
|
||||
// It is up to the client to ensure that write operations to p.extensionMap are
|
||||
// mutually exclusive with other accesses.
|
||||
p *struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
}
|
||||
}
|
||||
|
||||
// extensionsWrite returns the extension map, creating it on first use.
|
||||
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
|
||||
if e.p == nil {
|
||||
e.p = new(struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
})
|
||||
e.p.extensionMap = make(map[int32]Extension)
|
||||
}
|
||||
return e.p.extensionMap
|
||||
}
|
||||
|
||||
// extensionsRead returns the extensions map for read-only use. It may be nil.
|
||||
// The caller must hold the returned mutex's lock when accessing Elements within the map.
|
||||
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
if e.p == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return e.p.extensionMap, &e.p.mu
|
||||
}
|
||||
|
||||
// ExtensionDesc represents an extension specification.
|
||||
// Used in generated code from the protocol compiler.
|
||||
type ExtensionDesc struct {
|
||||
ExtendedType Message // nil pointer to the type that is being extended
|
||||
ExtensionType interface{} // nil pointer to the extension type
|
||||
Field int32 // field number
|
||||
Name string // fully-qualified name of extension, for text formatting
|
||||
Tag string // protobuf tag style
|
||||
Filename string // name of the file in which the extension is defined
|
||||
}
|
||||
|
||||
func (ed *ExtensionDesc) repeated() bool {
|
||||
t := reflect.TypeOf(ed.ExtensionType)
|
||||
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
|
||||
}
|
||||
|
||||
// Extension represents an extension in a message.
|
||||
type Extension struct {
|
||||
// When an extension is stored in a message using SetExtension
|
||||
// only desc and value are set. When the message is marshaled
|
||||
// enc will be set to the encoded form of the message.
|
||||
//
|
||||
// When a message is unmarshaled and contains extensions, each
|
||||
// extension will have only enc set. When such an extension is
|
||||
// accessed using GetExtension (or GetExtensions) desc and value
|
||||
// will be set.
|
||||
desc *ExtensionDesc
|
||||
value interface{}
|
||||
enc []byte
|
||||
}
|
||||
|
||||
// SetRawExtension is for testing only.
|
||||
func SetRawExtension(base Message, id int32, b []byte) {
|
||||
epb, err := extendable(base)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[id] = Extension{enc: b}
|
||||
}
|
||||
|
||||
// isExtensionField returns true iff the given field number is in an extension range.
|
||||
func isExtensionField(pb extendableProto, field int32) bool {
|
||||
for _, er := range pb.ExtensionRangeArray() {
|
||||
if er.Start <= field && field <= er.End {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// checkExtensionTypes checks that the given extension is valid for pb.
|
||||
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
|
||||
var pbi interface{} = pb
|
||||
// Check the extended type.
|
||||
if ea, ok := pbi.(extensionAdapter); ok {
|
||||
pbi = ea.extendableProtoV1
|
||||
}
|
||||
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
|
||||
return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
|
||||
}
|
||||
// Check the range.
|
||||
if !isExtensionField(pb, extension.Field) {
|
||||
return errors.New("proto: bad extension number; not in declared ranges")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// extPropKey is sufficient to uniquely identify an extension.
|
||||
type extPropKey struct {
|
||||
base reflect.Type
|
||||
field int32
|
||||
}
|
||||
|
||||
var extProp = struct {
|
||||
sync.RWMutex
|
||||
m map[extPropKey]*Properties
|
||||
}{
|
||||
m: make(map[extPropKey]*Properties),
|
||||
}
|
||||
|
||||
func extensionProperties(ed *ExtensionDesc) *Properties {
|
||||
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
|
||||
|
||||
extProp.RLock()
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
extProp.RUnlock()
|
||||
return prop
|
||||
}
|
||||
extProp.RUnlock()
|
||||
|
||||
extProp.Lock()
|
||||
defer extProp.Unlock()
|
||||
// Check again.
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
return prop
|
||||
}
|
||||
|
||||
prop := new(Properties)
|
||||
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
|
||||
extProp.m[key] = prop
|
||||
return prop
|
||||
}
|
||||
|
||||
// HasExtension returns whether the given extension is present in pb.
|
||||
func HasExtension(pb Message, extension *ExtensionDesc) bool {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
extmap, mu := epb.extensionsRead()
|
||||
if extmap == nil {
|
||||
return false
|
||||
}
|
||||
mu.Lock()
|
||||
_, ok := extmap[extension.Field]
|
||||
mu.Unlock()
|
||||
return ok
|
||||
}
|
||||
|
||||
// ClearExtension removes the given extension from pb.
|
||||
func ClearExtension(pb Message, extension *ExtensionDesc) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
extmap := epb.extensionsWrite()
|
||||
delete(extmap, extension.Field)
|
||||
}
|
||||
|
||||
// GetExtension retrieves a proto2 extended field from pb.
|
||||
//
|
||||
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
|
||||
// then GetExtension parses the encoded field and returns a Go value of the specified type.
|
||||
// If the field is not present, then the default value is returned (if one is specified),
|
||||
// otherwise ErrMissingExtension is reported.
|
||||
//
|
||||
// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
|
||||
// then GetExtension returns the raw encoded bytes of the field extension.
|
||||
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if extension.ExtendedType != nil {
|
||||
// can only check type if this is a complete descriptor
|
||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
emap, mu := epb.extensionsRead()
|
||||
if emap == nil {
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
e, ok := emap[extension.Field]
|
||||
if !ok {
|
||||
// defaultExtensionValue returns the default value or
|
||||
// ErrMissingExtension if there is no default.
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
|
||||
if e.value != nil {
|
||||
// Already decoded. Check the descriptor, though.
|
||||
if e.desc != extension {
|
||||
// This shouldn't happen. If it does, it means that
|
||||
// GetExtension was called twice with two different
|
||||
// descriptors with the same field number.
|
||||
return nil, errors.New("proto: descriptor conflict")
|
||||
}
|
||||
return e.value, nil
|
||||
}
|
||||
|
||||
if extension.ExtensionType == nil {
|
||||
// incomplete descriptor
|
||||
return e.enc, nil
|
||||
}
|
||||
|
||||
v, err := decodeExtension(e.enc, extension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remember the decoded version and drop the encoded version.
|
||||
// That way it is safe to mutate what we return.
|
||||
e.value = v
|
||||
e.desc = extension
|
||||
e.enc = nil
|
||||
emap[extension.Field] = e
|
||||
return e.value, nil
|
||||
}
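For reference, a rough usage sketch of the extension accessors being deleted here. pb.MyMessage and pb.E_MyExtension stand in for hypothetical generated code and are not part of this repository:

package main

import (
	"log"

	"github.com/golang/protobuf/proto"

	pb "example.com/project/pb" // hypothetical generated package
)

func main() {
	msg := &pb.MyMessage{}

	// Attach an extension value; the value's type must match the
	// descriptor's ExtensionType exactly (*string here).
	if err := proto.SetExtension(msg, pb.E_MyExtension, proto.String("hello")); err != nil {
		log.Fatal(err)
	}

	if proto.HasExtension(msg, pb.E_MyExtension) {
		v, err := proto.GetExtension(msg, pb.E_MyExtension)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("extension value: %q", *(v.(*string)))
	}

	// Remove it again.
	proto.ClearExtension(msg, pb.E_MyExtension)
}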
|
||||
|
||||
// defaultExtensionValue returns the default value for extension.
|
||||
// If no default for an extension is defined ErrMissingExtension is returned.
|
||||
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
|
||||
if extension.ExtensionType == nil {
|
||||
// incomplete descriptor, so no default
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
props := extensionProperties(extension)
|
||||
|
||||
sf, _, err := fieldDefault(t, props)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if sf == nil || sf.value == nil {
|
||||
// There is no default value.
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
|
||||
if t.Kind() != reflect.Ptr {
|
||||
// We do not need to return a Ptr, we can directly return sf.value.
|
||||
return sf.value, nil
|
||||
}
|
||||
|
||||
// We need to return an interface{} that is a pointer to sf.value.
|
||||
value := reflect.New(t).Elem()
|
||||
value.Set(reflect.New(value.Type().Elem()))
|
||||
if sf.kind == reflect.Int32 {
|
||||
// We may have an int32 or an enum, but the underlying data is int32.
|
||||
// Since we can't set an int32 into a non int32 reflect.value directly
|
||||
// set it as a int32.
|
||||
value.Elem().SetInt(int64(sf.value.(int32)))
|
||||
} else {
|
||||
value.Elem().Set(reflect.ValueOf(sf.value))
|
||||
}
|
||||
return value.Interface(), nil
|
||||
}
|
||||
|
||||
// decodeExtension decodes an extension encoded in b.
|
||||
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
unmarshal := typeUnmarshaler(t, extension.Tag)
|
||||
|
||||
// t is a pointer to a struct, pointer to basic type or a slice.
|
||||
// Allocate space to store the pointer/slice.
|
||||
value := reflect.New(t).Elem()
|
||||
|
||||
var err error
|
||||
for {
|
||||
x, n := decodeVarint(b)
|
||||
if n == 0 {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
b = b[n:]
|
||||
wire := int(x) & 7
|
||||
|
||||
b, err = unmarshal(b, valToPointer(value.Addr()), wire)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(b) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return value.Interface(), nil
|
||||
}
|
||||
|
||||
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
|
||||
// The returned slice has the same length as es; missing extensions will appear as nil elements.
|
||||
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
extensions = make([]interface{}, len(es))
|
||||
for i, e := range es {
|
||||
extensions[i], err = GetExtension(epb, e)
|
||||
if err == ErrMissingExtension {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
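Continuing the hypothetical sketch above (same imports), GetExtensions reports extensions that are absent and have no declared default as nil entries rather than as errors:

// listExtensions prints which of the given (hypothetical) descriptors
// are set on msg; missing extensions come back as nil entries.
func listExtensions(msg proto.Message, descs []*proto.ExtensionDesc) error {
	vals, err := proto.GetExtensions(msg, descs)
	if err != nil {
		return err
	}
	for i, v := range vals {
		if v == nil {
			log.Printf("%s not set", descs[i].Name)
			continue
		}
		log.Printf("%s = %v", descs[i].Name, v)
	}
	return nil
}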
|
||||
|
||||
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
|
||||
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
|
||||
// just the Field field, which defines the extension's field number.
|
||||
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
registeredExtensions := RegisteredExtensions(pb)
|
||||
|
||||
emap, mu := epb.extensionsRead()
|
||||
if emap == nil {
|
||||
return nil, nil
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
extensions := make([]*ExtensionDesc, 0, len(emap))
|
||||
for extid, e := range emap {
|
||||
desc := e.desc
|
||||
if desc == nil {
|
||||
desc = registeredExtensions[extid]
|
||||
if desc == nil {
|
||||
desc = &ExtensionDesc{Field: extid}
|
||||
}
|
||||
}
|
||||
|
||||
extensions = append(extensions, desc)
|
||||
}
|
||||
return extensions, nil
|
||||
}
|
||||
|
||||
// SetExtension sets the specified extension of pb to the specified value.
|
||||
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
||||
return err
|
||||
}
|
||||
typ := reflect.TypeOf(extension.ExtensionType)
|
||||
if typ != reflect.TypeOf(value) {
|
||||
return errors.New("proto: bad extension value type")
|
||||
}
|
||||
// nil extension values need to be caught early, because the
|
||||
// encoder can't distinguish an ErrNil due to a nil extension
|
||||
// from an ErrNil due to a missing field. Extensions are
|
||||
// always optional, so the encoder would just swallow the error
|
||||
// and drop all the extensions from the encoded message.
|
||||
if reflect.ValueOf(value).IsNil() {
|
||||
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
|
||||
}
|
||||
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[extension.Field] = Extension{desc: extension, value: value}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ClearAllExtensions clears all extensions from pb.
|
||||
func ClearAllExtensions(pb Message) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
m := epb.extensionsWrite()
|
||||
for k := range m {
|
||||
delete(m, k)
|
||||
}
|
||||
}

// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension.

var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)

// RegisterExtension is called from the generated code.
func RegisterExtension(desc *ExtensionDesc) {
	st := reflect.TypeOf(desc.ExtendedType).Elem()
	m := extensionMaps[st]
	if m == nil {
		m = make(map[int32]*ExtensionDesc)
		extensionMaps[st] = m
	}
	if _, ok := m[desc.Field]; ok {
		panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
	}
	m[desc.Field] = desc
}

// RegisteredExtensions returns a map of the registered extensions of a
// protocol buffer struct, indexed by the extension number.
// The argument pb should be a nil pointer to the struct type.
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
	return extensionMaps[reflect.TypeOf(pb).Elem()]
}
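Generated .pb.go files register their descriptors at init time; a hand-written approximation of that pattern (the descriptor values and the pb package are hypothetical, not actual protoc-gen-go output):

package pbexample

import (
	"github.com/golang/protobuf/proto"

	pb "example.com/project/pb" // hypothetical generated package
)

// E_MyExtension illustrates the descriptor value that generated code
// would normally declare; the field number and tag are made up.
var E_MyExtension = &proto.ExtensionDesc{
	ExtendedType:  (*pb.MyMessage)(nil),
	ExtensionType: (*string)(nil),
	Field:         12345,
	Name:          "example.my_extension",
	Tag:           "bytes,12345,opt,name=my_extension",
}

func init() {
	proto.RegisterExtension(E_MyExtension)
}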
vendor/github.com/golang/protobuf/proto/lib.go (generated, vendored)
@@ -1,979 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
/*
|
||||
Package proto converts data structures to and from the wire format of
|
||||
protocol buffers. It works in concert with the Go source code generated
|
||||
for .proto files by the protocol compiler.
|
||||
|
||||
A summary of the properties of the protocol buffer interface
|
||||
for a protocol buffer variable v:
|
||||
|
||||
- Names are turned from camel_case to CamelCase for export.
|
||||
- There are no methods on v to set fields; just treat
|
||||
them as structure fields.
|
||||
- There are getters that return a field's value if set,
|
||||
and return the field's default value if unset.
|
||||
The getters work even if the receiver is a nil message.
|
||||
- The zero value for a struct is its correct initialization state.
|
||||
All desired fields must be set before marshaling.
|
||||
- A Reset() method will restore a protobuf struct to its zero state.
|
||||
- Non-repeated fields are pointers to the values; nil means unset.
|
||||
That is, optional or required field int32 f becomes F *int32.
|
||||
- Repeated fields are slices.
|
||||
- Helper functions are available to aid the setting of fields.
|
||||
msg.Foo = proto.String("hello") // set field
|
||||
- Constants are defined to hold the default values of all fields that
|
||||
have them. They have the form Default_StructName_FieldName.
|
||||
Because the getter methods handle defaulted values,
|
||||
direct use of these constants should be rare.
|
||||
- Enums are given type names and maps from names to values.
|
||||
Enum values are prefixed by the enclosing message's name, or by the
|
||||
enum's type name if it is a top-level enum. Enum types have a String
|
||||
method, and an Enum method to assist in message construction.
|
||||
- Nested messages, groups and enums have type names prefixed with the name of
|
||||
the surrounding message type.
|
||||
- Extensions are given descriptor names that start with E_,
|
||||
followed by an underscore-delimited list of the nested messages
|
||||
that contain it (if any) followed by the CamelCased name of the
|
||||
extension field itself. HasExtension, ClearExtension, GetExtension
|
||||
and SetExtension are functions for manipulating extensions.
|
||||
- Oneof field sets are given a single field in their message,
|
||||
with distinguished wrapper types for each possible field value.
|
||||
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
||||
|
||||
When the .proto file specifies `syntax="proto3"`, there are some differences:
|
||||
|
||||
- Non-repeated fields of non-message type are values instead of pointers.
|
||||
- Enum types do not get an Enum method.
|
||||
|
||||
The simplest way to describe this is to see an example.
|
||||
Given file test.proto, containing
|
||||
|
||||
package example;
|
||||
|
||||
enum FOO { X = 17; }
|
||||
|
||||
message Test {
|
||||
required string label = 1;
|
||||
optional int32 type = 2 [default=77];
|
||||
repeated int64 reps = 3;
|
||||
optional group OptionalGroup = 4 {
|
||||
required string RequiredField = 5;
|
||||
}
|
||||
oneof union {
|
||||
int32 number = 6;
|
||||
string name = 7;
|
||||
}
|
||||
}
|
||||
|
||||
The resulting file, test.pb.go, is:
|
||||
|
||||
package example
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import math "math"
|
||||
|
||||
type FOO int32
|
||||
const (
|
||||
FOO_X FOO = 17
|
||||
)
|
||||
var FOO_name = map[int32]string{
|
||||
17: "X",
|
||||
}
|
||||
var FOO_value = map[string]int32{
|
||||
"X": 17,
|
||||
}
|
||||
|
||||
func (x FOO) Enum() *FOO {
|
||||
p := new(FOO)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x FOO) String() string {
|
||||
return proto.EnumName(FOO_name, int32(x))
|
||||
}
|
||||
func (x *FOO) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(FOO_value, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = FOO(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test struct {
|
||||
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
|
||||
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
|
||||
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
|
||||
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
|
||||
// Types that are valid to be assigned to Union:
|
||||
// *Test_Number
|
||||
// *Test_Name
|
||||
Union isTest_Union `protobuf_oneof:"union"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
func (m *Test) Reset() { *m = Test{} }
|
||||
func (m *Test) String() string { return proto.CompactTextString(m) }
|
||||
func (*Test) ProtoMessage() {}
|
||||
|
||||
type isTest_Union interface {
|
||||
isTest_Union()
|
||||
}
|
||||
|
||||
type Test_Number struct {
|
||||
Number int32 `protobuf:"varint,6,opt,name=number"`
|
||||
}
|
||||
type Test_Name struct {
|
||||
Name string `protobuf:"bytes,7,opt,name=name"`
|
||||
}
|
||||
|
||||
func (*Test_Number) isTest_Union() {}
|
||||
func (*Test_Name) isTest_Union() {}
|
||||
|
||||
func (m *Test) GetUnion() isTest_Union {
|
||||
if m != nil {
|
||||
return m.Union
|
||||
}
|
||||
return nil
|
||||
}
|
||||
const Default_Test_Type int32 = 77
|
||||
|
||||
func (m *Test) GetLabel() string {
|
||||
if m != nil && m.Label != nil {
|
||||
return *m.Label
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Test) GetType() int32 {
|
||||
if m != nil && m.Type != nil {
|
||||
return *m.Type
|
||||
}
|
||||
return Default_Test_Type
|
||||
}
|
||||
|
||||
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
|
||||
if m != nil {
|
||||
return m.Optionalgroup
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test_OptionalGroup struct {
|
||||
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
|
||||
}
|
||||
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
|
||||
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
|
||||
|
||||
func (m *Test_OptionalGroup) GetRequiredField() string {
|
||||
if m != nil && m.RequiredField != nil {
|
||||
return *m.RequiredField
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Test) GetNumber() int32 {
|
||||
if x, ok := m.GetUnion().(*Test_Number); ok {
|
||||
return x.Number
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Test) GetName() string {
|
||||
if x, ok := m.GetUnion().(*Test_Name); ok {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
|
||||
}
|
||||
|
||||
To create and play with a Test object:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
pb "./example.pb"
|
||||
)
|
||||
|
||||
func main() {
|
||||
test := &pb.Test{
|
||||
Label: proto.String("hello"),
|
||||
Type: proto.Int32(17),
|
||||
Reps: []int64{1, 2, 3},
|
||||
Optionalgroup: &pb.Test_OptionalGroup{
|
||||
RequiredField: proto.String("good bye"),
|
||||
},
|
||||
Union: &pb.Test_Name{"fred"},
|
||||
}
|
||||
data, err := proto.Marshal(test)
|
||||
if err != nil {
|
||||
log.Fatal("marshaling error: ", err)
|
||||
}
|
||||
newTest := &pb.Test{}
|
||||
err = proto.Unmarshal(data, newTest)
|
||||
if err != nil {
|
||||
log.Fatal("unmarshaling error: ", err)
|
||||
}
|
||||
// Now test and newTest contain the same data.
|
||||
if test.GetLabel() != newTest.GetLabel() {
|
||||
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
|
||||
}
|
||||
// Use a type switch to determine which oneof was set.
|
||||
switch u := test.Union.(type) {
|
||||
case *pb.Test_Number: // u.Number contains the number.
|
||||
case *pb.Test_Name: // u.Name contains the string.
|
||||
}
|
||||
// etc.
|
||||
}
|
||||
*/
|
||||
package proto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
|
||||
// Marshal reports this when a required field is not initialized.
|
||||
// Unmarshal reports this when a required field is missing from the wire data.
|
||||
type RequiredNotSetError struct{ field string }
|
||||
|
||||
func (e *RequiredNotSetError) Error() string {
|
||||
if e.field == "" {
|
||||
return fmt.Sprintf("proto: required field not set")
|
||||
}
|
||||
return fmt.Sprintf("proto: required field %q not set", e.field)
|
||||
}
|
||||
func (e *RequiredNotSetError) RequiredNotSet() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type invalidUTF8Error struct{ field string }
|
||||
|
||||
func (e *invalidUTF8Error) Error() string {
|
||||
if e.field == "" {
|
||||
return "proto: invalid UTF-8 detected"
|
||||
}
|
||||
return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
|
||||
}
|
||||
func (e *invalidUTF8Error) InvalidUTF8() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
|
||||
// This error should not be exposed to the external API as such errors should
|
||||
// be recreated with the field information.
|
||||
var errInvalidUTF8 = &invalidUTF8Error{}
|
||||
|
||||
// isNonFatal reports whether the error is either a RequiredNotSet error
|
||||
// or a InvalidUTF8 error.
|
||||
func isNonFatal(err error) bool {
|
||||
if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
|
||||
return true
|
||||
}
|
||||
if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type nonFatal struct{ E error }
|
||||
|
||||
// Merge merges err into nf and reports whether err was nil or non-fatal.
// It returns false for any fatal error.
|
||||
func (nf *nonFatal) Merge(err error) (ok bool) {
|
||||
if err == nil {
|
||||
return true // not an error
|
||||
}
|
||||
if !isNonFatal(err) {
|
||||
return false // fatal error
|
||||
}
|
||||
if nf.E == nil {
|
||||
nf.E = err // store first instance of non-fatal error
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Message is implemented by generated protocol buffer messages.
|
||||
type Message interface {
|
||||
Reset()
|
||||
String() string
|
||||
ProtoMessage()
|
||||
}
|
||||
|
||||
// Stats records allocation details about the protocol buffer encoders
|
||||
// and decoders. Useful for tuning the library itself.
|
||||
type Stats struct {
|
||||
Emalloc uint64 // mallocs in encode
|
||||
Dmalloc uint64 // mallocs in decode
|
||||
Encode uint64 // number of encodes
|
||||
Decode uint64 // number of decodes
|
||||
Chit uint64 // number of cache hits
|
||||
Cmiss uint64 // number of cache misses
|
||||
Size uint64 // number of sizes
|
||||
}
|
||||
|
||||
// Set to true to enable stats collection.
|
||||
const collectStats = false
|
||||
|
||||
var stats Stats
|
||||
|
||||
// GetStats returns a copy of the global Stats structure.
|
||||
func GetStats() Stats { return stats }
|
||||
|
||||
// A Buffer is a buffer manager for marshaling and unmarshaling
|
||||
// protocol buffers. It may be reused between invocations to
|
||||
// reduce memory usage. It is not necessary to use a Buffer;
|
||||
// the global functions Marshal and Unmarshal create a
|
||||
// temporary Buffer and are fine for most applications.
|
||||
type Buffer struct {
|
||||
buf []byte // encode/decode byte stream
|
||||
index int // read point
|
||||
|
||||
deterministic bool
|
||||
}
|
||||
|
||||
// NewBuffer allocates a new Buffer and initializes its internal data to
|
||||
// the contents of the argument slice.
|
||||
func NewBuffer(e []byte) *Buffer {
|
||||
return &Buffer{buf: e}
|
||||
}
|
||||
|
||||
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
|
||||
func (p *Buffer) Reset() {
|
||||
p.buf = p.buf[0:0] // for reading/writing
|
||||
p.index = 0 // for reading
|
||||
}
|
||||
|
||||
// SetBuf replaces the internal buffer with the slice,
|
||||
// ready for unmarshaling the contents of the slice.
|
||||
func (p *Buffer) SetBuf(s []byte) {
|
||||
p.buf = s
|
||||
p.index = 0
|
||||
}
|
||||
|
||||
// Bytes returns the contents of the Buffer.
|
||||
func (p *Buffer) Bytes() []byte { return p.buf }
|
||||
|
||||
// SetDeterministic sets whether to use deterministic serialization.
|
||||
//
|
||||
// Deterministic serialization guarantees that for a given binary, equal
|
||||
// messages will always be serialized to the same bytes. This implies:
|
||||
//
|
||||
// - Repeated serialization of a message will return the same bytes.
|
||||
// - Different processes of the same binary (which may be executing on
|
||||
// different machines) will serialize equal messages to the same bytes.
|
||||
//
|
||||
// Note that the deterministic serialization is NOT canonical across
|
||||
// languages. It is not guaranteed to remain stable over time. It is unstable
|
||||
// across different builds with schema changes due to unknown fields.
|
||||
// Users who need canonical serialization (e.g., persistent storage in a
|
||||
// canonical form, fingerprinting, etc.) should define their own
|
||||
// canonicalization specification and implement their own serializer rather
|
||||
// than relying on this API.
|
||||
//
|
||||
// If deterministic serialization is requested, map entries will be sorted
|
||||
// by keys in lexicographic order. This is an implementation detail and
|
||||
// subject to change.
|
||||
func (p *Buffer) SetDeterministic(deterministic bool) {
|
||||
p.deterministic = deterministic
|
||||
}
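A short sketch of deterministic marshaling through a Buffer; pb.MyMessage is again a hypothetical generated type:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	pb "example.com/project/pb" // hypothetical generated package
)

func main() {
	msg := &pb.MyMessage{ /* populated with map fields, etc. */ }

	buf := proto.NewBuffer(nil)
	buf.SetDeterministic(true) // sort map entries by key while encoding
	if err := buf.Marshal(msg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d deterministic bytes\n", len(buf.Bytes()))
}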
|
||||
|
||||
/*
|
||||
* Helper routines for simplifying the creation of optional fields of basic type.
|
||||
*/
|
||||
|
||||
// Bool is a helper routine that allocates a new bool value
|
||||
// to store v and returns a pointer to it.
|
||||
func Bool(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int32 is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int32(v int32) *int32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it, but unlike Int32
|
||||
// its argument value is an int.
|
||||
func Int(v int) *int32 {
|
||||
p := new(int32)
|
||||
*p = int32(v)
|
||||
return p
|
||||
}
|
||||
|
||||
// Int64 is a helper routine that allocates a new int64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int64(v int64) *int64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float32 is a helper routine that allocates a new float32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float32(v float32) *float32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float64 is a helper routine that allocates a new float64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float64(v float64) *float64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint32 is a helper routine that allocates a new uint32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint32(v uint32) *uint32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint64 is a helper routine that allocates a new uint64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint64(v uint64) *uint64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// String is a helper routine that allocates a new string value
|
||||
// to store v and returns a pointer to it.
|
||||
func String(v string) *string {
|
||||
return &v
|
||||
}
|
||||
|
||||
// EnumName is a helper function to simplify printing protocol buffer enums
|
||||
// by name. Given an enum map and a value, it returns a useful string.
|
||||
func EnumName(m map[int32]string, v int32) string {
|
||||
s, ok := m[v]
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(int(v))
|
||||
}
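Reusing the FOO enum maps from the package example, EnumName falls back to the decimal string for unknown values; a minimal check:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

var FOO_name = map[int32]string{17: "X"}

func main() {
	fmt.Println(proto.EnumName(FOO_name, 17)) // "X"
	fmt.Println(proto.EnumName(FOO_name, 3))  // "3" (unknown value)
}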
|
||||
|
||||
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
|
||||
// from their JSON-encoded representation. Given a map from the enum's symbolic
|
||||
// names to its int values, and a byte buffer containing the JSON-encoded
|
||||
// value, it returns an int32 that can be cast to the enum type by the caller.
|
||||
//
|
||||
// The function can deal with both JSON representations, numeric and symbolic.
|
||||
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
||||
if data[0] == '"' {
|
||||
// New style: enums are strings.
|
||||
var repr string
|
||||
if err := json.Unmarshal(data, &repr); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
val, ok := m[repr]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
// Old style: enums are ints.
|
||||
var val int32
|
||||
if err := json.Unmarshal(data, &val); err != nil {
|
||||
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
||||
}
|
||||
return val, nil
|
||||
}
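UnmarshalJSONEnum accepts both the symbolic and the numeric JSON encodings; a minimal sketch using the FOO_value map from the package example:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
)

var FOO_value = map[string]int32{"X": 17}

func main() {
	for _, data := range [][]byte{[]byte(`"X"`), []byte(`17`)} {
		v, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(v) // 17 in both cases
	}
}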
|
||||
|
||||
// DebugPrint dumps the encoded data in b in a debugging format with a header
|
||||
// including the string s. Used in testing but made available for general debugging.
|
||||
func (p *Buffer) DebugPrint(s string, b []byte) {
|
||||
var u uint64
|
||||
|
||||
obuf := p.buf
|
||||
index := p.index
|
||||
p.buf = b
|
||||
p.index = 0
|
||||
depth := 0
|
||||
|
||||
fmt.Printf("\n--- %s ---\n", s)
|
||||
|
||||
out:
|
||||
for {
|
||||
for i := 0; i < depth; i++ {
|
||||
fmt.Print(" ")
|
||||
}
|
||||
|
||||
index := p.index
|
||||
if index == len(p.buf) {
|
||||
break
|
||||
}
|
||||
|
||||
op, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: fetching op err %v\n", index, err)
|
||||
break out
|
||||
}
|
||||
tag := op >> 3
|
||||
wire := op & 7
|
||||
|
||||
switch wire {
|
||||
default:
|
||||
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
|
||||
index, tag, wire)
|
||||
break out
|
||||
|
||||
case WireBytes:
|
||||
var r []byte
|
||||
|
||||
r, err = p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
|
||||
if len(r) <= 6 {
|
||||
for i := 0; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < 3; i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
fmt.Printf(" ..")
|
||||
for i := len(r) - 3; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
case WireFixed32:
|
||||
u, err = p.DecodeFixed32()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
|
||||
|
||||
case WireFixed64:
|
||||
u, err = p.DecodeFixed64()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
|
||||
|
||||
case WireVarint:
|
||||
u, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
|
||||
|
||||
case WireStartGroup:
|
||||
fmt.Printf("%3d: t=%3d start\n", index, tag)
|
||||
depth++
|
||||
|
||||
case WireEndGroup:
|
||||
depth--
|
||||
fmt.Printf("%3d: t=%3d end\n", index, tag)
|
||||
}
|
||||
}
|
||||
|
||||
if depth != 0 {
|
||||
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
p.buf = obuf
|
||||
p.index = index
|
||||
}
|
||||
|
||||
// SetDefaults sets unset protocol buffer fields to their default values.
|
||||
// It only modifies fields that are both unset and have defined defaults.
|
||||
// It recursively sets default values in any non-nil sub-messages.
|
||||
func SetDefaults(pb Message) {
|
||||
setDefaults(reflect.ValueOf(pb), true, false)
|
||||
}
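A minimal sketch of SetDefaults, assuming the hypothetical pb.Test type from the package comment with its default=77 field:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/project/pb" // hypothetical generated package
)

func main() {
	msg := &pb.Test{}      // Type is unset (nil pointer)
	proto.SetDefaults(msg) // fills Type with its declared default (77)
	fmt.Println(msg.GetType())
}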
|
||||
|
||||
// v is a pointer to a struct.
|
||||
func setDefaults(v reflect.Value, recur, zeros bool) {
|
||||
v = v.Elem()
|
||||
|
||||
defaultMu.RLock()
|
||||
dm, ok := defaults[v.Type()]
|
||||
defaultMu.RUnlock()
|
||||
if !ok {
|
||||
dm = buildDefaultMessage(v.Type())
|
||||
defaultMu.Lock()
|
||||
defaults[v.Type()] = dm
|
||||
defaultMu.Unlock()
|
||||
}
|
||||
|
||||
for _, sf := range dm.scalars {
|
||||
f := v.Field(sf.index)
|
||||
if !f.IsNil() {
|
||||
// field already set
|
||||
continue
|
||||
}
|
||||
dv := sf.value
|
||||
if dv == nil && !zeros {
|
||||
// no explicit default, and don't want to set zeros
|
||||
continue
|
||||
}
|
||||
fptr := f.Addr().Interface() // **T
|
||||
// TODO: Consider batching the allocations we do here.
|
||||
switch sf.kind {
|
||||
case reflect.Bool:
|
||||
b := new(bool)
|
||||
if dv != nil {
|
||||
*b = dv.(bool)
|
||||
}
|
||||
*(fptr.(**bool)) = b
|
||||
case reflect.Float32:
|
||||
f := new(float32)
|
||||
if dv != nil {
|
||||
*f = dv.(float32)
|
||||
}
|
||||
*(fptr.(**float32)) = f
|
||||
case reflect.Float64:
|
||||
f := new(float64)
|
||||
if dv != nil {
|
||||
*f = dv.(float64)
|
||||
}
|
||||
*(fptr.(**float64)) = f
|
||||
case reflect.Int32:
|
||||
// might be an enum
|
||||
if ft := f.Type(); ft != int32PtrType {
|
||||
// enum
|
||||
f.Set(reflect.New(ft.Elem()))
|
||||
if dv != nil {
|
||||
f.Elem().SetInt(int64(dv.(int32)))
|
||||
}
|
||||
} else {
|
||||
// int32 field
|
||||
i := new(int32)
|
||||
if dv != nil {
|
||||
*i = dv.(int32)
|
||||
}
|
||||
*(fptr.(**int32)) = i
|
||||
}
|
||||
case reflect.Int64:
|
||||
i := new(int64)
|
||||
if dv != nil {
|
||||
*i = dv.(int64)
|
||||
}
|
||||
*(fptr.(**int64)) = i
|
||||
case reflect.String:
|
||||
s := new(string)
|
||||
if dv != nil {
|
||||
*s = dv.(string)
|
||||
}
|
||||
*(fptr.(**string)) = s
|
||||
case reflect.Uint8:
|
||||
// exceptional case: []byte
|
||||
var b []byte
|
||||
if dv != nil {
|
||||
db := dv.([]byte)
|
||||
b = make([]byte, len(db))
|
||||
copy(b, db)
|
||||
} else {
|
||||
b = []byte{}
|
||||
}
|
||||
*(fptr.(*[]byte)) = b
|
||||
case reflect.Uint32:
|
||||
u := new(uint32)
|
||||
if dv != nil {
|
||||
*u = dv.(uint32)
|
||||
}
|
||||
*(fptr.(**uint32)) = u
|
||||
case reflect.Uint64:
|
||||
u := new(uint64)
|
||||
if dv != nil {
|
||||
*u = dv.(uint64)
|
||||
}
|
||||
*(fptr.(**uint64)) = u
|
||||
default:
|
||||
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
|
||||
}
|
||||
}
|
||||
|
||||
for _, ni := range dm.nested {
|
||||
f := v.Field(ni)
|
||||
// f is *T or []*T or map[T]*T
|
||||
switch f.Kind() {
|
||||
case reflect.Ptr:
|
||||
if f.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(f, recur, zeros)
|
||||
|
||||
case reflect.Slice:
|
||||
for i := 0; i < f.Len(); i++ {
|
||||
e := f.Index(i)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
for _, k := range f.MapKeys() {
|
||||
e := f.MapIndex(k)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// defaults maps a protocol buffer struct type to a slice of the fields,
|
||||
// with its scalar fields set to their proto-declared non-zero default values.
|
||||
defaultMu sync.RWMutex
|
||||
defaults = make(map[reflect.Type]defaultMessage)
|
||||
|
||||
int32PtrType = reflect.TypeOf((*int32)(nil))
|
||||
)
|
||||
|
||||
// defaultMessage represents information about the default values of a message.
|
||||
type defaultMessage struct {
|
||||
scalars []scalarField
|
||||
nested []int // struct field index of nested messages
|
||||
}
|
||||
|
||||
type scalarField struct {
|
||||
index int // struct field index
|
||||
kind reflect.Kind // element type (the T in *T or []T)
|
||||
value interface{} // the proto-declared default value, or nil
|
||||
}
|
||||
|
||||
// t is a struct type.
|
||||
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
|
||||
sprop := GetProperties(t)
|
||||
for _, prop := range sprop.Prop {
|
||||
fi, ok := sprop.decoderTags.get(prop.Tag)
|
||||
if !ok {
|
||||
// XXX_unrecognized
|
||||
continue
|
||||
}
|
||||
ft := t.Field(fi).Type
|
||||
|
||||
sf, nested, err := fieldDefault(ft, prop)
|
||||
switch {
|
||||
case err != nil:
|
||||
log.Print(err)
|
||||
case nested:
|
||||
dm.nested = append(dm.nested, fi)
|
||||
case sf != nil:
|
||||
sf.index = fi
|
||||
dm.scalars = append(dm.scalars, *sf)
|
||||
}
|
||||
}
|
||||
|
||||
return dm
|
||||
}
|
||||
|
||||
// fieldDefault returns the scalarField for field type ft.
|
||||
// sf will be nil if the field can not have a default.
|
||||
// nestedMessage will be true if this is a nested message.
|
||||
// Note that sf.index is not set on return.
|
||||
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
|
||||
var canHaveDefault bool
|
||||
switch ft.Kind() {
|
||||
case reflect.Ptr:
|
||||
if ft.Elem().Kind() == reflect.Struct {
|
||||
nestedMessage = true
|
||||
} else {
|
||||
canHaveDefault = true // proto2 scalar field
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Ptr:
|
||||
nestedMessage = true // repeated message
|
||||
case reflect.Uint8:
|
||||
canHaveDefault = true // bytes field
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
if ft.Elem().Kind() == reflect.Ptr {
|
||||
nestedMessage = true // map with message values
|
||||
}
|
||||
}
|
||||
|
||||
if !canHaveDefault {
|
||||
if nestedMessage {
|
||||
return nil, true, nil
|
||||
}
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
// We now know that ft is a pointer or slice.
|
||||
sf = &scalarField{kind: ft.Elem().Kind()}
|
||||
|
||||
// scalar fields without defaults
|
||||
if !prop.HasDefault {
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// a scalar field: either *T or []byte
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Bool:
|
||||
x, err := strconv.ParseBool(prop.Default)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Float32:
|
||||
x, err := strconv.ParseFloat(prop.Default, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = float32(x)
|
||||
case reflect.Float64:
|
||||
x, err := strconv.ParseFloat(prop.Default, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Int32:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = int32(x)
|
||||
case reflect.Int64:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.String:
|
||||
sf.value = prop.Default
|
||||
case reflect.Uint8:
|
||||
// []byte (not *uint8)
|
||||
sf.value = []byte(prop.Default)
|
||||
case reflect.Uint32:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = uint32(x)
|
||||
case reflect.Uint64:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
default:
|
||||
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
|
||||
}
|
||||
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// mapKeys returns a sort.Interface to be used for sorting the map keys.
|
||||
// Map fields may have key types of non-float scalars, strings and enums.
|
||||
func mapKeys(vs []reflect.Value) sort.Interface {
|
||||
s := mapKeySorter{vs: vs}
|
||||
|
||||
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
|
||||
if len(vs) == 0 {
|
||||
return s
|
||||
}
|
||||
switch vs[0].Kind() {
|
||||
case reflect.Int32, reflect.Int64:
|
||||
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
|
||||
case reflect.Bool:
|
||||
s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
|
||||
case reflect.String:
|
||||
s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
type mapKeySorter struct {
|
||||
vs []reflect.Value
|
||||
less func(a, b reflect.Value) bool
|
||||
}
|
||||
|
||||
func (s mapKeySorter) Len() int { return len(s.vs) }
|
||||
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
|
||||
func (s mapKeySorter) Less(i, j int) bool {
|
||||
return s.less(s.vs[i], s.vs[j])
|
||||
}
|
||||
|
||||
// isProto3Zero reports whether v is a zero proto3 value.
|
||||
func isProto3Zero(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.String:
|
||||
return v.String() == ""
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
|
||||
// to assert that the generated code is compatible with this version of the proto package.
|
||||
const ProtoPackageIsVersion2 = true
|
||||
|
||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
||||
// to assert that the generated code is compatible with this version of the proto package.
|
||||
const ProtoPackageIsVersion1 = true
|
||||
|
||||
// InternalMessageInfo is a type used internally by generated .pb.go files.
|
||||
// This type is not intended to be used by non-generated code.
|
||||
// This type is not subject to any compatibility guarantee.
|
||||
type InternalMessageInfo struct {
|
||||
marshal *marshalInfo
|
||||
unmarshal *unmarshalInfo
|
||||
merge *mergeInfo
|
||||
discard *discardInfo
|
||||
}
vendor/github.com/golang/protobuf/proto/message_set.go (generated, vendored)
@@ -1,314 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Support for message sets.
|
||||
*/
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
|
||||
// A message type ID is required for storing a protocol buffer in a message set.
|
||||
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
|
||||
|
||||
// The first two types (_MessageSet_Item and messageSet)
|
||||
// model what the protocol compiler produces for the following protocol message:
|
||||
// message MessageSet {
|
||||
// repeated group Item = 1 {
|
||||
// required int32 type_id = 2;
|
||||
// required string message = 3;
|
||||
// };
|
||||
// }
|
||||
// That is the MessageSet wire format. We can't use a proto to generate these
|
||||
// because that would introduce a circular dependency between it and this package.
|
||||
|
||||
type _MessageSet_Item struct {
|
||||
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
|
||||
Message []byte `protobuf:"bytes,3,req,name=message"`
|
||||
}
|
||||
|
||||
type messageSet struct {
|
||||
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
|
||||
XXX_unrecognized []byte
|
||||
// TODO: caching?
|
||||
}
|
||||
|
||||
// Make sure messageSet is a Message.
|
||||
var _ Message = (*messageSet)(nil)
|
||||
|
||||
// messageTypeIder is an interface satisfied by a protocol buffer type
|
||||
// that may be stored in a MessageSet.
|
||||
type messageTypeIder interface {
|
||||
MessageTypeId() int32
|
||||
}
|
||||
|
||||
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
id := mti.MessageTypeId()
|
||||
for _, item := range ms.Item {
|
||||
if *item.TypeId == id {
|
||||
return item
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Has(pb Message) bool {
|
||||
return ms.find(pb) != nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Unmarshal(pb Message) error {
|
||||
if item := ms.find(pb); item != nil {
|
||||
return Unmarshal(item.Message, pb)
|
||||
}
|
||||
if _, ok := pb.(messageTypeIder); !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
return nil // TODO: return error instead?
|
||||
}
|
||||
|
||||
func (ms *messageSet) Marshal(pb Message) error {
|
||||
msg, err := Marshal(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if item := ms.find(pb); item != nil {
|
||||
// reuse existing item
|
||||
item.Message = msg
|
||||
return nil
|
||||
}
|
||||
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
|
||||
mtid := mti.MessageTypeId()
|
||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
||||
TypeId: &mtid,
|
||||
Message: msg,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Reset() { *ms = messageSet{} }
|
||||
func (ms *messageSet) String() string { return CompactTextString(ms) }
|
||||
func (*messageSet) ProtoMessage() {}
|
||||
|
||||
// Support for the message_set_wire_format message option.
|
||||
|
||||
func skipVarint(buf []byte) []byte {
|
||||
i := 0
|
||||
for ; buf[i]&0x80 != 0; i++ {
|
||||
}
|
||||
return buf[i+1:]
|
||||
}
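skipVarint relies on the wire format's continuation bit; the exported EncodeVarint/DecodeVarint helpers make the same length calculation explicit. A small standalone check:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// A varint (300 = 0xAC 0x02) followed by two payload bytes.
	buf := append(proto.EncodeVarint(300), 0xAA, 0xBB)
	_, n := proto.DecodeVarint(buf)
	fmt.Printf("varint occupies %d bytes; payload: % x\n", n, buf[n:])
}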
|
||||
|
||||
// MarshalMessageSet encodes the extension map represented by exts in the message set wire format.
|
||||
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSet(exts interface{}) ([]byte, error) {
|
||||
return marshalMessageSet(exts, false)
|
||||
}
|
||||
|
||||
// marshalMessageSet implements the above function, with an option to turn deterministic marshaling on or off.
|
||||
func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
var u marshalInfo
|
||||
siz := u.sizeMessageSet(exts)
|
||||
b := make([]byte, 0, siz)
|
||||
return u.appendMessageSet(b, exts, deterministic)
|
||||
|
||||
case map[int32]Extension:
|
||||
// This is an old-style extension map.
|
||||
// Wrap it in a new-style XXX_InternalExtensions.
|
||||
ie := XXX_InternalExtensions{
|
||||
p: &struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
}{
|
||||
extensionMap: exts,
|
||||
},
|
||||
}
|
||||
|
||||
var u marshalInfo
|
||||
siz := u.sizeMessageSet(&ie)
|
||||
b := make([]byte, 0, siz)
|
||||
return u.appendMessageSet(b, &ie, deterministic)
|
||||
|
||||
default:
|
||||
return nil, errors.New("proto: not an extension map")
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
||||
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
m = exts.extensionsWrite()
|
||||
case map[int32]Extension:
|
||||
m = exts
|
||||
default:
|
||||
return errors.New("proto: not an extension map")
|
||||
}
|
||||
|
||||
ms := new(messageSet)
|
||||
if err := Unmarshal(buf, ms); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, item := range ms.Item {
|
||||
id := *item.TypeId
|
||||
msg := item.Message
|
||||
|
||||
// Restore wire type and field number varint, plus length varint.
|
||||
// Be careful to preserve duplicate items.
|
||||
b := EncodeVarint(uint64(id)<<3 | WireBytes)
|
||||
if ext, ok := m[id]; ok {
|
||||
// Existing data; rip off the tag and length varint
|
||||
// so we join the new data correctly.
|
||||
// We can assume that ext.enc is set because we are unmarshaling.
|
||||
o := ext.enc[len(b):] // skip wire type and field number
|
||||
_, n := DecodeVarint(o) // calculate length of length varint
|
||||
o = o[n:] // skip length varint
|
||||
msg = append(o, msg...) // join old data and new data
|
||||
}
|
||||
b = append(b, EncodeVarint(uint64(len(msg)))...)
|
||||
b = append(b, msg...)
|
||||
|
||||
m[id] = Extension{enc: b}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalMessageSetJSON encodes the extension map represented by exts in JSON format.
|
||||
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
var mu sync.Locker
|
||||
m, mu = exts.extensionsRead()
|
||||
if m != nil {
|
||||
// Keep the extensions map locked until we're done marshaling to prevent
|
||||
// races between marshaling and unmarshaling the lazily-{en,de}coded
|
||||
// values.
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
}
|
||||
case map[int32]Extension:
|
||||
m = exts
|
||||
default:
|
||||
return nil, errors.New("proto: not an extension map")
|
||||
}
|
||||
var b bytes.Buffer
|
||||
b.WriteByte('{')
|
||||
|
||||
// Process the map in key order for deterministic output.
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
|
||||
|
||||
for i, id := range ids {
|
||||
ext := m[id]
|
||||
msd, ok := messageSetMap[id]
|
||||
if !ok {
|
||||
// Unknown type; we can't render it, so skip it.
|
||||
continue
|
||||
}
|
||||
|
||||
if i > 0 && b.Len() > 1 {
|
||||
b.WriteByte(',')
|
||||
}
|
||||
|
||||
fmt.Fprintf(&b, `"[%s]":`, msd.name)
|
||||
|
||||
x := ext.value
|
||||
if x == nil {
|
||||
x = reflect.New(msd.t.Elem()).Interface()
|
||||
if err := Unmarshal(ext.enc, x.(Message)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
d, err := json.Marshal(x)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.Write(d)
|
||||
}
|
||||
b.WriteByte('}')
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
|
||||
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
|
||||
// Common-case fast path.
|
||||
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// This is fairly tricky, and it's not clear that it is needed.
|
||||
return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
|
||||
}
|
||||
|
||||
// A global registry of types that can be used in a MessageSet.
|
||||
|
||||
var messageSetMap = make(map[int32]messageSetDesc)
|
||||
|
||||
type messageSetDesc struct {
|
||||
t reflect.Type // pointer to struct
|
||||
name string
|
||||
}
|
||||
|
||||
// RegisterMessageSetType is called from the generated code.
|
||||
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
|
||||
messageSetMap[fieldNum] = messageSetDesc{
|
||||
t: reflect.TypeOf(m),
|
||||
name: name,
|
||||
}
|
||||
}
vendor/github.com/golang/protobuf/proto/pointer_reflect.go (generated, vendored)
@@ -1,357 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build purego appengine js
|
||||
|
||||
// This file contains an implementation of proto field accesses using package reflect.
|
||||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
||||
// be used on App Engine.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const unsafeAllowed = false
|
||||
|
||||
// A field identifies a field in a struct, accessible from a pointer.
|
||||
// In this implementation, a field is identified by the sequence of field indices
|
||||
// passed to reflect's FieldByIndex.
|
||||
type field []int
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return f.Index
|
||||
}
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
var invalidField = field(nil)
|
||||
|
||||
// zeroField is a noop when calling pointer.offset.
|
||||
var zeroField = field([]int{})
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool { return f != nil }
|
||||
|
||||
// The pointer type is for the table-driven decoder.
|
||||
// The implementation here uses a reflect.Value of pointer type to
|
||||
// create a generic pointer. In pointer_unsafe.go we use unsafe
|
||||
// instead of reflect to implement the same (but faster) interface.
|
||||
type pointer struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// toPointer converts an interface of pointer type to a pointer
|
||||
// that points to the same target.
|
||||
func toPointer(i *Message) pointer {
|
||||
return pointer{v: reflect.ValueOf(*i)}
|
||||
}
|
||||
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr bool) pointer {
|
||||
v := reflect.ValueOf(*i)
|
||||
u := reflect.New(v.Type())
|
||||
u.Elem().Set(v)
|
||||
return pointer{v: u}
|
||||
}
|
||||
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
func valToPointer(v reflect.Value) pointer {
|
||||
return pointer{v: v}
|
||||
}
|
||||
|
||||
// offset converts from a pointer to a structure to a pointer to
|
||||
// one of its fields.
|
||||
func (p pointer) offset(f field) pointer {
|
||||
return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
|
||||
}
|
||||
|
||||
func (p pointer) isNil() bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
// grow updates the slice s in place to make it one element longer.
|
||||
// s must be addressable.
|
||||
// Returns the (addressable) new element.
|
||||
func grow(s reflect.Value) reflect.Value {
|
||||
n, m := s.Len(), s.Cap()
|
||||
if n < m {
|
||||
s.SetLen(n + 1)
|
||||
} else {
|
||||
s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
|
||||
}
|
||||
return s.Index(n)
|
||||
}
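A standalone sketch of the same SetLen/Append growth logic used by the reflect-based pointer implementation (illustrative code, not part of the package):

package main

import (
	"fmt"
	"reflect"
)

// growOne appends one zero element to the addressable slice s and
// returns the new element, mirroring the grow helper above.
func growOne(s reflect.Value) reflect.Value {
	n, m := s.Len(), s.Cap()
	if n < m {
		s.SetLen(n + 1) // room in the backing array: just extend the length
	} else {
		s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) // reallocate
	}
	return s.Index(n)
}

func main() {
	xs := make([]int, 0, 2)
	v := reflect.ValueOf(&xs).Elem() // addressable slice value
	growOne(v).SetInt(7)
	growOne(v).SetInt(9)
	fmt.Println(xs) // [7 9]
}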
|
||||
|
||||
func (p pointer) toInt64() *int64 {
|
||||
return p.v.Interface().(*int64)
|
||||
}
|
||||
func (p pointer) toInt64Ptr() **int64 {
|
||||
return p.v.Interface().(**int64)
|
||||
}
|
||||
func (p pointer) toInt64Slice() *[]int64 {
|
||||
return p.v.Interface().(*[]int64)
|
||||
}
|
||||
|
||||
var int32ptr = reflect.TypeOf((*int32)(nil))
|
||||
|
||||
func (p pointer) toInt32() *int32 {
|
||||
return p.v.Convert(int32ptr).Interface().(*int32)
|
||||
}
|
||||
|
||||
// The toInt32Ptr/Slice methods don't work because of enums.
|
||||
// Instead, we must use set/get methods for the int32ptr/slice case.
|
||||
/*
|
||||
func (p pointer) toInt32Ptr() **int32 {
|
||||
return p.v.Interface().(**int32)
|
||||
}
|
||||
func (p pointer) toInt32Slice() *[]int32 {
|
||||
return p.v.Interface().(*[]int32)
|
||||
}
|
||||
*/
|
||||
func (p pointer) getInt32Ptr() *int32 {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
return p.v.Elem().Interface().(*int32)
|
||||
}
|
||||
// an enum
|
||||
return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
|
||||
}
|
||||
func (p pointer) setInt32Ptr(v int32) {
|
||||
// Allocate value in a *int32. Possibly convert that to a *enum.
|
||||
// Then assign it to a **int32 or **enum.
|
||||
// Note: we can convert *int32 to *enum, but we can't convert
|
||||
// **int32 to **enum!
|
||||
p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
|
||||
}
|
||||
|
||||
// getInt32Slice copies []int32 from p as a new slice.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) getInt32Slice() []int32 {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
return p.v.Elem().Interface().([]int32)
|
||||
}
|
||||
// an enum
|
||||
// Allocate a []int32, then assign []enum's values into it.
|
||||
// Note: we can't convert []enum to []int32.
|
||||
slice := p.v.Elem()
|
||||
s := make([]int32, slice.Len())
|
||||
for i := 0; i < slice.Len(); i++ {
|
||||
s[i] = int32(slice.Index(i).Int())
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// setInt32Slice copies []int32 into p as a new slice.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) setInt32Slice(v []int32) {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
p.v.Elem().Set(reflect.ValueOf(v))
|
||||
return
|
||||
}
|
||||
// an enum
|
||||
// Allocate a []enum, then assign []int32's values into it.
|
||||
// Note: we can't convert []enum to []int32.
|
||||
slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
|
||||
for i, x := range v {
|
||||
slice.Index(i).SetInt(int64(x))
|
||||
}
|
||||
p.v.Elem().Set(slice)
|
||||
}
|
||||
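// Illustrative sketch, not part of the original file: reflect cannot convert a
// []enum value to []int32 in one step, which is why getInt32Slice and
// setInt32Slice above copy element by element. "color" is a hypothetical enum type.
type color int32

func exampleEnumSlice(cs []color) []int32 {
	v := reflect.ValueOf(cs)
	// v.Convert(reflect.TypeOf([]int32(nil))) would panic: the element types differ.
	out := make([]int32, v.Len())
	for i := range out {
		out[i] = int32(v.Index(i).Int()) // element-wise conversion works
	}
	return out
}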
func (p pointer) appendInt32Slice(v int32) {
|
||||
grow(p.v.Elem()).SetInt(int64(v))
|
||||
}
|
||||
|
||||
func (p pointer) toUint64() *uint64 {
|
||||
return p.v.Interface().(*uint64)
|
||||
}
|
||||
func (p pointer) toUint64Ptr() **uint64 {
|
||||
return p.v.Interface().(**uint64)
|
||||
}
|
||||
func (p pointer) toUint64Slice() *[]uint64 {
|
||||
return p.v.Interface().(*[]uint64)
|
||||
}
|
||||
func (p pointer) toUint32() *uint32 {
|
||||
return p.v.Interface().(*uint32)
|
||||
}
|
||||
func (p pointer) toUint32Ptr() **uint32 {
|
||||
return p.v.Interface().(**uint32)
|
||||
}
|
||||
func (p pointer) toUint32Slice() *[]uint32 {
|
||||
return p.v.Interface().(*[]uint32)
|
||||
}
|
||||
func (p pointer) toBool() *bool {
|
||||
return p.v.Interface().(*bool)
|
||||
}
|
||||
func (p pointer) toBoolPtr() **bool {
|
||||
return p.v.Interface().(**bool)
|
||||
}
|
||||
func (p pointer) toBoolSlice() *[]bool {
|
||||
return p.v.Interface().(*[]bool)
|
||||
}
|
||||
func (p pointer) toFloat64() *float64 {
|
||||
return p.v.Interface().(*float64)
|
||||
}
|
||||
func (p pointer) toFloat64Ptr() **float64 {
|
||||
return p.v.Interface().(**float64)
|
||||
}
|
||||
func (p pointer) toFloat64Slice() *[]float64 {
|
||||
return p.v.Interface().(*[]float64)
|
||||
}
|
||||
func (p pointer) toFloat32() *float32 {
|
||||
return p.v.Interface().(*float32)
|
||||
}
|
||||
func (p pointer) toFloat32Ptr() **float32 {
|
||||
return p.v.Interface().(**float32)
|
||||
}
|
||||
func (p pointer) toFloat32Slice() *[]float32 {
|
||||
return p.v.Interface().(*[]float32)
|
||||
}
|
||||
func (p pointer) toString() *string {
|
||||
return p.v.Interface().(*string)
|
||||
}
|
||||
func (p pointer) toStringPtr() **string {
|
||||
return p.v.Interface().(**string)
|
||||
}
|
||||
func (p pointer) toStringSlice() *[]string {
|
||||
return p.v.Interface().(*[]string)
|
||||
}
|
||||
func (p pointer) toBytes() *[]byte {
|
||||
return p.v.Interface().(*[]byte)
|
||||
}
|
||||
func (p pointer) toBytesSlice() *[][]byte {
|
||||
return p.v.Interface().(*[][]byte)
|
||||
}
|
||||
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
||||
return p.v.Interface().(*XXX_InternalExtensions)
|
||||
}
|
||||
func (p pointer) toOldExtensions() *map[int32]Extension {
|
||||
return p.v.Interface().(*map[int32]Extension)
|
||||
}
|
||||
func (p pointer) getPointer() pointer {
|
||||
return pointer{v: p.v.Elem()}
|
||||
}
|
||||
func (p pointer) setPointer(q pointer) {
|
||||
p.v.Elem().Set(q.v)
|
||||
}
|
||||
func (p pointer) appendPointer(q pointer) {
|
||||
grow(p.v.Elem()).Set(q.v)
|
||||
}
|
||||
|
||||
// getPointerSlice copies []*T from p as a new []pointer.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) getPointerSlice() []pointer {
|
||||
if p.v.IsNil() {
|
||||
return nil
|
||||
}
|
||||
n := p.v.Elem().Len()
|
||||
s := make([]pointer, n)
|
||||
for i := 0; i < n; i++ {
|
||||
s[i] = pointer{v: p.v.Elem().Index(i)}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// setPointerSlice copies []pointer into p as a new []*T.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) setPointerSlice(v []pointer) {
|
||||
if v == nil {
|
||||
p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
|
||||
return
|
||||
}
|
||||
s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
|
||||
for _, p := range v {
|
||||
s = reflect.Append(s, p.v)
|
||||
}
|
||||
p.v.Elem().Set(s)
|
||||
}
|
||||
|
||||
// getInterfacePointer returns a pointer that points to the
|
||||
// interface data of the interface pointed by p.
|
||||
func (p pointer) getInterfacePointer() pointer {
|
||||
if p.v.Elem().IsNil() {
|
||||
return pointer{v: p.v.Elem()}
|
||||
}
|
||||
return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
|
||||
}
|
||||
|
||||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||
// TODO: check that p.v.Type().Elem() == t?
|
||||
return p.v
|
||||
}
|
||||
|
||||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
|
||||
var atomicLock sync.Mutex
|
vendor/github.com/golang/protobuf/proto/pointer_unsafe.go (generated, vendored): 308 deletions
@@ -1,308 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build !purego,!appengine,!js
|
||||
|
||||
// This file contains the implementation of the proto field accesses using package unsafe.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const unsafeAllowed = true
|
||||
|
||||
// A field identifies a field in a struct, accessible from a pointer.
|
||||
// In this implementation, a field is identified by its byte offset from the start of the struct.
|
||||
type field uintptr
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return field(f.Offset)
|
||||
}
|
||||
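// Illustrative sketch, not part of the original file: in this implementation a
// field is simply its byte offset, taken from reflect.StructField.Offset.
func exampleFieldOffset() field {
	type t struct {
		A int32
		B int64
	}
	f, _ := reflect.TypeOf(t{}).FieldByName("B")
	return toField(&f) // field(8) on 64-bit platforms, due to the alignment of B
}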
|
||||
// invalidField is an invalid field identifier.
|
||||
const invalidField = ^field(0)
|
||||
|
||||
// zeroField is a noop when calling pointer.offset.
|
||||
const zeroField = field(0)
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool {
|
||||
return f != invalidField
|
||||
}
|
||||
|
||||
// The pointer type below is for the new table-driven encoder/decoder.
|
||||
// The implementation here uses unsafe.Pointer to create a generic pointer.
|
||||
// In pointer_reflect.go we use reflect instead of unsafe to implement
|
||||
// the same (but slower) interface.
|
||||
type pointer struct {
|
||||
p unsafe.Pointer
|
||||
}
|
||||
|
||||
// size of pointer
|
||||
var ptrSize = unsafe.Sizeof(uintptr(0))
|
||||
|
||||
// toPointer converts an interface of pointer type to a pointer
|
||||
// that points to the same target.
|
||||
func toPointer(i *Message) pointer {
|
||||
// Super-tricky - read pointer out of data word of interface value.
|
||||
// Saves ~25ns over the equivalent:
|
||||
// return valToPointer(reflect.ValueOf(*i))
|
||||
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
}
|
||||
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr bool) pointer {
|
||||
// Super-tricky - read or get the address of data word of interface value.
|
||||
if isptr {
|
||||
// The interface is of pointer type, thus it is a direct interface.
|
||||
// The data word is the pointer data itself. We take its address.
|
||||
return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
|
||||
}
|
||||
// The interface is not of pointer type. The data word is the pointer
|
||||
// to the data.
|
||||
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
}
|
||||
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
func valToPointer(v reflect.Value) pointer {
|
||||
return pointer{p: unsafe.Pointer(v.Pointer())}
|
||||
}
|
||||
|
||||
// offset converts from a pointer to a structure to a pointer to
|
||||
// one of its fields.
|
||||
func (p pointer) offset(f field) pointer {
|
||||
// For safety, we should panic if !f.IsValid(); however, calling panic causes
|
||||
// this to no longer be inlineable, which is a serious performance cost.
|
||||
/*
|
||||
if !f.IsValid() {
|
||||
panic("invalid field")
|
||||
}
|
||||
*/
|
||||
return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
|
||||
}
|
||||
|
||||
func (p pointer) isNil() bool {
|
||||
return p.p == nil
|
||||
}
|
||||
|
||||
func (p pointer) toInt64() *int64 {
|
||||
return (*int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt64Ptr() **int64 {
|
||||
return (**int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt64Slice() *[]int64 {
|
||||
return (*[]int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt32() *int32 {
|
||||
return (*int32)(p.p)
|
||||
}
|
||||
|
||||
// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
|
||||
/*
|
||||
func (p pointer) toInt32Ptr() **int32 {
|
||||
return (**int32)(p.p)
|
||||
}
|
||||
func (p pointer) toInt32Slice() *[]int32 {
|
||||
return (*[]int32)(p.p)
|
||||
}
|
||||
*/
|
||||
func (p pointer) getInt32Ptr() *int32 {
|
||||
return *(**int32)(p.p)
|
||||
}
|
||||
func (p pointer) setInt32Ptr(v int32) {
|
||||
*(**int32)(p.p) = &v
|
||||
}
|
||||
|
||||
// getInt32Slice loads a []int32 from p.
|
||||
// The value returned is aliased with the original slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) getInt32Slice() []int32 {
|
||||
return *(*[]int32)(p.p)
|
||||
}
|
||||
|
||||
// setInt32Slice stores a []int32 to p.
|
||||
// The value set is aliased with the input slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) setInt32Slice(v []int32) {
|
||||
*(*[]int32)(p.p) = v
|
||||
}
|
||||
|
||||
// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
|
||||
func (p pointer) appendInt32Slice(v int32) {
|
||||
s := (*[]int32)(p.p)
|
||||
*s = append(*s, v)
|
||||
}
|
||||
|
||||
func (p pointer) toUint64() *uint64 {
|
||||
return (*uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint64Ptr() **uint64 {
|
||||
return (**uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint64Slice() *[]uint64 {
|
||||
return (*[]uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32() *uint32 {
|
||||
return (*uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32Ptr() **uint32 {
|
||||
return (**uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32Slice() *[]uint32 {
|
||||
return (*[]uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toBool() *bool {
|
||||
return (*bool)(p.p)
|
||||
}
|
||||
func (p pointer) toBoolPtr() **bool {
|
||||
return (**bool)(p.p)
|
||||
}
|
||||
func (p pointer) toBoolSlice() *[]bool {
|
||||
return (*[]bool)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64() *float64 {
|
||||
return (*float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64Ptr() **float64 {
|
||||
return (**float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64Slice() *[]float64 {
|
||||
return (*[]float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32() *float32 {
|
||||
return (*float32)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32Ptr() **float32 {
|
||||
return (**float32)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32Slice() *[]float32 {
|
||||
return (*[]float32)(p.p)
|
||||
}
|
||||
func (p pointer) toString() *string {
|
||||
return (*string)(p.p)
|
||||
}
|
||||
func (p pointer) toStringPtr() **string {
|
||||
return (**string)(p.p)
|
||||
}
|
||||
func (p pointer) toStringSlice() *[]string {
|
||||
return (*[]string)(p.p)
|
||||
}
|
||||
func (p pointer) toBytes() *[]byte {
|
||||
return (*[]byte)(p.p)
|
||||
}
|
||||
func (p pointer) toBytesSlice() *[][]byte {
|
||||
return (*[][]byte)(p.p)
|
||||
}
|
||||
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
||||
return (*XXX_InternalExtensions)(p.p)
|
||||
}
|
||||
func (p pointer) toOldExtensions() *map[int32]Extension {
|
||||
return (*map[int32]Extension)(p.p)
|
||||
}
|
||||
|
||||
// getPointerSlice loads []*T from p as a []pointer.
|
||||
// The value returned is aliased with the original slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) getPointerSlice() []pointer {
|
||||
// Super-tricky - p should point to a []*T where T is a
|
||||
// message type. We load it as []pointer.
|
||||
return *(*[]pointer)(p.p)
|
||||
}
|
||||
|
||||
// setPointerSlice stores []pointer into p as a []*T.
|
||||
// The value set is aliased with the input slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) setPointerSlice(v []pointer) {
|
||||
// Super-tricky - p should point to a []*T where T is a
|
||||
// message type. We store it as []pointer.
|
||||
*(*[]pointer)(p.p) = v
|
||||
}
|
||||
|
||||
// getPointer loads the pointer at p and returns it.
|
||||
func (p pointer) getPointer() pointer {
|
||||
return pointer{p: *(*unsafe.Pointer)(p.p)}
|
||||
}
|
||||
|
||||
// setPointer stores the pointer q at p.
|
||||
func (p pointer) setPointer(q pointer) {
|
||||
*(*unsafe.Pointer)(p.p) = q.p
|
||||
}
|
||||
|
||||
// append q to the slice pointed to by p.
|
||||
func (p pointer) appendPointer(q pointer) {
|
||||
s := (*[]unsafe.Pointer)(p.p)
|
||||
*s = append(*s, q.p)
|
||||
}
|
||||
|
||||
// getInterfacePointer returns a pointer that points to the
|
||||
// interface data of the interface pointed by p.
|
||||
func (p pointer) getInterfacePointer() pointer {
|
||||
// Super-tricky - read pointer out of data word of interface value.
|
||||
return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
|
||||
}
|
||||
|
||||
// asPointerTo returns a reflect.Value that is a pointer to an
|
||||
// object of type t stored at p.
|
||||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||
return reflect.NewAt(t, p.p)
|
||||
}
|
||||
|
||||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
||||
return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
||||
return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
||||
return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
||||
return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
vendor/github.com/golang/protobuf/proto/properties.go (generated, vendored): 544 deletions
@@ -1,544 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for encoding data into the wire format for protocol buffers.
|
||||
*/
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const debug bool = false
|
||||
|
||||
// Constants that identify the encoding of a value on the wire.
|
||||
const (
|
||||
WireVarint = 0
|
||||
WireFixed64 = 1
|
||||
WireBytes = 2
|
||||
WireStartGroup = 3
|
||||
WireEndGroup = 4
|
||||
WireFixed32 = 5
|
||||
)
|
||||
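// Illustrative sketch, not part of the original file: on the wire every field
// value is preceded by a key that packs the field tag together with one of the
// wire-type constants above.
func exampleFieldKey() uint64 {
	const tag = 1
	return uint64(tag)<<3 | WireVarint // 0x08: "field 1, varint"
}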
|
||||
// tagMap is an optimization over map[int]int for typical protocol buffer
|
||||
// use-cases. Encoded protocol buffers are often in tag order with small tag
|
||||
// numbers.
|
||||
type tagMap struct {
|
||||
fastTags []int
|
||||
slowTags map[int]int
|
||||
}
|
||||
|
||||
// tagMapFastLimit is the upper bound on the tag number that will be stored in
|
||||
// the tagMap slice rather than its map.
|
||||
const tagMapFastLimit = 1024
|
||||
|
||||
func (p *tagMap) get(t int) (int, bool) {
|
||||
if t > 0 && t < tagMapFastLimit {
|
||||
if t >= len(p.fastTags) {
|
||||
return 0, false
|
||||
}
|
||||
fi := p.fastTags[t]
|
||||
return fi, fi >= 0
|
||||
}
|
||||
fi, ok := p.slowTags[t]
|
||||
return fi, ok
|
||||
}
|
||||
|
||||
func (p *tagMap) put(t int, fi int) {
|
||||
if t > 0 && t < tagMapFastLimit {
|
||||
for len(p.fastTags) < t+1 {
|
||||
p.fastTags = append(p.fastTags, -1)
|
||||
}
|
||||
p.fastTags[t] = fi
|
||||
return
|
||||
}
|
||||
if p.slowTags == nil {
|
||||
p.slowTags = make(map[int]int)
|
||||
}
|
||||
p.slowTags[t] = fi
|
||||
}
|
||||
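// Illustrative sketch, not part of the original file: tags below
// tagMapFastLimit are kept in the fastTags slice, while larger tags fall back
// to the slowTags map.
func exampleTagMap() int {
	var tm tagMap
	tm.put(3, 0)    // stored in fastTags[3]
	tm.put(5000, 1) // stored in slowTags[5000]
	fi, _ := tm.get(3)
	return fi // 0
}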
|
||||
// StructProperties represents properties for all the fields of a struct.
|
||||
// decoderTags and decoderOrigNames should only be used by the decoder.
|
||||
type StructProperties struct {
|
||||
Prop []*Properties // properties for each field
|
||||
reqCount int // required count
|
||||
decoderTags tagMap // map from proto tag to struct field number
|
||||
decoderOrigNames map[string]int // map from original name to struct field number
|
||||
order []int // list of struct field numbers in tag order
|
||||
|
||||
// OneofTypes contains information about the oneof fields in this message.
|
||||
// It is keyed by the original name of a field.
|
||||
OneofTypes map[string]*OneofProperties
|
||||
}
|
||||
|
||||
// OneofProperties represents information about a specific field in a oneof.
|
||||
type OneofProperties struct {
|
||||
Type reflect.Type // pointer to generated struct type for this oneof field
|
||||
Field int // struct field number of the containing oneof in the message
|
||||
Prop *Properties
|
||||
}
|
||||
|
||||
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
|
||||
// See encode.go, (*Buffer).enc_struct.
|
||||
|
||||
func (sp *StructProperties) Len() int { return len(sp.order) }
|
||||
func (sp *StructProperties) Less(i, j int) bool {
|
||||
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
|
||||
}
|
||||
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
|
||||
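// Illustrative sketch, not part of the original file: because StructProperties
// implements sort.Interface over sp.order, getPropertiesLocked below can call
// sort.Sort to put the field indices into ascending tag order.
func exampleTagOrder(sp *StructProperties) {
	sort.Sort(sp) // sp.order now lists struct field numbers in tag order
}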
|
||||
// Properties represents the protocol-specific behavior of a single struct field.
|
||||
type Properties struct {
|
||||
Name string // name of the field, for error messages
|
||||
OrigName string // original name before protocol compiler (always set)
|
||||
JSONName string // name to use for JSON; determined by protoc
|
||||
Wire string
|
||||
WireType int
|
||||
Tag int
|
||||
Required bool
|
||||
Optional bool
|
||||
Repeated bool
|
||||
Packed bool // relevant for repeated primitives only
|
||||
Enum string // set for enum types only
|
||||
proto3 bool // whether this is known to be a proto3 field
|
||||
oneof bool // whether this is a oneof field
|
||||
|
||||
Default string // default value
|
||||
HasDefault bool // whether an explicit default was provided
|
||||
|
||||
stype reflect.Type // set for struct types only
|
||||
sprop *StructProperties // set for struct types only
|
||||
|
||||
mtype reflect.Type // set for map types only
|
||||
MapKeyProp *Properties // set for map types only
|
||||
MapValProp *Properties // set for map types only
|
||||
}
|
||||
|
||||
// String formats the properties in the protobuf struct field tag style.
|
||||
func (p *Properties) String() string {
|
||||
s := p.Wire
|
||||
s += ","
|
||||
s += strconv.Itoa(p.Tag)
|
||||
if p.Required {
|
||||
s += ",req"
|
||||
}
|
||||
if p.Optional {
|
||||
s += ",opt"
|
||||
}
|
||||
if p.Repeated {
|
||||
s += ",rep"
|
||||
}
|
||||
if p.Packed {
|
||||
s += ",packed"
|
||||
}
|
||||
s += ",name=" + p.OrigName
|
||||
if p.JSONName != p.OrigName {
|
||||
s += ",json=" + p.JSONName
|
||||
}
|
||||
if p.proto3 {
|
||||
s += ",proto3"
|
||||
}
|
||||
if p.oneof {
|
||||
s += ",oneof"
|
||||
}
|
||||
if len(p.Enum) > 0 {
|
||||
s += ",enum=" + p.Enum
|
||||
}
|
||||
if p.HasDefault {
|
||||
s += ",def=" + p.Default
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Parse populates p by parsing a string in the protobuf struct field tag style.
|
||||
func (p *Properties) Parse(s string) {
|
||||
// "bytes,49,opt,name=foo,def=hello!"
|
||||
fields := strings.Split(s, ",") // breaks def=, but handled below.
|
||||
if len(fields) < 2 {
|
||||
fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
|
||||
return
|
||||
}
|
||||
|
||||
p.Wire = fields[0]
|
||||
switch p.Wire {
|
||||
case "varint":
|
||||
p.WireType = WireVarint
|
||||
case "fixed32":
|
||||
p.WireType = WireFixed32
|
||||
case "fixed64":
|
||||
p.WireType = WireFixed64
|
||||
case "zigzag32":
|
||||
p.WireType = WireVarint
|
||||
case "zigzag64":
|
||||
p.WireType = WireVarint
|
||||
case "bytes", "group":
|
||||
p.WireType = WireBytes
|
||||
// no numeric converter for non-numeric types
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
p.Tag, err = strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
outer:
|
||||
for i := 2; i < len(fields); i++ {
|
||||
f := fields[i]
|
||||
switch {
|
||||
case f == "req":
|
||||
p.Required = true
|
||||
case f == "opt":
|
||||
p.Optional = true
|
||||
case f == "rep":
|
||||
p.Repeated = true
|
||||
case f == "packed":
|
||||
p.Packed = true
|
||||
case strings.HasPrefix(f, "name="):
|
||||
p.OrigName = f[5:]
|
||||
case strings.HasPrefix(f, "json="):
|
||||
p.JSONName = f[5:]
|
||||
case strings.HasPrefix(f, "enum="):
|
||||
p.Enum = f[5:]
|
||||
case f == "proto3":
|
||||
p.proto3 = true
|
||||
case f == "oneof":
|
||||
p.oneof = true
|
||||
case strings.HasPrefix(f, "def="):
|
||||
p.HasDefault = true
|
||||
p.Default = f[4:] // rest of string
|
||||
if i+1 < len(fields) {
|
||||
// Commas aren't escaped, and def is always last.
|
||||
p.Default += "," + strings.Join(fields[i+1:], ",")
|
||||
break outer
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
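// Illustrative sketch, not part of the original file: parsing a generated
// struct tag of the form documented above.
func exampleParse() Properties {
	var p Properties
	p.Parse("bytes,49,opt,name=foo,def=hello!")
	// p.WireType == WireBytes, p.Tag == 49, p.Optional == true,
	// p.OrigName == "foo", p.HasDefault == true, p.Default == "hello!"
	return p
}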
|
||||
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
|
||||
|
||||
// setFieldProps initializes the field properties for submessages and maps.
|
||||
func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
|
||||
switch t1 := typ; t1.Kind() {
|
||||
case reflect.Ptr:
|
||||
if t1.Elem().Kind() == reflect.Struct {
|
||||
p.stype = t1.Elem()
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
|
||||
p.stype = t2.Elem()
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
p.mtype = t1
|
||||
p.MapKeyProp = &Properties{}
|
||||
p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
|
||||
p.MapValProp = &Properties{}
|
||||
vtype := p.mtype.Elem()
|
||||
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
|
||||
// The value type is not a message (*T) or bytes ([]byte),
|
||||
// so we need encoders for the pointer to this type.
|
||||
vtype = reflect.PtrTo(vtype)
|
||||
}
|
||||
p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
||||
}
|
||||
|
||||
if p.stype != nil {
|
||||
if lockGetProp {
|
||||
p.sprop = GetProperties(p.stype)
|
||||
} else {
|
||||
p.sprop = getPropertiesLocked(p.stype)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||
)
|
||||
|
||||
// Init populates the properties from a protocol buffer struct tag.
|
||||
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
||||
p.init(typ, name, tag, f, true)
|
||||
}
|
||||
|
||||
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
|
||||
// "bytes,49,opt,def=hello!"
|
||||
p.Name = name
|
||||
p.OrigName = name
|
||||
if tag == "" {
|
||||
return
|
||||
}
|
||||
p.Parse(tag)
|
||||
p.setFieldProps(typ, f, lockGetProp)
|
||||
}
|
||||
|
||||
var (
|
||||
propertiesMu sync.RWMutex
|
||||
propertiesMap = make(map[reflect.Type]*StructProperties)
|
||||
)
|
||||
|
||||
// GetProperties returns the list of properties for the type represented by t.
|
||||
// t must represent a generated struct type of a protocol message.
|
||||
func GetProperties(t reflect.Type) *StructProperties {
|
||||
if t.Kind() != reflect.Struct {
|
||||
panic("proto: type must have kind struct")
|
||||
}
|
||||
|
||||
// Most calls to GetProperties in a long-running program will be
|
||||
// retrieving details for types we have seen before.
|
||||
propertiesMu.RLock()
|
||||
sprop, ok := propertiesMap[t]
|
||||
propertiesMu.RUnlock()
|
||||
if ok {
|
||||
if collectStats {
|
||||
stats.Chit++
|
||||
}
|
||||
return sprop
|
||||
}
|
||||
|
||||
propertiesMu.Lock()
|
||||
sprop = getPropertiesLocked(t)
|
||||
propertiesMu.Unlock()
|
||||
return sprop
|
||||
}
|
||||
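// Illustrative sketch, not part of the original file: GetProperties expects a
// struct type, so callers pass the element type behind a message pointer.
// msg is a hypothetical generated message value.
func exampleGetProperties(msg Message) *StructProperties {
	return GetProperties(reflect.TypeOf(msg).Elem())
}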
|
||||
// getPropertiesLocked requires that propertiesMu is held.
|
||||
func getPropertiesLocked(t reflect.Type) *StructProperties {
|
||||
if prop, ok := propertiesMap[t]; ok {
|
||||
if collectStats {
|
||||
stats.Chit++
|
||||
}
|
||||
return prop
|
||||
}
|
||||
if collectStats {
|
||||
stats.Cmiss++
|
||||
}
|
||||
|
||||
prop := new(StructProperties)
|
||||
// in case of recursive protos, fill this in now.
|
||||
propertiesMap[t] = prop
|
||||
|
||||
// build properties
|
||||
prop.Prop = make([]*Properties, t.NumField())
|
||||
prop.order = make([]int, t.NumField())
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
p := new(Properties)
|
||||
name := f.Name
|
||||
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
||||
|
||||
oneof := f.Tag.Get("protobuf_oneof") // special case
|
||||
if oneof != "" {
|
||||
// Oneof fields don't use the traditional protobuf tag.
|
||||
p.OrigName = oneof
|
||||
}
|
||||
prop.Prop[i] = p
|
||||
prop.order[i] = i
|
||||
if debug {
|
||||
print(i, " ", f.Name, " ", t.String(), " ")
|
||||
if p.Tag > 0 {
|
||||
print(p.String())
|
||||
}
|
||||
print("\n")
|
||||
}
|
||||
}
|
||||
|
||||
// Re-order prop.order.
|
||||
sort.Sort(prop)
|
||||
|
||||
type oneofMessage interface {
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||
}
|
||||
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
|
||||
var oots []interface{}
|
||||
_, _, _, oots = om.XXX_OneofFuncs()
|
||||
|
||||
// Interpret oneof metadata.
|
||||
prop.OneofTypes = make(map[string]*OneofProperties)
|
||||
for _, oot := range oots {
|
||||
oop := &OneofProperties{
|
||||
Type: reflect.ValueOf(oot).Type(), // *T
|
||||
Prop: new(Properties),
|
||||
}
|
||||
sft := oop.Type.Elem().Field(0)
|
||||
oop.Prop.Name = sft.Name
|
||||
oop.Prop.Parse(sft.Tag.Get("protobuf"))
|
||||
// There will be exactly one interface field that
|
||||
// this new value is assignable to.
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if f.Type.Kind() != reflect.Interface {
|
||||
continue
|
||||
}
|
||||
if !oop.Type.AssignableTo(f.Type) {
|
||||
continue
|
||||
}
|
||||
oop.Field = i
|
||||
break
|
||||
}
|
||||
prop.OneofTypes[oop.Prop.OrigName] = oop
|
||||
}
|
||||
}
|
||||
|
||||
// build required counts
|
||||
// build tags
|
||||
reqCount := 0
|
||||
prop.decoderOrigNames = make(map[string]int)
|
||||
for i, p := range prop.Prop {
|
||||
if strings.HasPrefix(p.Name, "XXX_") {
|
||||
// Internal fields should not appear in tags/origNames maps.
|
||||
// They are handled specially when encoding and decoding.
|
||||
continue
|
||||
}
|
||||
if p.Required {
|
||||
reqCount++
|
||||
}
|
||||
prop.decoderTags.put(p.Tag, i)
|
||||
prop.decoderOrigNames[p.OrigName] = i
|
||||
}
|
||||
prop.reqCount = reqCount
|
||||
|
||||
return prop
|
||||
}
|
||||
|
||||
// A global registry of enum types.
|
||||
// The generated code will register the generated maps by calling RegisterEnum.
|
||||
|
||||
var enumValueMaps = make(map[string]map[string]int32)
|
||||
|
||||
// RegisterEnum is called from the generated code to install the enum descriptor
|
||||
// maps into the global table to aid parsing text format protocol buffers.
|
||||
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
|
||||
if _, ok := enumValueMaps[typeName]; ok {
|
||||
panic("proto: duplicate enum registered: " + typeName)
|
||||
}
|
||||
enumValueMaps[typeName] = valueMap
|
||||
}
|
||||
|
||||
// EnumValueMap returns the mapping from names to integers of the
|
||||
// enum type enumType, or nil if not found.
|
||||
func EnumValueMap(enumType string) map[string]int32 {
|
||||
return enumValueMaps[enumType]
|
||||
}
|
||||
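// Illustrative sketch, not part of the original file: generated code registers
// an enum's value map, which can later be looked up by name. "example.Color"
// is a hypothetical enum name.
func exampleEnumRegistry() int32 {
	RegisterEnum("example.Color", nil, map[string]int32{"RED": 0, "GREEN": 1})
	return EnumValueMap("example.Color")["GREEN"] // 1
}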
|
||||
// A registry of all linked message types.
|
||||
// The string is a fully-qualified proto name ("pkg.Message").
|
||||
var (
|
||||
protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
|
||||
protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
|
||||
revProtoTypes = make(map[reflect.Type]string)
|
||||
)
|
||||
|
||||
// RegisterType is called from generated code and maps from the fully qualified
|
||||
// proto name to the type (pointer to struct) of the protocol buffer.
|
||||
func RegisterType(x Message, name string) {
|
||||
if _, ok := protoTypedNils[name]; ok {
|
||||
// TODO: Some day, make this a panic.
|
||||
log.Printf("proto: duplicate proto type registered: %s", name)
|
||||
return
|
||||
}
|
||||
t := reflect.TypeOf(x)
|
||||
if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
|
||||
// Generated code always calls RegisterType with nil x.
|
||||
// This check is just for extra safety.
|
||||
protoTypedNils[name] = x
|
||||
} else {
|
||||
protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
|
||||
}
|
||||
revProtoTypes[t] = name
|
||||
}
|
||||
|
||||
// RegisterMapType is called from generated code and maps from the fully qualified
|
||||
// proto name to the native map type of the proto map definition.
|
||||
func RegisterMapType(x interface{}, name string) {
|
||||
if reflect.TypeOf(x).Kind() != reflect.Map {
|
||||
panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
|
||||
}
|
||||
if _, ok := protoMapTypes[name]; ok {
|
||||
log.Printf("proto: duplicate proto type registered: %s", name)
|
||||
return
|
||||
}
|
||||
t := reflect.TypeOf(x)
|
||||
protoMapTypes[name] = t
|
||||
revProtoTypes[t] = name
|
||||
}
|
||||
|
||||
// MessageName returns the fully-qualified proto name for the given message type.
|
||||
func MessageName(x Message) string {
|
||||
type xname interface {
|
||||
XXX_MessageName() string
|
||||
}
|
||||
if m, ok := x.(xname); ok {
|
||||
return m.XXX_MessageName()
|
||||
}
|
||||
return revProtoTypes[reflect.TypeOf(x)]
|
||||
}
|
||||
|
||||
// MessageType returns the message type (pointer to struct) for a named message.
|
||||
// The type is not guaranteed to implement proto.Message if the name refers to a
|
||||
// map entry.
|
||||
func MessageType(name string) reflect.Type {
|
||||
if t, ok := protoTypedNils[name]; ok {
|
||||
return reflect.TypeOf(t)
|
||||
}
|
||||
return protoMapTypes[name]
|
||||
}
|
||||
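// Illustrative sketch, not part of the original file: round-tripping a
// registered message through the name and type registries above. msg is a
// hypothetical generated message; RegisterType is normally called by generated code.
func exampleTypeRegistry(msg Message) reflect.Type {
	name := MessageName(msg) // e.g. "pkg.Msg"
	return MessageType(name) // reflect.Type of the message pointer, or nil
}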
|
||||
// A registry of all linked proto files.
|
||||
var (
|
||||
protoFiles = make(map[string][]byte) // file name => fileDescriptor
|
||||
)
|
||||
|
||||
// RegisterFile is called from generated code and maps from the
|
||||
// full file name of a .proto file to its compressed FileDescriptorProto.
|
||||
func RegisterFile(filename string, fileDescriptor []byte) {
|
||||
protoFiles[filename] = fileDescriptor
|
||||
}
|
||||
|
||||
// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
|
||||
func FileDescriptor(filename string) []byte { return protoFiles[filename] }
|
vendor/github.com/golang/protobuf/proto/table_marshal.go (generated, vendored): 2767 deletions
File diff suppressed because it is too large
vendor/github.com/golang/protobuf/proto/table_merge.go (generated, vendored): 654 deletions
@@ -1,654 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Merge merges the src message into dst.
|
||||
// This assumes that dst and src are of the same type and are non-nil.
|
||||
func (a *InternalMessageInfo) Merge(dst, src Message) {
|
||||
mi := atomicLoadMergeInfo(&a.merge)
|
||||
if mi == nil {
|
||||
mi = getMergeInfo(reflect.TypeOf(dst).Elem())
|
||||
atomicStoreMergeInfo(&a.merge, mi)
|
||||
}
|
||||
mi.merge(toPointer(&dst), toPointer(&src))
|
||||
}
|
||||
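// Illustrative sketch, not part of the original file: the lazy caching pattern
// Merge uses above. The cached pointer is read with an atomic load; on a miss
// the info is computed once and published with an atomic store, so later calls
// skip the reflection work. cacheHolder is a hypothetical stand-in for
// InternalMessageInfo.
type cacheHolder struct{ merge *mergeInfo }

func cachedMergeInfo(h *cacheHolder, t reflect.Type) *mergeInfo {
	mi := atomicLoadMergeInfo(&h.merge)
	if mi == nil {
		mi = getMergeInfo(t)
		atomicStoreMergeInfo(&h.merge, mi)
	}
	return mi
}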
|
||||
type mergeInfo struct {
|
||||
typ reflect.Type
|
||||
|
||||
initialized int32 // 0: only typ is valid, 1: everything is valid
|
||||
lock sync.Mutex
|
||||
|
||||
fields []mergeFieldInfo
|
||||
unrecognized field // Offset of XXX_unrecognized
|
||||
}
|
||||
|
||||
type mergeFieldInfo struct {
|
||||
field field // Offset of field, guaranteed to be valid
|
||||
|
||||
// isPointer reports whether the value in the field is a pointer.
|
||||
// This is true for the following situations:
|
||||
// * Pointer to struct
|
||||
// * Pointer to basic type (proto2 only)
|
||||
// * Slice (first value in slice header is a pointer)
|
||||
// * String (first value in string header is a pointer)
|
||||
isPointer bool
|
||||
|
||||
// basicWidth reports the width of the field assuming that it is directly
|
||||
// embedded in the struct (as is the case for basic types in proto3).
|
||||
// The possible values are:
|
||||
// 0: invalid
|
||||
// 1: bool
|
||||
// 4: int32, uint32, float32
|
||||
// 8: int64, uint64, float64
|
||||
basicWidth int
|
||||
|
||||
// Where dst and src are pointers to the types being merged.
|
||||
merge func(dst, src pointer)
|
||||
}
|
||||
|
||||
var (
|
||||
mergeInfoMap = map[reflect.Type]*mergeInfo{}
|
||||
mergeInfoLock sync.Mutex
|
||||
)
|
||||
|
||||
func getMergeInfo(t reflect.Type) *mergeInfo {
|
||||
mergeInfoLock.Lock()
|
||||
defer mergeInfoLock.Unlock()
|
||||
mi := mergeInfoMap[t]
|
||||
if mi == nil {
|
||||
mi = &mergeInfo{typ: t}
|
||||
mergeInfoMap[t] = mi
|
||||
}
|
||||
return mi
|
||||
}
|
||||
|
||||
// merge merges src into dst assuming they are both of type *mi.typ.
|
||||
func (mi *mergeInfo) merge(dst, src pointer) {
|
||||
if dst.isNil() {
|
||||
panic("proto: nil destination")
|
||||
}
|
||||
if src.isNil() {
|
||||
return // Nothing to do.
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&mi.initialized) == 0 {
|
||||
mi.computeMergeInfo()
|
||||
}
|
||||
|
||||
for _, fi := range mi.fields {
|
||||
sfp := src.offset(fi.field)
|
||||
|
||||
// As an optimization, we can avoid the merge function call cost
|
||||
// if we know for sure that the source will have no effect
|
||||
// by checking if it is the zero value.
|
||||
if unsafeAllowed {
|
||||
if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
|
||||
continue
|
||||
}
|
||||
if fi.basicWidth > 0 {
|
||||
switch {
|
||||
case fi.basicWidth == 1 && !*sfp.toBool():
|
||||
continue
|
||||
case fi.basicWidth == 4 && *sfp.toUint32() == 0:
|
||||
continue
|
||||
case fi.basicWidth == 8 && *sfp.toUint64() == 0:
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dfp := dst.offset(fi.field)
|
||||
fi.merge(dfp, sfp)
|
||||
}
|
||||
|
||||
// TODO: Make this faster?
|
||||
out := dst.asPointerTo(mi.typ).Elem()
|
||||
in := src.asPointerTo(mi.typ).Elem()
|
||||
if emIn, err := extendable(in.Addr().Interface()); err == nil {
|
||||
emOut, _ := extendable(out.Addr().Interface())
|
||||
mIn, muIn := emIn.extensionsRead()
|
||||
if mIn != nil {
|
||||
mOut := emOut.extensionsWrite()
|
||||
muIn.Lock()
|
||||
mergeExtension(mOut, mIn)
|
||||
muIn.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
if mi.unrecognized.IsValid() {
|
||||
if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
|
||||
*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (mi *mergeInfo) computeMergeInfo() {
|
||||
mi.lock.Lock()
|
||||
defer mi.lock.Unlock()
|
||||
if mi.initialized != 0 {
|
||||
return
|
||||
}
|
||||
t := mi.typ
|
||||
n := t.NumField()
|
||||
|
||||
props := GetProperties(t)
|
||||
for i := 0; i < n; i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
|
||||
mfi := mergeFieldInfo{field: toField(&f)}
|
||||
tf := f.Type
|
||||
|
||||
// As an optimization, we can avoid the merge function call cost
|
||||
// if we know for sure that the source will have no effect
|
||||
// by checking if it is the zero value.
|
||||
if unsafeAllowed {
|
||||
switch tf.Kind() {
|
||||
case reflect.Ptr, reflect.Slice, reflect.String:
|
||||
// As a special case, we assume slices and strings are pointers
|
||||
// since we know that the first field in the SliceHeader or
|
||||
// StringHeader is a data pointer.
|
||||
mfi.isPointer = true
|
||||
case reflect.Bool:
|
||||
mfi.basicWidth = 1
|
||||
case reflect.Int32, reflect.Uint32, reflect.Float32:
|
||||
mfi.basicWidth = 4
|
||||
case reflect.Int64, reflect.Uint64, reflect.Float64:
|
||||
mfi.basicWidth = 8
|
||||
}
|
||||
}
|
||||
|
||||
// Unwrap tf to get at its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic("both pointer and slice for basic type in " + tf.Name())
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Int32:
|
||||
switch {
|
||||
case isSlice: // E.g., []int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
|
||||
/*
|
||||
sfsp := src.toInt32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toInt32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []int32{}
|
||||
}
|
||||
}
|
||||
*/
|
||||
sfs := src.getInt32Slice()
|
||||
if sfs != nil {
|
||||
dfs := dst.getInt32Slice()
|
||||
dfs = append(dfs, sfs...)
|
||||
if dfs == nil {
|
||||
dfs = []int32{}
|
||||
}
|
||||
dst.setInt32Slice(dfs)
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
|
||||
/*
|
||||
sfpp := src.toInt32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toInt32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Int32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
*/
|
||||
sfp := src.getInt32Ptr()
|
||||
if sfp != nil {
|
||||
dfp := dst.getInt32Ptr()
|
||||
if dfp == nil {
|
||||
dst.setInt32Ptr(*sfp)
|
||||
} else {
|
||||
*dfp = *sfp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toInt32(); v != 0 {
|
||||
*dst.toInt32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Int64:
|
||||
switch {
|
||||
case isSlice: // E.g., []int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toInt64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toInt64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []int64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toInt64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toInt64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Int64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toInt64(); v != 0 {
|
||||
*dst.toInt64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Uint32:
|
||||
switch {
|
||||
case isSlice: // E.g., []uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toUint32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toUint32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []uint32{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toUint32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toUint32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Uint32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toUint32(); v != 0 {
|
||||
*dst.toUint32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Uint64:
|
||||
switch {
|
||||
case isSlice: // E.g., []uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toUint64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toUint64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []uint64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toUint64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toUint64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Uint64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toUint64(); v != 0 {
|
||||
*dst.toUint64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Float32:
|
||||
switch {
|
||||
case isSlice: // E.g., []float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toFloat32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toFloat32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []float32{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toFloat32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toFloat32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Float32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toFloat32(); v != 0 {
|
||||
*dst.toFloat32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Float64:
|
||||
switch {
|
||||
case isSlice: // E.g., []float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toFloat64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toFloat64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []float64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toFloat64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toFloat64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Float64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toFloat64(); v != 0 {
|
||||
*dst.toFloat64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Bool:
|
||||
switch {
|
||||
case isSlice: // E.g., []bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toBoolSlice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toBoolSlice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []bool{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toBoolPtr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toBoolPtr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Bool(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toBool(); v {
|
||||
*dst.toBool() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.String:
|
||||
switch {
|
||||
case isSlice: // E.g., []string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toStringSlice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toStringSlice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []string{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toStringPtr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toStringPtr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = String(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toString(); v != "" {
|
||||
*dst.toString() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
isProto3 := props.Prop[i].proto3
|
||||
switch {
|
||||
case isPointer:
|
||||
panic("bad pointer in byte slice case in " + tf.Name())
|
||||
case tf.Elem().Kind() != reflect.Uint8:
|
||||
panic("bad element kind in byte slice case in " + tf.Name())
|
||||
case isSlice: // E.g., [][]byte
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sbsp := src.toBytesSlice()
|
||||
if *sbsp != nil {
|
||||
dbsp := dst.toBytesSlice()
|
||||
for _, sb := range *sbsp {
|
||||
if sb == nil {
|
||||
*dbsp = append(*dbsp, nil)
|
||||
} else {
|
||||
*dbsp = append(*dbsp, append([]byte{}, sb...))
|
||||
}
|
||||
}
|
||||
						if *dbsp == nil {
							*dbsp = [][]byte{}
						}
					}
				}
			default: // E.g., []byte
				mfi.merge = func(dst, src pointer) {
					sbp := src.toBytes()
					if *sbp != nil {
						dbp := dst.toBytes()
						if !isProto3 || len(*sbp) > 0 {
							*dbp = append([]byte{}, *sbp...)
						}
					}
				}
			}
		case reflect.Struct:
			switch {
			case !isPointer:
				panic(fmt.Sprintf("message field %s without pointer", tf))
			case isSlice: // E.g., []*pb.T
				mi := getMergeInfo(tf)
				mfi.merge = func(dst, src pointer) {
					sps := src.getPointerSlice()
					if sps != nil {
						dps := dst.getPointerSlice()
						for _, sp := range sps {
							var dp pointer
							if !sp.isNil() {
								dp = valToPointer(reflect.New(tf))
								mi.merge(dp, sp)
							}
							dps = append(dps, dp)
						}
						if dps == nil {
							dps = []pointer{}
						}
						dst.setPointerSlice(dps)
					}
				}
			default: // E.g., *pb.T
				mi := getMergeInfo(tf)
				mfi.merge = func(dst, src pointer) {
					sp := src.getPointer()
					if !sp.isNil() {
						dp := dst.getPointer()
						if dp.isNil() {
							dp = valToPointer(reflect.New(tf))
							dst.setPointer(dp)
						}
						mi.merge(dp, sp)
					}
				}
			}
		case reflect.Map:
			switch {
			case isPointer || isSlice:
				panic("bad pointer or slice in map case in " + tf.Name())
			default: // E.g., map[K]V
				mfi.merge = func(dst, src pointer) {
					sm := src.asPointerTo(tf).Elem()
					if sm.Len() == 0 {
						return
					}
					dm := dst.asPointerTo(tf).Elem()
					if dm.IsNil() {
						dm.Set(reflect.MakeMap(tf))
					}

					switch tf.Elem().Kind() {
					case reflect.Ptr: // Proto struct (e.g., *T)
						for _, key := range sm.MapKeys() {
							val := sm.MapIndex(key)
							val = reflect.ValueOf(Clone(val.Interface().(Message)))
							dm.SetMapIndex(key, val)
						}
					case reflect.Slice: // E.g. Bytes type (e.g., []byte)
						for _, key := range sm.MapKeys() {
							val := sm.MapIndex(key)
							val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
							dm.SetMapIndex(key, val)
						}
					default: // Basic type (e.g., string)
						for _, key := range sm.MapKeys() {
							val := sm.MapIndex(key)
							dm.SetMapIndex(key, val)
						}
					}
				}
			}
		case reflect.Interface:
			// Must be oneof field.
			switch {
			case isPointer || isSlice:
				panic("bad pointer or slice in interface case in " + tf.Name())
			default: // E.g., interface{}
				// TODO: Make this faster?
				mfi.merge = func(dst, src pointer) {
					su := src.asPointerTo(tf).Elem()
					if !su.IsNil() {
						du := dst.asPointerTo(tf).Elem()
						typ := su.Elem().Type()
						if du.IsNil() || du.Elem().Type() != typ {
							du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
						}
						sv := su.Elem().Elem().Field(0)
						if sv.Kind() == reflect.Ptr && sv.IsNil() {
							return
						}
						dv := du.Elem().Elem().Field(0)
						if dv.Kind() == reflect.Ptr && dv.IsNil() {
							dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
						}
						switch sv.Type().Kind() {
						case reflect.Ptr: // Proto struct (e.g., *T)
							Merge(dv.Interface().(Message), sv.Interface().(Message))
						case reflect.Slice: // E.g. Bytes type (e.g., []byte)
							dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
						default: // Basic type (e.g., string)
							dv.Set(sv)
						}
					}
				}
			}
		default:
			panic(fmt.Sprintf("merger not found for type:%s", tf))
		}
		mi.fields = append(mi.fields, mfi)
	}

	mi.unrecognized = invalidField
	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
		if f.Type != reflect.TypeOf([]byte{}) {
			panic("expected XXX_unrecognized to be of type []byte")
		}
		mi.unrecognized = toField(&f)
	}

	atomic.StoreInt32(&mi.initialized, 1)
}
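The merge table built above is what backs the package's exported Merge and Clone helpers. A minimal usage sketch follows; the hand-written Example type is hypothetical and stands in for a protoc-generated struct, which is the normal way to obtain a Message.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Example is a hypothetical hand-written message used only for this sketch;
// real code would use a protoc-generated type instead.
type Example struct {
	Name  *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Count *int32  `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
}

func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return proto.CompactTextString(m) }
func (*Example) ProtoMessage()    {}

func main() {
	dst := &Example{Name: proto.String("alpha")}
	src := &Example{Count: proto.Int32(3)}

	// Merge copies the fields that are set in src into dst; dst.Name is kept.
	proto.Merge(dst, src)
	fmt.Println(proto.MarshalTextString(dst))
}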
2051	vendor/github.com/golang/protobuf/proto/table_unmarshal.go (generated, vendored)
File diff suppressed because it is too large

843	vendor/github.com/golang/protobuf/proto/text.go (generated, vendored)
@@ -1,843 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// Functions for writing the text protocol buffer format.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
newline = []byte("\n")
|
||||
spaces = []byte(" ")
|
||||
endBraceNewline = []byte("}\n")
|
||||
backslashN = []byte{'\\', 'n'}
|
||||
backslashR = []byte{'\\', 'r'}
|
||||
backslashT = []byte{'\\', 't'}
|
||||
backslashDQ = []byte{'\\', '"'}
|
||||
backslashBS = []byte{'\\', '\\'}
|
||||
posInf = []byte("inf")
|
||||
negInf = []byte("-inf")
|
||||
nan = []byte("nan")
|
||||
)
|
||||
|
||||
type writer interface {
|
||||
io.Writer
|
||||
WriteByte(byte) error
|
||||
}
|
||||
|
||||
// textWriter is an io.Writer that tracks its indentation level.
|
||||
type textWriter struct {
|
||||
ind int
|
||||
complete bool // if the current position is a complete line
|
||||
compact bool // whether to write out as a one-liner
|
||||
w writer
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteString(s string) (n int, err error) {
|
||||
if !strings.Contains(s, "\n") {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.complete = false
|
||||
return io.WriteString(w.w, s)
|
||||
}
|
||||
// WriteString is typically called without newlines, so this
|
||||
// codepath and its copy are rare. We copy to avoid
|
||||
// duplicating all of Write's logic here.
|
||||
return w.Write([]byte(s))
|
||||
}
|
||||
|
||||
func (w *textWriter) Write(p []byte) (n int, err error) {
|
||||
newlines := bytes.Count(p, newline)
|
||||
if newlines == 0 {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
n, err = w.w.Write(p)
|
||||
w.complete = false
|
||||
return n, err
|
||||
}
|
||||
|
||||
frags := bytes.SplitN(p, newline, newlines+1)
|
||||
if w.compact {
|
||||
for i, frag := range frags {
|
||||
if i > 0 {
|
||||
if err := w.w.WriteByte(' '); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
}
|
||||
nn, err := w.w.Write(frag)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
for i, frag := range frags {
|
||||
if w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
nn, err := w.w.Write(frag)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
if i+1 < len(frags) {
|
||||
if err := w.w.WriteByte('\n'); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
}
|
||||
}
|
||||
w.complete = len(frags[len(frags)-1]) == 0
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteByte(c byte) error {
|
||||
if w.compact && c == '\n' {
|
||||
c = ' '
|
||||
}
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
err := w.w.WriteByte(c)
|
||||
w.complete = c == '\n'
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *textWriter) indent() { w.ind++ }
|
||||
|
||||
func (w *textWriter) unindent() {
|
||||
if w.ind == 0 {
|
||||
log.Print("proto: textWriter unindented too far")
|
||||
return
|
||||
}
|
||||
w.ind--
|
||||
}
|
||||
|
||||
func writeName(w *textWriter, props *Properties) error {
|
||||
if _, err := w.WriteString(props.OrigName); err != nil {
|
||||
return err
|
||||
}
|
||||
if props.Wire != "group" {
|
||||
return w.WriteByte(':')
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func requiresQuotes(u string) bool {
|
||||
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
||||
for _, ch := range u {
|
||||
switch {
|
||||
case ch == '.' || ch == '/' || ch == '_':
|
||||
continue
|
||||
case '0' <= ch && ch <= '9':
|
||||
continue
|
||||
case 'A' <= ch && ch <= 'Z':
|
||||
continue
|
||||
case 'a' <= ch && ch <= 'z':
|
||||
continue
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isAny reports whether sv is a google.protobuf.Any message
|
||||
func isAny(sv reflect.Value) bool {
|
||||
type wkt interface {
|
||||
XXX_WellKnownType() string
|
||||
}
|
||||
t, ok := sv.Addr().Interface().(wkt)
|
||||
return ok && t.XXX_WellKnownType() == "Any"
|
||||
}
|
||||
|
||||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
||||
//
|
||||
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
|
||||
// required messages are not linked in).
|
||||
//
|
||||
// It returns (true, error) when sv was written in expanded format or an error
|
||||
// was encountered.
|
||||
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
|
||||
turl := sv.FieldByName("TypeUrl")
|
||||
val := sv.FieldByName("Value")
|
||||
if !turl.IsValid() || !val.IsValid() {
|
||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||
}
|
||||
|
||||
b, ok := val.Interface().([]byte)
|
||||
if !ok {
|
||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||
}
|
||||
|
||||
parts := strings.Split(turl.String(), "/")
|
||||
mt := MessageType(parts[len(parts)-1])
|
||||
if mt == nil {
|
||||
return false, nil
|
||||
}
|
||||
m := reflect.New(mt.Elem())
|
||||
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
w.Write([]byte("["))
|
||||
u := turl.String()
|
||||
if requiresQuotes(u) {
|
||||
writeString(w, u)
|
||||
} else {
|
||||
w.Write([]byte(u))
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("]:<"))
|
||||
} else {
|
||||
w.Write([]byte("]: <\n"))
|
||||
w.ind++
|
||||
}
|
||||
if err := tm.writeStruct(w, m.Elem()); err != nil {
|
||||
return true, err
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("> "))
|
||||
} else {
|
||||
w.ind--
|
||||
w.Write([]byte(">\n"))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
||||
if tm.ExpandAny && isAny(sv) {
|
||||
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
|
||||
return err
|
||||
}
|
||||
}
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < sv.NumField(); i++ {
|
||||
fv := sv.Field(i)
|
||||
props := sprops.Prop[i]
|
||||
name := st.Field(i).Name
|
||||
|
||||
if name == "XXX_NoUnkeyedLiteral" {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(name, "XXX_") {
|
||||
// There are two XXX_ fields:
|
||||
// XXX_unrecognized []byte
|
||||
// XXX_extensions map[int32]proto.Extension
|
||||
// The first is handled here;
|
||||
// the second is handled at the bottom of this function.
|
||||
if name == "XXX_unrecognized" && !fv.IsNil() {
|
||||
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
||||
// Field not filled in. This could be an optional field or
|
||||
// a required field that wasn't filled in. Either way, there
|
||||
// isn't anything we can show for it.
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Slice && fv.IsNil() {
|
||||
// Repeated field that is empty, or a bytes field that is unused.
|
||||
continue
|
||||
}
|
||||
|
||||
if props.Repeated && fv.Kind() == reflect.Slice {
|
||||
// Repeated field.
|
||||
for j := 0; j < fv.Len(); j++ {
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
v := fv.Index(j)
|
||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
// A nil message in a repeated field is not valid,
|
||||
// but we can handle that more gracefully than panicking.
|
||||
if _, err := w.Write([]byte("<nil>\n")); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err := tm.writeAny(w, v, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Map {
|
||||
// Map fields are rendered as a repeated struct with key/value fields.
|
||||
keys := fv.MapKeys()
|
||||
sort.Sort(mapKeys(keys))
|
||||
for _, key := range keys {
|
||||
val := fv.MapIndex(key)
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// open struct
|
||||
if err := w.WriteByte('<'); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
// key
|
||||
if _, err := w.WriteString("key:"); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
// nil values aren't legal, but we can avoid panicking because of them.
|
||||
if val.Kind() != reflect.Ptr || !val.IsNil() {
|
||||
// value
|
||||
if _, err := w.WriteString("value:"); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, val, props.MapValProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// close struct
|
||||
w.unindent()
|
||||
if err := w.WriteByte('>'); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
|
||||
// empty bytes field
|
||||
continue
|
||||
}
|
||||
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
|
||||
// proto3 non-repeated scalar field; skip if zero value
|
||||
if isProto3Zero(fv) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if fv.Kind() == reflect.Interface {
|
||||
// Check if it is a oneof.
|
||||
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
|
||||
// fv is nil, or holds a pointer to generated struct.
|
||||
// That generated struct has exactly one field,
|
||||
// which has a protobuf struct tag.
|
||||
if fv.IsNil() {
|
||||
continue
|
||||
}
|
||||
inner := fv.Elem().Elem() // interface -> *T -> T
|
||||
tag := inner.Type().Field(0).Tag.Get("protobuf")
|
||||
props = new(Properties) // Overwrite the outer props var, but not its pointee.
|
||||
props.Parse(tag)
|
||||
// Write the value in the oneof, not the oneof itself.
|
||||
fv = inner.Field(0)
|
||||
|
||||
// Special case to cope with malformed messages gracefully:
|
||||
// If the value in the oneof is a nil pointer, don't panic
|
||||
// in writeAny.
|
||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
||||
// Use errors.New so writeAny won't render quotes.
|
||||
msg := errors.New("/* nil */")
|
||||
fv = reflect.ValueOf(&msg).Elem()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Enums have a String method, so writeAny will work fine.
|
||||
if err := tm.writeAny(w, fv, props); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Extensions (the XXX_extensions field).
|
||||
pv := sv.Addr()
|
||||
if _, err := extendable(pv.Interface()); err == nil {
|
||||
if err := tm.writeExtensions(w, pv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeAny writes an arbitrary field.
|
||||
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
v = reflect.Indirect(v)
|
||||
|
||||
// Floats have special cases.
|
||||
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
|
||||
x := v.Float()
|
||||
var b []byte
|
||||
switch {
|
||||
case math.IsInf(x, 1):
|
||||
b = posInf
|
||||
case math.IsInf(x, -1):
|
||||
b = negInf
|
||||
case math.IsNaN(x):
|
||||
b = nan
|
||||
}
|
||||
if b != nil {
|
||||
_, err := w.Write(b)
|
||||
return err
|
||||
}
|
||||
// Other values are handled below.
|
||||
}
|
||||
|
||||
// We don't attempt to serialise every possible value type; only those
|
||||
// that can occur in protocol buffers.
|
||||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
// Should only be a []byte; repeated fields are handled in writeStruct.
|
||||
if err := writeString(w, string(v.Bytes())); err != nil {
|
||||
return err
|
||||
}
|
||||
case reflect.String:
|
||||
if err := writeString(w, v.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
case reflect.Struct:
|
||||
// Required/optional group/message.
|
||||
var bra, ket byte = '<', '>'
|
||||
if props != nil && props.Wire == "group" {
|
||||
bra, ket = '{', '}'
|
||||
}
|
||||
if err := w.WriteByte(bra); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
if v.CanAddr() {
|
||||
// Calling v.Interface on a struct causes the reflect package to
|
||||
// copy the entire struct. This is racy with the new Marshaler
|
||||
// since we atomically update the XXX_sizecache.
|
||||
//
|
||||
// Thus, we retrieve a pointer to the struct if possible to avoid
|
||||
// a race since v.Interface on the pointer doesn't copy the struct.
|
||||
//
|
||||
// If v is not addressable, then we are not worried about a race
|
||||
// since it implies that the binary Marshaler cannot possibly be
|
||||
// mutating this value.
|
||||
v = v.Addr()
|
||||
}
|
||||
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
||||
text, err := etm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = w.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
if err := tm.writeStruct(w, v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.unindent()
|
||||
if err := w.WriteByte(ket); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
_, err := fmt.Fprint(w, v.Interface())
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// equivalent to C's isprint.
|
||||
func isprint(c byte) bool {
|
||||
return c >= 0x20 && c < 0x7f
|
||||
}
|
||||
|
||||
// writeString writes a string in the protocol buffer text format.
|
||||
// It is similar to strconv.Quote except we don't use Go escape sequences,
|
||||
// we treat the string as a byte sequence, and we use octal escapes.
|
||||
// These differences are to maintain interoperability with the other
|
||||
// languages' implementations of the text format.
|
||||
func writeString(w *textWriter, s string) error {
|
||||
// use WriteByte here to get any needed indent
|
||||
if err := w.WriteByte('"'); err != nil {
|
||||
return err
|
||||
}
|
||||
// Loop over the bytes, not the runes.
|
||||
for i := 0; i < len(s); i++ {
|
||||
var err error
|
||||
// Divergence from C++: we don't escape apostrophes.
|
||||
// There's no need to escape them, and the C++ parser
|
||||
// copes with a naked apostrophe.
|
||||
switch c := s[i]; c {
|
||||
case '\n':
|
||||
_, err = w.w.Write(backslashN)
|
||||
case '\r':
|
||||
_, err = w.w.Write(backslashR)
|
||||
case '\t':
|
||||
_, err = w.w.Write(backslashT)
|
||||
case '"':
|
||||
_, err = w.w.Write(backslashDQ)
|
||||
case '\\':
|
||||
_, err = w.w.Write(backslashBS)
|
||||
default:
|
||||
if isprint(c) {
|
||||
err = w.w.WriteByte(c)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w.w, "\\%03o", c)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return w.WriteByte('"')
|
||||
}
|
||||
|
||||
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
|
||||
if !w.compact {
|
||||
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
b := NewBuffer(data)
|
||||
for b.index < len(b.buf) {
|
||||
x, err := b.DecodeVarint()
|
||||
if err != nil {
|
||||
_, err := fmt.Fprintf(w, "/* %v */\n", err)
|
||||
return err
|
||||
}
|
||||
wire, tag := x&7, x>>3
|
||||
if wire == WireEndGroup {
|
||||
w.unindent()
|
||||
if _, err := w.Write(endBraceNewline); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if _, err := fmt.Fprint(w, tag); err != nil {
|
||||
return err
|
||||
}
|
||||
if wire != WireStartGroup {
|
||||
if err := w.WriteByte(':'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if !w.compact || wire == WireStartGroup {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
switch wire {
|
||||
case WireBytes:
|
||||
buf, e := b.DecodeRawBytes(false)
|
||||
if e == nil {
|
||||
_, err = fmt.Fprintf(w, "%q", buf)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w, "/* %v */", e)
|
||||
}
|
||||
case WireFixed32:
|
||||
x, err = b.DecodeFixed32()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
case WireFixed64:
|
||||
x, err = b.DecodeFixed64()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
case WireStartGroup:
|
||||
err = w.WriteByte('{')
|
||||
w.indent()
|
||||
case WireVarint:
|
||||
x, err = b.DecodeVarint()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
default:
|
||||
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeUnknownInt(w *textWriter, x uint64, err error) error {
|
||||
if err == nil {
|
||||
_, err = fmt.Fprint(w, x)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w, "/* %v */", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type int32Slice []int32
|
||||
|
||||
func (s int32Slice) Len() int { return len(s) }
|
||||
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// writeExtensions writes all the extensions in pv.
|
||||
// pv is assumed to be a pointer to a protocol message struct that is extendable.
|
||||
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
|
||||
emap := extensionMaps[pv.Type().Elem()]
|
||||
ep, _ := extendable(pv.Interface())
|
||||
|
||||
// Order the extensions by ID.
|
||||
// This isn't strictly necessary, but it will give us
|
||||
// canonical output, which will also make testing easier.
|
||||
m, mu := ep.extensionsRead()
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
mu.Lock()
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Sort(int32Slice(ids))
|
||||
mu.Unlock()
|
||||
|
||||
for _, extNum := range ids {
|
||||
ext := m[extNum]
|
||||
var desc *ExtensionDesc
|
||||
if emap != nil {
|
||||
desc = emap[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
// Unknown extension.
|
||||
if err := writeUnknownStruct(w, ext.enc); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
pb, err := GetExtension(ep, desc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed getting extension: %v", err)
|
||||
}
|
||||
|
||||
// Repeated extensions will appear as a slice.
|
||||
if !desc.repeated() {
|
||||
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
v := reflect.ValueOf(pb)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
|
||||
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeIndent() {
|
||||
if !w.complete {
|
||||
return
|
||||
}
|
||||
remain := w.ind * 2
|
||||
for remain > 0 {
|
||||
n := remain
|
||||
if n > len(spaces) {
|
||||
n = len(spaces)
|
||||
}
|
||||
w.w.Write(spaces[:n])
|
||||
remain -= n
|
||||
}
|
||||
w.complete = false
|
||||
}
|
||||
|
||||
// TextMarshaler is a configurable text format marshaler.
|
||||
type TextMarshaler struct {
|
||||
Compact bool // use compact text format (one line).
|
||||
ExpandAny bool // expand google.protobuf.Any messages of known types
|
||||
}
|
||||
|
||||
// Marshal writes a given protocol buffer in text format.
|
||||
// The only errors returned are from w.
|
||||
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
||||
val := reflect.ValueOf(pb)
|
||||
if pb == nil || val.IsNil() {
|
||||
w.Write([]byte("<nil>"))
|
||||
return nil
|
||||
}
|
||||
var bw *bufio.Writer
|
||||
ww, ok := w.(writer)
|
||||
if !ok {
|
||||
bw = bufio.NewWriter(w)
|
||||
ww = bw
|
||||
}
|
||||
aw := &textWriter{
|
||||
w: ww,
|
||||
complete: true,
|
||||
compact: tm.Compact,
|
||||
}
|
||||
|
||||
if etm, ok := pb.(encoding.TextMarshaler); ok {
|
||||
text, err := etm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = aw.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
|
||||
return bw.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Dereference the received pointer so we don't have outer < and >.
|
||||
v := reflect.Indirect(val)
|
||||
if err := tm.writeStruct(aw, v); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
|
||||
return bw.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Text is the same as Marshal, but returns the string directly.
|
||||
func (tm *TextMarshaler) Text(pb Message) string {
|
||||
var buf bytes.Buffer
|
||||
tm.Marshal(&buf, pb)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
var (
|
||||
defaultTextMarshaler = TextMarshaler{}
|
||||
compactTextMarshaler = TextMarshaler{Compact: true}
|
||||
)
|
||||
|
||||
// TODO: consider removing some of the Marshal functions below.
|
||||
|
||||
// MarshalText writes a given protocol buffer in text format.
|
||||
// The only errors returned are from w.
|
||||
func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
|
||||
|
||||
// MarshalTextString is the same as MarshalText, but returns the string directly.
|
||||
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
|
||||
|
||||
// CompactText writes a given protocol buffer in compact text format (one line).
|
||||
func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
|
||||
|
||||
// CompactTextString is the same as CompactText, but returns the string directly.
|
||||
func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
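The exported helpers above differ only in the TextMarshaler configuration they use. A short sketch of the difference, assuming the well-known Duration type from github.com/golang/protobuf/ptypes/duration is available:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	d := &durpb.Duration{Seconds: 3, Nanos: 500000000}

	// Default marshaler: one "name: value" pair per line.
	fmt.Print(proto.MarshalTextString(d))

	// Compact marshaler: the same pairs on a single line.
	fmt.Println(proto.CompactTextString(d))
}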
880	vendor/github.com/golang/protobuf/proto/text_parser.go (generated, vendored)
@@ -1,880 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// Functions for parsing the Text protocol buffer format.
|
||||
// TODO: message sets.
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Error string emitted when deserializing Any and fields are already set
|
||||
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
|
||||
|
||||
type ParseError struct {
|
||||
Message string
|
||||
Line int // 1-based line number
|
||||
Offset int // 0-based byte offset from start of input
|
||||
}
|
||||
|
||||
func (p *ParseError) Error() string {
|
||||
if p.Line == 1 {
|
||||
// show offset only for first line
|
||||
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
|
||||
}
|
||||
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
|
||||
}
|
||||
|
||||
type token struct {
|
||||
value string
|
||||
err *ParseError
|
||||
line int // line number
|
||||
offset int // byte number from start of input, not start of line
|
||||
unquoted string // the unquoted version of value, if it was a quoted string
|
||||
}
|
||||
|
||||
func (t *token) String() string {
|
||||
if t.err == nil {
|
||||
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
|
||||
}
|
||||
return fmt.Sprintf("parse error: %v", t.err)
|
||||
}
|
||||
|
||||
type textParser struct {
|
||||
s string // remaining input
|
||||
done bool // whether the parsing is finished (success or error)
|
||||
backed bool // whether back() was called
|
||||
offset, line int
|
||||
cur token
|
||||
}
|
||||
|
||||
func newTextParser(s string) *textParser {
|
||||
p := new(textParser)
|
||||
p.s = s
|
||||
p.line = 1
|
||||
p.cur.line = 1
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
||||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
||||
p.cur.err = pe
|
||||
p.done = true
|
||||
return pe
|
||||
}
|
||||
|
||||
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
|
||||
func isIdentOrNumberChar(c byte) bool {
|
||||
switch {
|
||||
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
||||
return true
|
||||
case '0' <= c && c <= '9':
|
||||
return true
|
||||
}
|
||||
switch c {
|
||||
case '-', '+', '.', '_':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isWhitespace(c byte) bool {
|
||||
switch c {
|
||||
case ' ', '\t', '\n', '\r':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isQuote(c byte) bool {
|
||||
switch c {
|
||||
case '"', '\'':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *textParser) skipWhitespace() {
|
||||
i := 0
|
||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||
if p.s[i] == '#' {
|
||||
// comment; skip to end of line or input
|
||||
for i < len(p.s) && p.s[i] != '\n' {
|
||||
i++
|
||||
}
|
||||
if i == len(p.s) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if p.s[i] == '\n' {
|
||||
p.line++
|
||||
}
|
||||
i++
|
||||
}
|
||||
p.offset += i
|
||||
p.s = p.s[i:len(p.s)]
|
||||
if len(p.s) == 0 {
|
||||
p.done = true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) advance() {
|
||||
// Skip whitespace
|
||||
p.skipWhitespace()
|
||||
if p.done {
|
||||
return
|
||||
}
|
||||
|
||||
// Start of non-whitespace
|
||||
p.cur.err = nil
|
||||
p.cur.offset, p.cur.line = p.offset, p.line
|
||||
p.cur.unquoted = ""
|
||||
switch p.s[0] {
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
||||
// Single symbol
|
||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||
case '"', '\'':
|
||||
// Quoted string
|
||||
i := 1
|
||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
||||
// skip escaped char
|
||||
i++
|
||||
}
|
||||
i++
|
||||
}
|
||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
||||
p.errorf("unmatched quote")
|
||||
return
|
||||
}
|
||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
||||
if err != nil {
|
||||
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
||||
p.cur.unquoted = unq
|
||||
default:
|
||||
i := 0
|
||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
||||
i++
|
||||
}
|
||||
if i == 0 {
|
||||
p.errorf("unexpected byte %#x", p.s[0])
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
||||
}
|
||||
p.offset += len(p.cur.value)
|
||||
}
|
||||
|
||||
var (
|
||||
errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||
)
|
||||
|
||||
func unquoteC(s string, quote rune) (string, error) {
|
||||
// This is based on C++'s tokenizer.cc.
|
||||
// Despite its name, this is *not* parsing C syntax.
|
||||
// For instance, "\0" is an invalid quoted string.
|
||||
|
||||
// Avoid allocation in trivial cases.
|
||||
simple := true
|
||||
for _, r := range s {
|
||||
if r == '\\' || r == quote {
|
||||
simple = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if simple {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
buf := make([]byte, 0, 3*len(s)/2)
|
||||
for len(s) > 0 {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
if r != '\\' {
|
||||
if r < utf8.RuneSelf {
|
||||
buf = append(buf, byte(r))
|
||||
} else {
|
||||
buf = append(buf, string(r)...)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
ch, tail, err := unescape(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf = append(buf, ch...)
|
||||
s = tail
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func unescape(s string) (ch string, tail string, err error) {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
switch r {
|
||||
case 'a':
|
||||
return "\a", s, nil
|
||||
case 'b':
|
||||
return "\b", s, nil
|
||||
case 'f':
|
||||
return "\f", s, nil
|
||||
case 'n':
|
||||
return "\n", s, nil
|
||||
case 'r':
|
||||
return "\r", s, nil
|
||||
case 't':
|
||||
return "\t", s, nil
|
||||
case 'v':
|
||||
return "\v", s, nil
|
||||
case '?':
|
||||
return "?", s, nil // trigraph workaround
|
||||
case '\'', '"', '\\':
|
||||
return string(r), s, nil
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
if len(s) < 2 {
|
||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||
}
|
||||
ss := string(r) + s[:2]
|
||||
s = s[2:]
|
||||
i, err := strconv.ParseUint(ss, 8, 8)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
||||
}
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
case 'x', 'X', 'u', 'U':
|
||||
var n int
|
||||
switch r {
|
||||
case 'x', 'X':
|
||||
n = 2
|
||||
case 'u':
|
||||
n = 4
|
||||
case 'U':
|
||||
n = 8
|
||||
}
|
||||
if len(s) < n {
|
||||
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
||||
}
|
||||
ss := s[:n]
|
||||
s = s[n:]
|
||||
i, err := strconv.ParseUint(ss, 16, 64)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
||||
}
|
||||
if r == 'x' || r == 'X' {
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
}
|
||||
if i > utf8.MaxRune {
|
||||
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
||||
}
|
||||
return string(i), s, nil
|
||||
}
|
||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||
}
|
||||
|
||||
// Back off the parser by one token. Can only be done between calls to next().
|
||||
// It makes the next advance() a no-op.
|
||||
func (p *textParser) back() { p.backed = true }
|
||||
|
||||
// Advances the parser and returns the new current token.
|
||||
func (p *textParser) next() *token {
|
||||
if p.backed || p.done {
|
||||
p.backed = false
|
||||
return &p.cur
|
||||
}
|
||||
p.advance()
|
||||
if p.done {
|
||||
p.cur.value = ""
|
||||
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
||||
// Look for multiple quoted strings separated by whitespace,
|
||||
// and concatenate them.
|
||||
cat := p.cur
|
||||
for {
|
||||
p.skipWhitespace()
|
||||
if p.done || !isQuote(p.s[0]) {
|
||||
break
|
||||
}
|
||||
p.advance()
|
||||
if p.cur.err != nil {
|
||||
return &p.cur
|
||||
}
|
||||
cat.value += " " + p.cur.value
|
||||
cat.unquoted += p.cur.unquoted
|
||||
}
|
||||
p.done = false // parser may have seen EOF, but we want to return cat
|
||||
p.cur = cat
|
||||
}
|
||||
return &p.cur
|
||||
}
|
||||
|
||||
func (p *textParser) consumeToken(s string) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != s {
|
||||
p.back()
|
||||
return p.errorf("expected %q, found %q", s, tok.value)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Return a RequiredNotSetError indicating which required field was not set.
|
||||
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < st.NumField(); i++ {
|
||||
if !isNil(sv.Field(i)) {
|
||||
continue
|
||||
}
|
||||
|
||||
props := sprops.Prop[i]
|
||||
if props.Required {
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
|
||||
}
|
||||
}
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
|
||||
}
|
||||
|
||||
// Returns the index in the struct for the named field, as well as the parsed tag properties.
|
||||
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
|
||||
i, ok := sprops.decoderOrigNames[name]
|
||||
if ok {
|
||||
return i, sprops.Prop[i], true
|
||||
}
|
||||
return -1, nil, false
|
||||
}
|
||||
|
||||
// Consume a ':' from the input stream (if the next token is a colon),
|
||||
// returning an error if a colon is needed but not present.
|
||||
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ":" {
|
||||
// Colon is optional when the field is a group or message.
|
||||
needColon := true
|
||||
switch props.Wire {
|
||||
case "group":
|
||||
needColon = false
|
||||
case "bytes":
|
||||
// A "bytes" field is either a message, a string, or a repeated field;
|
||||
// those three become *T, *string and []T respectively, so we can check for
|
||||
// this field being a pointer to a non-string.
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
// *T or *string
|
||||
if typ.Elem().Kind() == reflect.String {
|
||||
break
|
||||
}
|
||||
} else if typ.Kind() == reflect.Slice {
|
||||
// []T or []*T
|
||||
if typ.Elem().Kind() != reflect.Ptr {
|
||||
break
|
||||
}
|
||||
} else if typ.Kind() == reflect.String {
|
||||
// The proto3 exception is for a string field,
|
||||
// which requires a colon.
|
||||
break
|
||||
}
|
||||
needColon = false
|
||||
}
|
||||
if needColon {
|
||||
return p.errorf("expected ':', found %q", tok.value)
|
||||
}
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
reqCount := sprops.reqCount
|
||||
var reqFieldErr error
|
||||
fieldSet := make(map[string]bool)
|
||||
// A struct is a sequence of "name: value", terminated by one of
|
||||
// '>' or '}', or the end of the input. A name may also be
|
||||
// "[extension]" or "[type/url]".
|
||||
//
|
||||
// The whole struct can also be an expanded Any message, like:
|
||||
// [type/url] < ... struct contents ... >
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
if tok.value == "[" {
|
||||
// Looks like an extension or an Any.
|
||||
//
|
||||
// TODO: Check whether we need to handle
|
||||
// namespace rooted names (e.g. ".something.Foo").
|
||||
extName, err := p.consumeExtName()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s := strings.LastIndex(extName, "/"); s >= 0 {
|
||||
// If it contains a slash, it's an Any type URL.
|
||||
messageName := extName[s+1:]
|
||||
mt := MessageType(messageName)
|
||||
if mt == nil {
|
||||
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
|
||||
}
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
// consume an optional colon
|
||||
if tok.value == ":" {
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
}
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
v := reflect.New(mt.Elem())
|
||||
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
|
||||
return pe
|
||||
}
|
||||
b, err := Marshal(v.Interface().(Message))
|
||||
if err != nil {
|
||||
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
|
||||
}
|
||||
if fieldSet["type_url"] {
|
||||
return p.errorf(anyRepeatedlyUnpacked, "type_url")
|
||||
}
|
||||
if fieldSet["value"] {
|
||||
return p.errorf(anyRepeatedlyUnpacked, "value")
|
||||
}
|
||||
sv.FieldByName("TypeUrl").SetString(extName)
|
||||
sv.FieldByName("Value").SetBytes(b)
|
||||
fieldSet["type_url"] = true
|
||||
fieldSet["value"] = true
|
||||
continue
|
||||
}
|
||||
|
||||
var desc *ExtensionDesc
|
||||
// This could be faster, but it's functional.
|
||||
// TODO: Do something smarter than a linear scan.
|
||||
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
|
||||
if d.Name == extName {
|
||||
desc = d
|
||||
break
|
||||
}
|
||||
}
|
||||
if desc == nil {
|
||||
return p.errorf("unrecognized extension %q", extName)
|
||||
}
|
||||
|
||||
props := &Properties{}
|
||||
props.Parse(desc.Tag)
|
||||
|
||||
typ := reflect.TypeOf(desc.ExtensionType)
|
||||
if err := p.checkForColon(props, typ); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rep := desc.repeated()
|
||||
|
||||
// Read the extension structure, and set it in
|
||||
// the value we're constructing.
|
||||
var ext reflect.Value
|
||||
if !rep {
|
||||
ext = reflect.New(typ).Elem()
|
||||
} else {
|
||||
ext = reflect.New(typ.Elem()).Elem()
|
||||
}
|
||||
if err := p.readAny(ext, props); err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
}
|
||||
ep := sv.Addr().Interface().(Message)
|
||||
if !rep {
|
||||
SetExtension(ep, desc, ext.Interface())
|
||||
} else {
|
||||
old, err := GetExtension(ep, desc)
|
||||
var sl reflect.Value
|
||||
if err == nil {
|
||||
sl = reflect.ValueOf(old) // existing slice
|
||||
} else {
|
||||
sl = reflect.MakeSlice(typ, 0, 1)
|
||||
}
|
||||
sl = reflect.Append(sl, ext)
|
||||
SetExtension(ep, desc, sl.Interface())
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// This is a normal, non-extension field.
|
||||
name := tok.value
|
||||
var dst reflect.Value
|
||||
fi, props, ok := structFieldByName(sprops, name)
|
||||
if ok {
|
||||
dst = sv.Field(fi)
|
||||
} else if oop, ok := sprops.OneofTypes[name]; ok {
|
||||
// It is a oneof.
|
||||
props = oop.Prop
|
||||
nv := reflect.New(oop.Type.Elem())
|
||||
dst = nv.Elem().Field(0)
|
||||
field := sv.Field(oop.Field)
|
||||
if !field.IsNil() {
|
||||
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
|
||||
}
|
||||
field.Set(nv)
|
||||
}
|
||||
if !dst.IsValid() {
|
||||
return p.errorf("unknown field name %q in %v", name, st)
|
||||
}
|
||||
|
||||
if dst.Kind() == reflect.Map {
|
||||
// Consume any colon.
|
||||
if err := p.checkForColon(props, dst.Type()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Construct the map if it doesn't already exist.
|
||||
if dst.IsNil() {
|
||||
dst.Set(reflect.MakeMap(dst.Type()))
|
||||
}
|
||||
key := reflect.New(dst.Type().Key()).Elem()
|
||||
val := reflect.New(dst.Type().Elem()).Elem()
|
||||
|
||||
// The map entry should be this sequence of tokens:
|
||||
// < key : KEY value : VALUE >
|
||||
// However, implementations may omit key or value, and technically
|
||||
// we should support them in any order. See b/28924776 for a time
|
||||
// this went wrong.
|
||||
|
||||
tok := p.next()
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
switch tok.value {
|
||||
case "key":
|
||||
if err := p.consumeToken(":"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(key, props.MapKeyProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
case "value":
|
||||
if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(val, props.MapValProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
p.back()
|
||||
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
||||
}
|
||||
}
|
||||
|
||||
dst.SetMapIndex(key, val)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check that it's not already set if it's not a repeated field.
|
||||
if !props.Repeated && fieldSet[name] {
|
||||
return p.errorf("non-repeated field %q was repeated", name)
|
||||
}
|
||||
|
||||
if err := p.checkForColon(props, dst.Type()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse into the field.
|
||||
fieldSet[name] = true
|
||||
if err := p.readAny(dst, props); err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
}
|
||||
if props.Required {
|
||||
reqCount--
|
||||
}
|
||||
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if reqCount > 0 {
|
||||
return p.missingRequiredFieldError(sv)
|
||||
}
|
||||
return reqFieldErr
|
||||
}
|
||||
|
||||
// consumeExtName consumes extension name or expanded Any type URL and the
|
||||
// following ']'. It returns the name or URL consumed.
|
||||
func (p *textParser) consumeExtName() (string, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return "", tok.err
|
||||
}
|
||||
|
||||
// If extension name or type url is quoted, it's a single token.
|
||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return name, p.consumeToken("]")
|
||||
}
|
||||
|
||||
// Consume everything up to "]"
|
||||
var parts []string
|
||||
for tok.value != "]" {
|
||||
parts = append(parts, tok.value)
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||
}
|
||||
if p.done && tok.value != "]" {
|
||||
return "", p.errorf("unclosed type_url or extension name")
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, ""), nil
|
||||
}
|
||||
|
||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||
// It is used in readStruct to provide backward compatibility.
|
||||
func (p *textParser) consumeOptionalSeparator() error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ";" && tok.value != "," {
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch fv := v; fv.Kind() {
|
||||
case reflect.Slice:
|
||||
at := v.Type()
|
||||
if at.Elem().Kind() == reflect.Uint8 {
|
||||
// Special case for []byte
|
||||
if tok.value[0] != '"' && tok.value[0] != '\'' {
|
||||
// Deliberately written out here, as the error after
|
||||
// this switch statement would write "invalid []byte: ...",
|
||||
// which is not as user-friendly.
|
||||
return p.errorf("invalid string: %v", tok.value)
|
||||
}
|
||||
bytes := []byte(tok.unquoted)
|
||||
fv.Set(reflect.ValueOf(bytes))
|
||||
return nil
|
||||
}
|
||||
// Repeated field.
|
||||
if tok.value == "[" {
|
||||
// Repeated field with list notation, like [1,2,3].
|
||||
for {
|
||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||
err := p.readAny(fv.Index(fv.Len()-1), props)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == "]" {
|
||||
break
|
||||
}
|
||||
if tok.value != "," {
|
||||
return p.errorf("Expected ']' or ',' found %q", tok.value)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// One value of the repeated field.
|
||||
p.back()
|
||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||
return p.readAny(fv.Index(fv.Len()-1), props)
|
||||
case reflect.Bool:
|
||||
// true/1/t/True or false/f/0/False.
|
||||
switch tok.value {
|
||||
case "true", "1", "t", "True":
|
||||
fv.SetBool(true)
|
||||
return nil
|
||||
case "false", "0", "f", "False":
|
||||
fv.SetBool(false)
|
||||
return nil
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
v := tok.value
|
||||
// Ignore 'f' for compatibility with output generated by C++, but don't
|
||||
// remove 'f' when the value is "-inf" or "inf".
|
||||
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
|
||||
v = v[:len(v)-1]
|
||||
}
|
||||
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
|
||||
fv.SetFloat(f)
|
||||
return nil
|
||||
}
|
||||
case reflect.Int32:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
fv.SetInt(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(props.Enum) == 0 {
|
||||
break
|
||||
}
|
||||
m, ok := enumValueMaps[props.Enum]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
x, ok := m[tok.value]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
fv.SetInt(int64(x))
|
||||
return nil
|
||||
case reflect.Int64:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
||||
fv.SetInt(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// A basic field (indirected through pointer), or a repeated message/group
|
||||
p.back()
|
||||
fv.Set(reflect.New(fv.Type().Elem()))
|
||||
return p.readAny(fv.Elem(), props)
|
||||
case reflect.String:
|
||||
if tok.value[0] == '"' || tok.value[0] == '\'' {
|
||||
fv.SetString(tok.unquoted)
|
||||
return nil
|
||||
}
|
||||
case reflect.Struct:
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "{":
|
||||
terminator = "}"
|
||||
case "<":
|
||||
terminator = ">"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
|
||||
return p.readStruct(fv, terminator)
|
||||
case reflect.Uint32:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
fv.SetUint(uint64(x))
|
||||
return nil
|
||||
}
|
||||
case reflect.Uint64:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
fv.SetUint(x)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return p.errorf("invalid %v: %v", v.Type(), tok.value)
|
||||
}
|
||||
|
||||
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
|
||||
// before starting to unmarshal, so any existing data in pb is always removed.
|
||||
// If a required field is not set and no other error occurs,
|
||||
// UnmarshalText returns *RequiredNotSetError.
|
||||
func UnmarshalText(s string, pb Message) error {
|
||||
if um, ok := pb.(encoding.TextUnmarshaler); ok {
|
||||
return um.UnmarshalText([]byte(s))
|
||||
}
|
||||
pb.Reset()
|
||||
v := reflect.ValueOf(pb)
|
||||
return newTextParser(s).readStruct(v.Elem(), "")
|
||||
}
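UnmarshalText is the inverse of the text writer removed above, so the two files round-trip through the same format. A minimal round-trip sketch; the hand-written KV type is hypothetical and stands in for a protoc-generated message:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// KV is a hypothetical hand-written message used only for this sketch.
type KV struct {
	Key   *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
	Value *int64  `protobuf:"varint,2,opt,name=value" json:"value,omitempty"`
}

func (m *KV) Reset()         { *m = KV{} }
func (m *KV) String() string { return proto.CompactTextString(m) }
func (*KV) ProtoMessage()    {}

func main() {
	in := &KV{Key: proto.String("retries"), Value: proto.Int64(5)}
	text := proto.MarshalTextString(in) // e.g. "key: \"retries\"\nvalue: 5\n"

	out := &KV{}
	if err := proto.UnmarshalText(text, out); err != nil {
		panic(err)
	}
	fmt.Println(*out.Key, *out.Value) // retries 5
}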
191	vendor/github.com/juju/errors/LICENSE (generated, vendored)
@@ -1,191 +0,0 @@
All files in this repository are licensed as follows. If you contribute
|
||||
to this repository, it is assumed that you license your contribution
|
||||
under the same license unless you state otherwise.
|
||||
|
||||
All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
|
||||
|
||||
This software is licensed under the LGPLv3, included below.
|
||||
|
||||
As a special exception to the GNU Lesser General Public License version 3
|
||||
("LGPL3"), the copyright holders of this Library give you permission to
|
||||
convey to a third party a Combined Work that links statically or dynamically
|
||||
to this Library without providing any Minimal Corresponding Source or
|
||||
Minimal Application Code as set out in 4d or providing the installation
|
||||
information set out in section 4e, provided that you comply with the other
|
||||
provisions of LGPL3 and provided that you meet, for the Application the
|
||||
terms and conditions of the license(s) which apply to the Application.
|
||||
|
||||
Except as stated in this special exception, the provisions of LGPL3 will
|
||||
continue to comply in full to this Library. If you modify this Library, you
|
||||
may apply this exception to your version of this Library, but you are not
|
||||
obliged to do so. If you do not wish to do so, delete this exception
|
||||
statement from your version. This exception does not (and cannot) modify any
|
||||
license terms which apply to the Application, with which you must still
|
||||
comply.
|
||||
|
||||
|
||||
GNU LESSER GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
|
||||
This version of the GNU Lesser General Public License incorporates
|
||||
the terms and conditions of version 3 of the GNU General Public
|
||||
License, supplemented by the additional permissions listed below.
|
||||
|
||||
0. Additional Definitions.
|
||||
|
||||
As used herein, "this License" refers to version 3 of the GNU Lesser
|
||||
General Public License, and the "GNU GPL" refers to version 3 of the GNU
|
||||
General Public License.
|
||||
|
||||
"The Library" refers to a covered work governed by this License,
|
||||
other than an Application or a Combined Work as defined below.
|
||||
|
||||
An "Application" is any work that makes use of an interface provided
|
||||
by the Library, but which is not otherwise based on the Library.
|
||||
Defining a subclass of a class defined by the Library is deemed a mode
|
||||
of using an interface provided by the Library.
|
||||
|
||||
A "Combined Work" is a work produced by combining or linking an
|
||||
Application with the Library. The particular version of the Library
|
||||
with which the Combined Work was made is also called the "Linked
|
||||
Version".
|
||||
|
||||
The "Minimal Corresponding Source" for a Combined Work means the
|
||||
Corresponding Source for the Combined Work, excluding any source code
|
||||
for portions of the Combined Work that, considered in isolation, are
|
||||
based on the Application, and not on the Linked Version.
|
||||
|
||||
The "Corresponding Application Code" for a Combined Work means the
|
||||
object code and/or source code for the Application, including any data
|
||||
and utility programs needed for reproducing the Combined Work from the
|
||||
Application, but excluding the System Libraries of the Combined Work.
|
||||
|
||||
1. Exception to Section 3 of the GNU GPL.
|
||||
|
||||
You may convey a covered work under sections 3 and 4 of this License
|
||||
without being bound by section 3 of the GNU GPL.
|
||||
|
||||
2. Conveying Modified Versions.
|
||||
|
||||
If you modify a copy of the Library, and, in your modifications, a
|
||||
facility refers to a function or data to be supplied by an Application
|
||||
that uses the facility (other than as an argument passed when the
|
||||
facility is invoked), then you may convey a copy of the modified
|
||||
version:
|
||||
|
||||
a) under this License, provided that you make a good faith effort to
|
||||
ensure that, in the event an Application does not supply the
|
||||
function or data, the facility still operates, and performs
|
||||
whatever part of its purpose remains meaningful, or
|
||||
|
||||
b) under the GNU GPL, with none of the additional permissions of
|
||||
this License applicable to that copy.
|
||||
|
||||
3. Object Code Incorporating Material from Library Header Files.
|
||||
|
||||
The object code form of an Application may incorporate material from
|
||||
a header file that is part of the Library. You may convey such object
|
||||
code under terms of your choice, provided that, if the incorporated
|
||||
material is not limited to numerical parameters, data structure
|
||||
layouts and accessors, or small macros, inline functions and templates
|
||||
(ten or fewer lines in length), you do both of the following:
|
||||
|
||||
a) Give prominent notice with each copy of the object code that the
|
||||
Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the object code with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
4. Combined Works.
|
||||
|
||||
You may convey a Combined Work under terms of your choice that,
|
||||
taken together, effectively do not restrict modification of the
|
||||
portions of the Library contained in the Combined Work and reverse
|
||||
engineering for debugging such modifications, if you also do each of
|
||||
the following:
|
||||
|
||||
a) Give prominent notice with each copy of the Combined Work that
|
||||
the Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the Combined Work with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
c) For a Combined Work that displays copyright notices during
|
||||
execution, include the copyright notice for the Library among
|
||||
these notices, as well as a reference directing the user to the
|
||||
copies of the GNU GPL and this license document.
|
||||
|
||||
d) Do one of the following:
|
||||
|
||||
0) Convey the Minimal Corresponding Source under the terms of this
|
||||
License, and the Corresponding Application Code in a form
|
||||
suitable for, and under terms that permit, the user to
|
||||
recombine or relink the Application with a modified version of
|
||||
the Linked Version to produce a modified Combined Work, in the
|
||||
manner specified by section 6 of the GNU GPL for conveying
|
||||
Corresponding Source.
|
||||
|
||||
1) Use a suitable shared library mechanism for linking with the
|
||||
Library. A suitable mechanism is one that (a) uses at run time
|
||||
a copy of the Library already present on the user's computer
|
||||
system, and (b) will operate properly with a modified version
|
||||
of the Library that is interface-compatible with the Linked
|
||||
Version.
|
||||
|
||||
e) Provide Installation Information, but only if you would otherwise
|
||||
be required to provide such information under section 6 of the
|
||||
GNU GPL, and only to the extent that such information is
|
||||
necessary to install and execute a modified version of the
|
||||
Combined Work produced by recombining or relinking the
|
||||
Application with a modified version of the Linked Version. (If
|
||||
you use option 4d0, the Installation Information must accompany
|
||||
the Minimal Corresponding Source and Corresponding Application
|
||||
Code. If you use option 4d1, you must provide the Installation
|
||||
Information in the manner specified by section 6 of the GNU GPL
|
||||
for conveying Corresponding Source.)
|
||||
|
||||
5. Combined Libraries.
|
||||
|
||||
You may place library facilities that are a work based on the
|
||||
Library side by side in a single library together with other library
|
||||
facilities that are not Applications and are not covered by this
|
||||
License, and convey such a combined library under terms of your
|
||||
choice, if you do both of the following:
|
||||
|
||||
a) Accompany the combined library with a copy of the same work based
|
||||
on the Library, uncombined with any other library facilities,
|
||||
conveyed under the terms of this License.
|
||||
|
||||
b) Give prominent notice with the combined library that part of it
|
||||
is a work based on the Library, and explaining where to find the
|
||||
accompanying uncombined form of the same work.
|
||||
|
||||
6. Revised Versions of the GNU Lesser General Public License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions
|
||||
of the GNU Lesser General Public License from time to time. Such new
|
||||
versions will be similar in spirit to the present version, but may
|
||||
differ in detail to address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Library as you received it specifies that a certain numbered version
|
||||
of the GNU Lesser General Public License "or any later version"
|
||||
applies to it, you have the option of following the terms and
|
||||
conditions either of that published version or of any later version
|
||||
published by the Free Software Foundation. If the Library as you
|
||||
received it does not specify a version number of the GNU Lesser
|
||||
General Public License, you may choose any version of the GNU Lesser
|
||||
General Public License ever published by the Free Software Foundation.
|
||||
|
||||
If the Library as you received it specifies that a proxy can decide
|
||||
whether future versions of the GNU Lesser General Public License shall
|
||||
apply, that proxy's public statement of acceptance of any version is
|
||||
permanent authorization for you to choose that version for the
|
||||
Library.
|
81  vendor/github.com/juju/errors/doc.go (generated, vendored)
@@ -1,81 +0,0 @@
// Copyright 2013, 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3, see LICENCE file for details.
|
||||
|
||||
/*
|
||||
[godoc-link-here]
|
||||
|
||||
The juju/errors package provides an easy way to annotate errors without losing the
original error context.
|
||||
|
||||
The exported `New` and `Errorf` functions are designed to replace the
|
||||
`errors.New` and `fmt.Errorf` functions respectively. The same underlying
|
||||
error is there, but the package also records the location at which the error
|
||||
was created.
|
||||
|
||||
A primary use case for this library is to add extra context any time an
|
||||
error is returned from a function.
|
||||
|
||||
if err := SomeFunc(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
This instead becomes:
|
||||
|
||||
if err := SomeFunc(); err != nil {
|
||||
return errors.Trace(err)
|
||||
}
|
||||
|
||||
which just records the file and line number of the Trace call, or
|
||||
|
||||
if err := SomeFunc(); err != nil {
|
||||
return errors.Annotate(err, "more context")
|
||||
}
|
||||
|
||||
which also adds an annotation to the error.
|
||||
|
||||
When you want to check to see if an error is of a particular type, a helper
|
||||
function is normally exported by the package that returned the error, like the
|
||||
`os` package does. The underlying cause of the error is available using the
|
||||
`Cause` function.
|
||||
|
||||
os.IsNotExist(errors.Cause(err))
|
||||
|
||||
The result of the `Error()` call on an annotated error is the annotations joined
|
||||
with colons, then the result of the `Error()` method for the underlying error
|
||||
that was the cause.
|
||||
|
||||
err := errors.Errorf("original")
|
||||
err = errors.Annotatef(err, "context")
|
||||
err = errors.Annotatef(err, "more context")
|
||||
err.Error() -> "more context: context: original"
|
||||
|
||||
Obviously recording the file, line and functions is not very useful if you
|
||||
cannot get them back out again.
|
||||
|
||||
errors.ErrorStack(err)
|
||||
|
||||
will return something like:
|
||||
|
||||
first error
|
||||
github.com/juju/errors/annotation_test.go:193:
|
||||
github.com/juju/errors/annotation_test.go:194: annotation
|
||||
github.com/juju/errors/annotation_test.go:195:
|
||||
github.com/juju/errors/annotation_test.go:196: more context
|
||||
github.com/juju/errors/annotation_test.go:197:
|
||||
|
||||
The first error was generated by an external system, so there was no location
|
||||
associated. The second, fourth, and last lines were generated with Trace calls,
|
||||
and the other two through Annotate.
|
||||
|
||||
Sometimes when responding to an error you want to return a more specific error
|
||||
for the situation.
|
||||
|
||||
if err := FindField(field); err != nil {
|
||||
return errors.Wrap(err, errors.NotFoundf(field))
|
||||
}
|
||||
|
||||
This returns an error where the complete error stack is still available, and
|
||||
`errors.Cause()` will return the `NotFound` error.
|
||||
|
||||
*/
|
||||
package errors
|
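For reference, a minimal usage sketch of the API documented in the removed doc.go above (the import path mirrors the vendor directory; the readConfig helper and its file path are hypothetical):

	package main

	import (
		"fmt"
		"io/ioutil"

		"github.com/juju/errors"
	)

	// readConfig is a hypothetical helper that annotates any failure with context.
	func readConfig(path string) ([]byte, error) {
		data, err := ioutil.ReadFile(path)
		if err != nil {
			// Annotate keeps the original error as the Cause and records this location.
			return nil, errors.Annotate(err, "reading config")
		}
		return data, nil
	}

	func main() {
		if _, err := readConfig("/nonexistent/path"); err != nil {
			fmt.Println(errors.Cause(err))      // the underlying *os.PathError
			fmt.Println(errors.ErrorStack(err)) // one line per trace/annotation
		}
	}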
172  vendor/github.com/juju/errors/error.go (generated, vendored)
@@ -1,172 +0,0 @@
// Copyright 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3, see LICENCE file for details.
|
||||
|
||||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Err holds a description of an error along with information about
|
||||
// where the error was created.
|
||||
//
|
||||
// It may be embedded in custom error types to add extra information that
|
||||
// this errors package can understand.
|
||||
type Err struct {
|
||||
// message holds an annotation of the error.
|
||||
message string
|
||||
|
||||
// cause holds the cause of the error as returned
|
||||
// by the Cause method.
|
||||
cause error
|
||||
|
||||
// previous holds the previous error in the error stack, if any.
|
||||
previous error
|
||||
|
||||
// file and line hold the source code location where the error was
|
||||
// created.
|
||||
file string
|
||||
line int
|
||||
}
|
||||
|
||||
// NewErr is used to return an Err for the purpose of embedding in other
|
||||
// structures. The location is not specified, and needs to be set with a call
|
||||
// to SetLocation.
|
||||
//
|
||||
// For example:
|
||||
// type FooError struct {
|
||||
// errors.Err
|
||||
// code int
|
||||
// }
|
||||
//
|
||||
// func NewFooError(code int) error {
|
||||
// err := &FooError{errors.NewErr("foo"), code}
|
||||
// err.SetLocation(1)
|
||||
// return err
|
||||
// }
|
||||
func NewErr(format string, args ...interface{}) Err {
|
||||
return Err{
|
||||
message: fmt.Sprintf(format, args...),
|
||||
}
|
||||
}
|
||||
|
||||
// NewErrWithCause is used to return an Err with its cause set from another error, for the purpose of embedding in other
|
||||
// structures. The location is not specified, and needs to be set with a call
|
||||
// to SetLocation.
|
||||
//
|
||||
// For example:
|
||||
// type FooError struct {
|
||||
// errors.Err
|
||||
// code int
|
||||
// }
|
||||
//
|
||||
// func (e *FooError) Annotate(format string, args ...interface{}) error {
|
||||
// err := &FooError{errors.NewErrWithCause(e.Err, format, args...), e.code}
|
||||
// err.SetLocation(1)
|
||||
// return err
|
||||
// }
|
||||
func NewErrWithCause(other error, format string, args ...interface{}) Err {
|
||||
return Err{
|
||||
message: fmt.Sprintf(format, args...),
|
||||
cause: Cause(other),
|
||||
previous: other,
|
||||
}
|
||||
}
|
||||
|
||||
// Location returns the file and line where the error was most recently
// created or annotated.
|
||||
func (e *Err) Location() (filename string, line int) {
|
||||
return e.file, e.line
|
||||
}
|
||||
|
||||
// Underlying returns the previous error in the error stack, if any. A client
|
||||
// should not ever really call this method. It is used to build the error
|
||||
// stack and should not be introspected by client calls. Or more
|
||||
// specifically, clients should not depend on anything but the `Cause` of an
|
||||
// error.
|
||||
func (e *Err) Underlying() error {
|
||||
return e.previous
|
||||
}
|
||||
|
||||
// The Cause of an error is the most recent error in the error stack that
|
||||
// meets one of these criteria: the original error that was raised; the new
|
||||
// error that was passed into the Wrap function; the most recently masked
|
||||
// error; or nil if the error itself is considered the Cause. Normally this
|
||||
// method is not invoked directly, but instead through the Cause stand alone
|
||||
// function.
|
||||
func (e *Err) Cause() error {
|
||||
return e.cause
|
||||
}
|
||||
|
||||
// Message returns the message stored with the most recent location. This is
|
||||
// the empty string if the most recent call was Trace, or the message stored
|
||||
// with Annotate or Mask.
|
||||
func (e *Err) Message() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
// Error implements error.Error.
|
||||
func (e *Err) Error() string {
|
||||
// We want to walk up the stack of errors showing the annotations
|
||||
// as long as the cause is the same.
|
||||
err := e.previous
|
||||
if !sameError(Cause(err), e.cause) && e.cause != nil {
|
||||
err = e.cause
|
||||
}
|
||||
switch {
|
||||
case err == nil:
|
||||
return e.message
|
||||
case e.message == "":
|
||||
return err.Error()
|
||||
}
|
||||
return fmt.Sprintf("%s: %v", e.message, err)
|
||||
}
|
||||
|
||||
// Format implements fmt.Formatter
|
||||
// When printing errors with %+v it also prints the stack trace.
|
||||
// %#v unsurprisingly will print the real underlying type.
|
||||
func (e *Err) Format(s fmt.State, verb rune) {
|
||||
switch verb {
|
||||
case 'v':
|
||||
switch {
|
||||
case s.Flag('+'):
|
||||
fmt.Fprintf(s, "%s", ErrorStack(e))
|
||||
return
|
||||
case s.Flag('#'):
|
||||
// avoid infinite recursion by wrapping e into a type
|
||||
// that doesn't implement Formatter.
|
||||
fmt.Fprintf(s, "%#v", (*unformatter)(e))
|
||||
return
|
||||
}
|
||||
fallthrough
|
||||
case 's':
|
||||
fmt.Fprintf(s, "%s", e.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// helper for Format
|
||||
type unformatter Err
|
||||
|
||||
func (unformatter) Format() { /* break the fmt.Formatter interface */ }
|
||||
|
||||
// SetLocation records the source location of the error at callDepth stack
|
||||
// frames above the call.
|
||||
func (e *Err) SetLocation(callDepth int) {
|
||||
_, file, line, _ := runtime.Caller(callDepth + 1)
|
||||
e.file = trimGoPath(file)
|
||||
e.line = line
|
||||
}
|
||||
|
||||
// StackTrace returns one string for each location recorded in the stack of
|
||||
// errors. The first value is the originating error, with a line for each
|
||||
// other annotation or tracing of the error.
|
||||
func (e *Err) StackTrace() []string {
|
||||
return errorStack(e)
|
||||
}
|
||||
|
||||
// Ideally we'd have a way to check identity, but deep equals will do.
|
||||
func sameError(e1, e2 error) bool {
|
||||
return reflect.DeepEqual(e1, e2)
|
||||
}
|
333  vendor/github.com/juju/errors/errortypes.go (generated, vendored)
@@ -1,333 +0,0 @@
// Copyright 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3, see LICENCE file for details.
|
||||
|
||||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// wrap is a helper to construct an Err that wraps a previous error with a formatted message.
|
||||
func wrap(err error, format, suffix string, args ...interface{}) Err {
|
||||
newErr := Err{
|
||||
message: fmt.Sprintf(format+suffix, args...),
|
||||
previous: err,
|
||||
}
|
||||
newErr.SetLocation(2)
|
||||
return newErr
|
||||
}
|
||||
|
||||
// timeout represents an error on timeout.
|
||||
type timeout struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// Timeoutf returns an error which satisfies IsTimeout().
|
||||
func Timeoutf(format string, args ...interface{}) error {
|
||||
return &timeout{wrap(nil, format, " timeout", args...)}
|
||||
}
|
||||
|
||||
// NewTimeout returns an error which wraps err that satisfies
|
||||
// IsTimeout().
|
||||
func NewTimeout(err error, msg string) error {
|
||||
return &timeout{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsTimeout reports whether err was created with Timeoutf() or
|
||||
// NewTimeout().
|
||||
func IsTimeout(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*timeout)
|
||||
return ok
|
||||
}
|
||||
|
||||
// notFound represents an error when something has not been found.
|
||||
type notFound struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// NotFoundf returns an error which satisfies IsNotFound().
|
||||
func NotFoundf(format string, args ...interface{}) error {
|
||||
return &notFound{wrap(nil, format, " not found", args...)}
|
||||
}
|
||||
|
||||
// NewNotFound returns an error which wraps err that satisfies
|
||||
// IsNotFound().
|
||||
func NewNotFound(err error, msg string) error {
|
||||
return &notFound{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsNotFound reports whether err was created with NotFoundf() or
|
||||
// NewNotFound().
|
||||
func IsNotFound(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*notFound)
|
||||
return ok
|
||||
}
|
||||
|
||||
// userNotFound represents an error when an inexistent user is looked up.
|
||||
type userNotFound struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// UserNotFoundf returns an error which satisfies IsUserNotFound().
|
||||
func UserNotFoundf(format string, args ...interface{}) error {
|
||||
return &userNotFound{wrap(nil, format, " user not found", args...)}
|
||||
}
|
||||
|
||||
// NewUserNotFound returns an error which wraps err and satisfies
|
||||
// IsUserNotFound().
|
||||
func NewUserNotFound(err error, msg string) error {
|
||||
return &userNotFound{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsUserNotFound reports whether err was created with UserNotFoundf() or
|
||||
// NewUserNotFound().
|
||||
func IsUserNotFound(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*userNotFound)
|
||||
return ok
|
||||
}
|
||||
|
||||
// unauthorized represents an error when an operation is unauthorized.
|
||||
type unauthorized struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// Unauthorizedf returns an error which satisfies IsUnauthorized().
|
||||
func Unauthorizedf(format string, args ...interface{}) error {
|
||||
return &unauthorized{wrap(nil, format, "", args...)}
|
||||
}
|
||||
|
||||
// NewUnauthorized returns an error which wraps err and satisfies
|
||||
// IsUnauthorized().
|
||||
func NewUnauthorized(err error, msg string) error {
|
||||
return &unauthorized{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsUnauthorized reports whether err was created with Unauthorizedf() or
|
||||
// NewUnauthorized().
|
||||
func IsUnauthorized(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*unauthorized)
|
||||
return ok
|
||||
}
|
||||
|
||||
// notImplemented represents an error when something is not
|
||||
// implemented.
|
||||
type notImplemented struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// NotImplementedf returns an error which satisfies IsNotImplemented().
|
||||
func NotImplementedf(format string, args ...interface{}) error {
|
||||
return &notImplemented{wrap(nil, format, " not implemented", args...)}
|
||||
}
|
||||
|
||||
// NewNotImplemented returns an error which wraps err and satisfies
|
||||
// IsNotImplemented().
|
||||
func NewNotImplemented(err error, msg string) error {
|
||||
return &notImplemented{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsNotImplemented reports whether err was created with
|
||||
// NotImplementedf() or NewNotImplemented().
|
||||
func IsNotImplemented(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*notImplemented)
|
||||
return ok
|
||||
}
|
||||
|
||||
// alreadyExists represents an error when something already exists.
|
||||
type alreadyExists struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// AlreadyExistsf returns an error which satisfies IsAlreadyExists().
|
||||
func AlreadyExistsf(format string, args ...interface{}) error {
|
||||
return &alreadyExists{wrap(nil, format, " already exists", args...)}
|
||||
}
|
||||
|
||||
// NewAlreadyExists returns an error which wraps err and satisfies
|
||||
// IsAlreadyExists().
|
||||
func NewAlreadyExists(err error, msg string) error {
|
||||
return &alreadyExists{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsAlreadyExists reports whether the error was created with
|
||||
// AlreadyExistsf() or NewAlreadyExists().
|
||||
func IsAlreadyExists(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*alreadyExists)
|
||||
return ok
|
||||
}
|
||||
|
||||
// notSupported represents an error when something is not supported.
|
||||
type notSupported struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// NotSupportedf returns an error which satisfies IsNotSupported().
|
||||
func NotSupportedf(format string, args ...interface{}) error {
|
||||
return &notSupported{wrap(nil, format, " not supported", args...)}
|
||||
}
|
||||
|
||||
// NewNotSupported returns an error which wraps err and satisfies
|
||||
// IsNotSupported().
|
||||
func NewNotSupported(err error, msg string) error {
|
||||
return &notSupported{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsNotSupported reports whether the error was created with
|
||||
// NotSupportedf() or NewNotSupported().
|
||||
func IsNotSupported(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*notSupported)
|
||||
return ok
|
||||
}
|
||||
|
||||
// notValid represents an error when something is not valid.
|
||||
type notValid struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// NotValidf returns an error which satisfies IsNotValid().
|
||||
func NotValidf(format string, args ...interface{}) error {
|
||||
return &notValid{wrap(nil, format, " not valid", args...)}
|
||||
}
|
||||
|
||||
// NewNotValid returns an error which wraps err and satisfies IsNotValid().
|
||||
func NewNotValid(err error, msg string) error {
|
||||
return &notValid{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsNotValid reports whether the error was created with NotValidf() or
|
||||
// NewNotValid().
|
||||
func IsNotValid(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*notValid)
|
||||
return ok
|
||||
}
|
||||
|
||||
// notProvisioned represents an error when something is not yet provisioned.
|
||||
type notProvisioned struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// NotProvisionedf returns an error which satisfies IsNotProvisioned().
|
||||
func NotProvisionedf(format string, args ...interface{}) error {
|
||||
return &notProvisioned{wrap(nil, format, " not provisioned", args...)}
|
||||
}
|
||||
|
||||
// NewNotProvisioned returns an error which wraps err that satisfies
|
||||
// IsNotProvisioned().
|
||||
func NewNotProvisioned(err error, msg string) error {
|
||||
return &notProvisioned{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsNotProvisioned reports whether err was created with NotProvisionedf() or
|
||||
// NewNotProvisioned().
|
||||
func IsNotProvisioned(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*notProvisioned)
|
||||
return ok
|
||||
}
|
||||
|
||||
// notAssigned represents an error when something is not yet assigned to
|
||||
// something else.
|
||||
type notAssigned struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// NotAssignedf returns an error which satisfies IsNotAssigned().
|
||||
func NotAssignedf(format string, args ...interface{}) error {
|
||||
return &notAssigned{wrap(nil, format, " not assigned", args...)}
|
||||
}
|
||||
|
||||
// NewNotAssigned returns an error which wraps err that satisfies
|
||||
// IsNotAssigned().
|
||||
func NewNotAssigned(err error, msg string) error {
|
||||
return &notAssigned{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsNotAssigned reports whether err was created with NotAssignedf() or
|
||||
// NewNotAssigned().
|
||||
func IsNotAssigned(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*notAssigned)
|
||||
return ok
|
||||
}
|
||||
|
||||
// badRequest represents an error when a request has bad parameters.
|
||||
type badRequest struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// BadRequestf returns an error which satisfies IsBadRequest().
|
||||
func BadRequestf(format string, args ...interface{}) error {
|
||||
return &badRequest{wrap(nil, format, "", args...)}
|
||||
}
|
||||
|
||||
// NewBadRequest returns an error which wraps err that satisfies
|
||||
// IsBadRequest().
|
||||
func NewBadRequest(err error, msg string) error {
|
||||
return &badRequest{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsBadRequest reports whether err was created with BadRequestf() or
|
||||
// NewBadRequest().
|
||||
func IsBadRequest(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*badRequest)
|
||||
return ok
|
||||
}
|
||||
|
||||
// methodNotAllowed represents an error when an HTTP request
|
||||
// is made with an inappropriate method.
|
||||
type methodNotAllowed struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// MethodNotAllowedf returns an error which satisfies IsMethodNotAllowed().
|
||||
func MethodNotAllowedf(format string, args ...interface{}) error {
|
||||
return &methodNotAllowed{wrap(nil, format, "", args...)}
|
||||
}
|
||||
|
||||
// NewMethodNotAllowed returns an error which wraps err that satisfies
|
||||
// IsMethodNotAllowed().
|
||||
func NewMethodNotAllowed(err error, msg string) error {
|
||||
return &methodNotAllowed{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsMethodNotAllowed reports whether err was created with MethodNotAllowedf() or
|
||||
// NewMethodNotAllowed().
|
||||
func IsMethodNotAllowed(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*methodNotAllowed)
|
||||
return ok
|
||||
}
|
||||
|
||||
// forbidden represents an error when a request cannot be completed because of
|
||||
// missing privileges
|
||||
type forbidden struct {
|
||||
Err
|
||||
}
|
||||
|
||||
// Forbiddenf returns an error which satisfies IsForbidden().
|
||||
func Forbiddenf(format string, args ...interface{}) error {
|
||||
return &forbidden{wrap(nil, format, "", args...)}
|
||||
}
|
||||
|
||||
// NewForbidden returns an error which wraps err that satisfies
|
||||
// IsForbidden().
|
||||
func NewForbidden(err error, msg string) error {
|
||||
return &forbidden{wrap(err, msg, "")}
|
||||
}
|
||||
|
||||
// IsForbidden reports whether err was created with Forbiddenf() or
|
||||
// NewForbidden().
|
||||
func IsForbidden(err error) bool {
|
||||
err = Cause(err)
|
||||
_, ok := err.(*forbidden)
|
||||
return ok
|
||||
}
|
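As a small illustration of the constructor/predicate pairs defined in the removed errortypes.go above (a sketch; the lookup function and its key are hypothetical):

	package main

	import (
		"fmt"

		"github.com/juju/errors"
	)

	// lookup is a hypothetical function returning a typed not-found error.
	func lookup(key string) (string, error) {
		return "", errors.NotFoundf("entry %q", key) // Error() ends in " not found"
	}

	func main() {
		if _, err := lookup("demo"); errors.IsNotFound(err) {
			fmt.Println("falling back to default:", err)
		}
	}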
330  vendor/github.com/juju/errors/functions.go (generated, vendored)
@@ -1,330 +0,0 @@
// Copyright 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3, see LICENCE file for details.
|
||||
|
||||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// New is a drop-in replacement for the standard library errors package that records
// the location at which the error is created.
|
||||
//
|
||||
// For example:
|
||||
// return errors.New("validation failed")
|
||||
//
|
||||
func New(message string) error {
|
||||
err := &Err{message: message}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Errorf creates a new annotated error and records the location at which the
// error is created. This should be a drop-in replacement for fmt.Errorf.
|
||||
//
|
||||
// For example:
|
||||
// return errors.Errorf("validation failed: %s", message)
|
||||
//
|
||||
func Errorf(format string, args ...interface{}) error {
|
||||
err := &Err{message: fmt.Sprintf(format, args...)}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Trace adds the location of the Trace call to the stack. The Cause of the
|
||||
// resulting error is the same as the error parameter. If the other error is
|
||||
// nil, the result will be nil.
|
||||
//
|
||||
// For example:
|
||||
// if err := SomeFunc(); err != nil {
|
||||
// return errors.Trace(err)
|
||||
// }
|
||||
//
|
||||
func Trace(other error) error {
|
||||
if other == nil {
|
||||
return nil
|
||||
}
|
||||
err := &Err{previous: other, cause: Cause(other)}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Annotate is used to add extra context to an existing error. The location of
|
||||
// the Annotate call is recorded with the annotations. The file, line and
|
||||
// function are also recorded.
|
||||
//
|
||||
// For example:
|
||||
// if err := SomeFunc(); err != nil {
|
||||
// return errors.Annotate(err, "failed to frombulate")
|
||||
// }
|
||||
//
|
||||
func Annotate(other error, message string) error {
|
||||
if other == nil {
|
||||
return nil
|
||||
}
|
||||
err := &Err{
|
||||
previous: other,
|
||||
cause: Cause(other),
|
||||
message: message,
|
||||
}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Annotatef is used to add extra context to an existing error. The location of
|
||||
// the Annotate call is recorded with the annotations. The file, line and
|
||||
// function are also recorded.
|
||||
//
|
||||
// For example:
|
||||
// if err := SomeFunc(); err != nil {
|
||||
// return errors.Annotatef(err, "failed to frombulate the %s", arg)
|
||||
// }
|
||||
//
|
||||
func Annotatef(other error, format string, args ...interface{}) error {
|
||||
if other == nil {
|
||||
return nil
|
||||
}
|
||||
err := &Err{
|
||||
previous: other,
|
||||
cause: Cause(other),
|
||||
message: fmt.Sprintf(format, args...),
|
||||
}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// DeferredAnnotatef annotates the given error (when it is not nil) with the given
|
||||
// format string and arguments (like fmt.Sprintf). If *err is nil, DeferredAnnotatef
|
||||
// does nothing. This method is used in a defer statement in order to annotate any
|
||||
// resulting error with the same message.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// defer DeferredAnnotatef(&err, "failed to frombulate the %s", arg)
|
||||
//
|
||||
func DeferredAnnotatef(err *error, format string, args ...interface{}) {
|
||||
if *err == nil {
|
||||
return
|
||||
}
|
||||
newErr := &Err{
|
||||
message: fmt.Sprintf(format, args...),
|
||||
cause: Cause(*err),
|
||||
previous: *err,
|
||||
}
|
||||
newErr.SetLocation(1)
|
||||
*err = newErr
|
||||
}
|
||||
|
||||
// Wrap changes the Cause of the error. The location of the Wrap call is also
|
||||
// stored in the error stack.
|
||||
//
|
||||
// For example:
|
||||
// if err := SomeFunc(); err != nil {
|
||||
// newErr := &packageError{"more context", private_value}
|
||||
// return errors.Wrap(err, newErr)
|
||||
// }
|
||||
//
|
||||
func Wrap(other, newDescriptive error) error {
|
||||
err := &Err{
|
||||
previous: other,
|
||||
cause: newDescriptive,
|
||||
}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Wrapf changes the Cause of the error, and adds an annotation. The location
|
||||
// of the Wrap call is also stored in the error stack.
|
||||
//
|
||||
// For example:
|
||||
// if err := SomeFunc(); err != nil {
|
||||
// return errors.Wrapf(err, simpleErrorType, "invalid value %q", value)
|
||||
// }
|
||||
//
|
||||
func Wrapf(other, newDescriptive error, format string, args ...interface{}) error {
|
||||
err := &Err{
|
||||
message: fmt.Sprintf(format, args...),
|
||||
previous: other,
|
||||
cause: newDescriptive,
|
||||
}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Maskf masks the given error with the given format string and arguments (like
|
||||
// fmt.Sprintf), returning a new error that maintains the error stack, but
|
||||
// hides the underlying error type. The error string still contains the full
|
||||
// annotations. If you want to hide the annotations, call Wrap.
|
||||
func Maskf(other error, format string, args ...interface{}) error {
|
||||
if other == nil {
|
||||
return nil
|
||||
}
|
||||
err := &Err{
|
||||
message: fmt.Sprintf(format, args...),
|
||||
previous: other,
|
||||
}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Mask hides the underlying error type, and records the location of the masking.
|
||||
func Mask(other error) error {
|
||||
if other == nil {
|
||||
return nil
|
||||
}
|
||||
err := &Err{
|
||||
previous: other,
|
||||
}
|
||||
err.SetLocation(1)
|
||||
return err
|
||||
}
|
||||
|
||||
// Cause returns the cause of the given error. This will be either the
|
||||
// original error, or the result of a Wrap or Mask call.
|
||||
//
|
||||
// Cause is the usual way to diagnose errors that may have been wrapped by
|
||||
// the other errors functions.
|
||||
func Cause(err error) error {
|
||||
var diag error
|
||||
if err, ok := err.(causer); ok {
|
||||
diag = err.Cause()
|
||||
}
|
||||
if diag != nil {
|
||||
return diag
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type causer interface {
|
||||
Cause() error
|
||||
}
|
||||
|
||||
type wrapper interface {
|
||||
// Message returns the top level error message,
|
||||
// not including the message from the Previous
|
||||
// error.
|
||||
Message() string
|
||||
|
||||
// Underlying returns the Previous error, or nil
|
||||
// if there is none.
|
||||
Underlying() error
|
||||
}
|
||||
|
||||
type locationer interface {
|
||||
Location() (string, int)
|
||||
}
|
||||
|
||||
var (
|
||||
_ wrapper = (*Err)(nil)
|
||||
_ locationer = (*Err)(nil)
|
||||
_ causer = (*Err)(nil)
|
||||
)
|
||||
|
||||
// Details returns information about the stack of errors wrapped by err, in
|
||||
// the format:
|
||||
//
|
||||
// [{filename:99: error one} {otherfile:55: cause of error one}]
|
||||
//
|
||||
// This is a terse alternative to ErrorStack as it returns a single line.
|
||||
func Details(err error) string {
|
||||
if err == nil {
|
||||
return "[]"
|
||||
}
|
||||
var s []byte
|
||||
s = append(s, '[')
|
||||
for {
|
||||
s = append(s, '{')
|
||||
if err, ok := err.(locationer); ok {
|
||||
file, line := err.Location()
|
||||
if file != "" {
|
||||
s = append(s, fmt.Sprintf("%s:%d", file, line)...)
|
||||
s = append(s, ": "...)
|
||||
}
|
||||
}
|
||||
if cerr, ok := err.(wrapper); ok {
|
||||
s = append(s, cerr.Message()...)
|
||||
err = cerr.Underlying()
|
||||
} else {
|
||||
s = append(s, err.Error()...)
|
||||
err = nil
|
||||
}
|
||||
s = append(s, '}')
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
s = append(s, ' ')
|
||||
}
|
||||
s = append(s, ']')
|
||||
return string(s)
|
||||
}
|
||||
|
||||
// ErrorStack returns a string representation of the annotated error. If the
|
||||
// error passed as the parameter is not an annotated error, the result is
|
||||
// simply the result of the Error() method on that error.
|
||||
//
|
||||
// If the error is an annotated error, a multi-line string is returned where
|
||||
// each line represents one entry in the annotation stack. The full filename
|
||||
// from the call stack is used in the output.
|
||||
//
|
||||
// first error
|
||||
// github.com/juju/errors/annotation_test.go:193:
|
||||
// github.com/juju/errors/annotation_test.go:194: annotation
|
||||
// github.com/juju/errors/annotation_test.go:195:
|
||||
// github.com/juju/errors/annotation_test.go:196: more context
|
||||
// github.com/juju/errors/annotation_test.go:197:
|
||||
func ErrorStack(err error) string {
|
||||
return strings.Join(errorStack(err), "\n")
|
||||
}
|
||||
|
||||
func errorStack(err error) []string {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// We want the first error first
|
||||
var lines []string
|
||||
for {
|
||||
var buff []byte
|
||||
if err, ok := err.(locationer); ok {
|
||||
file, line := err.Location()
|
||||
// Strip off the leading GOPATH/src path elements.
|
||||
file = trimGoPath(file)
|
||||
if file != "" {
|
||||
buff = append(buff, fmt.Sprintf("%s:%d", file, line)...)
|
||||
buff = append(buff, ": "...)
|
||||
}
|
||||
}
|
||||
if cerr, ok := err.(wrapper); ok {
|
||||
message := cerr.Message()
|
||||
buff = append(buff, message...)
|
||||
// If there is a cause for this error, and it is different to the cause
|
||||
// of the underlying error, then output the error string in the stack trace.
|
||||
var cause error
|
||||
if err1, ok := err.(causer); ok {
|
||||
cause = err1.Cause()
|
||||
}
|
||||
err = cerr.Underlying()
|
||||
if cause != nil && !sameError(Cause(err), cause) {
|
||||
if message != "" {
|
||||
buff = append(buff, ": "...)
|
||||
}
|
||||
buff = append(buff, cause.Error()...)
|
||||
}
|
||||
} else {
|
||||
buff = append(buff, err.Error()...)
|
||||
err = nil
|
||||
}
|
||||
lines = append(lines, string(buff))
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
// reverse the lines to get the original error, which was at the end of
|
||||
// the list, back to the start.
|
||||
var result []string
|
||||
for i := len(lines); i > 0; i-- {
|
||||
result = append(result, lines[i-1])
|
||||
}
|
||||
return result
|
||||
}
|
19  vendor/github.com/juju/errors/path.go (generated, vendored)
@@ -1,19 +0,0 @@
// Copyright 2013, 2014 Canonical Ltd.
|
||||
// Licensed under the LGPLv3, see LICENCE file for details.
|
||||
|
||||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/build"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var goPath = build.Default.GOPATH
|
||||
var srcDir = filepath.Join(goPath, "src")
|
||||
|
||||
func trimGoPath(filename string) string {
|
||||
return strings.TrimPrefix(filename, fmt.Sprintf("%s%s", srcDir, string(os.PathSeparator)))
|
||||
}
|
9  vendor/github.com/konsorten/go-windows-terminal-sequences/license (generated, vendored)
@@ -1,9 +0,0 @@
(The MIT License)
|
||||
|
||||
Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
36  vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go (generated, vendored)
@@ -1,36 +0,0 @@
// +build windows
|
||||
|
||||
package sequences
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
kernel32Dll *syscall.LazyDLL = syscall.NewLazyDLL("Kernel32.dll")
|
||||
setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
|
||||
)
|
||||
|
||||
func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
|
||||
const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4
|
||||
|
||||
var mode uint32
|
||||
err := syscall.GetConsoleMode(syscall.Stdout, &mode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if enable {
|
||||
mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
|
||||
} else {
|
||||
mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
|
||||
}
|
||||
|
||||
ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode))
|
||||
if ret == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
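A hedged usage sketch for EnableVirtualTerminalProcessing above (Windows only; the import path mirrors the vendor directory and the error handling is illustrative):

	// +build windows

	package main

	import (
		"fmt"
		"syscall"

		sequences "github.com/konsorten/go-windows-terminal-sequences"
	)

	func main() {
		// Turn on ANSI escape sequence handling for stdout on Windows 10+.
		if err := sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true); err != nil {
			fmt.Println("could not enable VT processing:", err)
		}
	}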
201  vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE (generated, vendored)
@@ -1,201 +0,0 @@
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
1  vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE (generated, vendored)
@@ -1 +0,0 @@
Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
|
75  vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go (generated, vendored)
@@ -1,75 +0,0 @@
// Copyright 2013 Matt T. Proud
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pbutil
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
var errInvalidVarint = errors.New("invalid varint32 encountered")
|
||||
|
||||
// ReadDelimited decodes a message from the provided length-delimited stream,
|
||||
// where the length is encoded as 32-bit varint prefix to the message body.
|
||||
// It returns the total number of bytes read and any applicable error. This is
|
||||
// roughly equivalent to the companion Java API's
|
||||
// MessageLite#parseDelimitedFrom. As per the reader contract, this function
|
||||
// calls r.Read repeatedly as required until exactly one message including its
|
||||
// prefix is read and decoded (or an error has occurred). The function never
|
||||
// reads more bytes from the stream than required. The function never returns
|
||||
// an error if a message has been read and decoded correctly, even if the end
|
||||
// of the stream has been reached in doing so. In that case, any subsequent
|
||||
// calls return (0, io.EOF).
|
||||
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
|
||||
// Per AbstractParser#parsePartialDelimitedFrom with
|
||||
// CodedInputStream#readRawVarint32.
|
||||
var headerBuf [binary.MaxVarintLen32]byte
|
||||
var bytesRead, varIntBytes int
|
||||
var messageLength uint64
|
||||
for varIntBytes == 0 { // i.e. no varint has been decoded yet.
|
||||
if bytesRead >= len(headerBuf) {
|
||||
return bytesRead, errInvalidVarint
|
||||
}
|
||||
// We have to read byte by byte here to avoid reading more bytes
|
||||
// than required. Each read byte is appended to what we have
|
||||
// read before.
|
||||
newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
|
||||
if newBytesRead == 0 {
|
||||
if err != nil {
|
||||
return bytesRead, err
|
||||
}
|
||||
// A Reader should not return (0, nil), but if it does,
|
||||
// it should be treated as no-op (according to the
|
||||
// Reader contract). So let's go on...
|
||||
continue
|
||||
}
|
||||
bytesRead += newBytesRead
|
||||
// Now present everything read so far to the varint decoder and
|
||||
// see if a varint can be decoded already.
|
||||
messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
|
||||
}
|
||||
|
||||
messageBuf := make([]byte, messageLength)
|
||||
newBytesRead, err := io.ReadFull(r, messageBuf)
|
||||
bytesRead += newBytesRead
|
||||
if err != nil {
|
||||
return bytesRead, err
|
||||
}
|
||||
|
||||
return bytesRead, proto.Unmarshal(messageBuf, m)
|
||||
}
|
16  vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go (generated, vendored)
@@ -1,16 +0,0 @@
// Copyright 2013 Matt T. Proud
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package pbutil provides record length-delimited Protocol Buffer streaming.
|
||||
package pbutil
|
46 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go generated vendored
@@ -1,46 +0,0 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pbutil

import (
	"encoding/binary"
	"io"

	"github.com/golang/protobuf/proto"
)

// WriteDelimited encodes and dumps a message to the provided writer prefixed
// with a 32-bit varint indicating the length of the encoded message, producing
// a length-delimited record stream, which can be used to chain together
// encoded messages of the same type in a file. It returns the total
// number of bytes written and any applicable error. This is roughly
// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
	buffer, err := proto.Marshal(m)
	if err != nil {
		return 0, err
	}

	var buf [binary.MaxVarintLen32]byte
	encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))

	sync, err := w.Write(buf[:encodedLength])
	if err != nil {
		return sync, err
	}

	n, err = w.Write(buffer)
	return n + sync, err
}
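WriteDelimited and ReadDelimited are designed to round-trip. A minimal sketch, assuming the vendored import paths above; the message contents are illustrative only:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	in := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Help: proto.String("Total number of HTTP requests."),
		Type: dto.MetricType_COUNTER.Enum(),
	}

	// Write one varint-prefixed record into an in-memory stream.
	var buf bytes.Buffer
	written, err := pbutil.WriteDelimited(&buf, in)
	if err != nil {
		log.Fatal(err)
	}

	// Read it back; the byte counts of the two calls must match.
	out := &dto.MetricFamily{}
	read, err := pbutil.ReadDelimited(&buf, out)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("wrote %d bytes, read %d bytes, name=%s\n", written, read, out.GetName())
}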
18 vendor/github.com/prometheus/client_golang/AUTHORS.md generated vendored
@@ -1,18 +0,0 @@
The Prometheus project was started by Matt T. Proud (emeritus) and
Julius Volz in 2012.

Maintainers of this repository:

* Björn Rabenstein <beorn@soundcloud.com>

The following individuals have contributed code to this repository
(listed in alphabetical order):

* Bernerd Schaefer <bj.schaefer@gmail.com>
* Björn Rabenstein <beorn@soundcloud.com>
* Daniel Bornkessel <daniel@soundcloud.com>
* Jeff Younker <jeff@drinktomi.com>
* Julius Volz <julius.volz@gmail.com>
* Matt T. Proud <matt.proud@gmail.com>
* Tobias Schmidt <ts@soundcloud.com>
201 vendor/github.com/prometheus/client_golang/LICENSE generated vendored
@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
23 vendor/github.com/prometheus/client_golang/NOTICE generated vendored
@@ -1,23 +0,0 @@
Prometheus instrumentation library for Go applications
Copyright 2012-2015 The Prometheus Authors

This product includes software developed at
SoundCloud Ltd. (http://soundcloud.com/).


The following components are included in this product:

perks - a fork of https://github.com/bmizerany/perks
https://github.com/beorn7/perks
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
See https://github.com/beorn7/perks/blob/master/README.md for license details.

Go support for Protocol Buffers - Google's data interchange format
http://github.com/golang/protobuf/
Copyright 2010 The Go Authors
See source code for license details.

Support for streaming Protocol Buffer messages for the Go language (golang).
https://github.com/matttproud/golang_protobuf_extensions
Copyright 2013 Matt T. Proud
Licensed under the Apache License, Version 2.0
75 vendor/github.com/prometheus/client_golang/prometheus/collector.go generated vendored
@@ -1,75 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

// Collector is the interface implemented by anything that can be used by
// Prometheus to collect metrics. A Collector has to be registered for
// collection. See Registerer.Register.
//
// The stock metrics provided by this package (Gauge, Counter, Summary,
// Histogram, Untyped) are also Collectors (which only ever collect one metric,
// namely itself). An implementer of Collector may, however, collect multiple
// metrics in a coordinated fashion and/or create metrics on the fly. Examples
// for collectors already implemented in this library are the metric vectors
// (i.e. collection of multiple instances of the same Metric but with different
// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
type Collector interface {
	// Describe sends the super-set of all possible descriptors of metrics
	// collected by this Collector to the provided channel and returns once
	// the last descriptor has been sent. The sent descriptors fulfill the
	// consistency and uniqueness requirements described in the Desc
	// documentation. (It is valid if one and the same Collector sends
	// duplicate descriptors. Those duplicates are simply ignored. However,
	// two different Collectors must not send duplicate descriptors.) This
	// method idempotently sends the same descriptors throughout the
	// lifetime of the Collector. If a Collector encounters an error while
	// executing this method, it must send an invalid descriptor (created
	// with NewInvalidDesc) to signal the error to the registry.
	Describe(chan<- *Desc)
	// Collect is called by the Prometheus registry when collecting
	// metrics. The implementation sends each collected metric via the
	// provided channel and returns once the last metric has been sent. The
	// descriptor of each sent metric is one of those returned by
	// Describe. Returned metrics that share the same descriptor must differ
	// in their variable label values. This method may be called
	// concurrently and must therefore be implemented in a concurrency safe
	// way. Blocking occurs at the expense of total performance of rendering
	// all registered metrics. Ideally, Collector implementations support
	// concurrent readers.
	Collect(chan<- Metric)
}

// selfCollector implements Collector for a single Metric so that the Metric
// collects itself. Add it as an anonymous field to a struct that implements
// Metric, and call init with the Metric itself as an argument.
type selfCollector struct {
	self Metric
}

// init provides the selfCollector with a reference to the metric it is supposed
// to collect. It is usually called within the factory function to create a
// metric. See example.
func (c *selfCollector) init(self Metric) {
	c.self = self
}

// Describe implements Collector.
func (c *selfCollector) Describe(ch chan<- *Desc) {
	ch <- c.self.Desc()
}

// Collect implements Collector.
func (c *selfCollector) Collect(ch chan<- Metric) {
	ch <- c.self
}
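Besides the selfCollector embedding shown above, the Collector interface is commonly satisfied by a hand-written collector that builds constant metrics at scrape time. A minimal sketch under assumed names (queueCollector, work_queue_length and the length callback are all hypothetical, not part of the vendored code):

package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector mirrors the length of an external work queue into a
// constant metric at scrape time.
type queueCollector struct {
	queueLen *prometheus.Desc
	length   func() float64 // supplied by the application
}

// Describe sends the one descriptor this collector can ever produce.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.queueLen
}

// Collect builds a throw-away metric from the current queue length.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.queueLen, prometheus.GaugeValue, c.length())
}

func main() {
	c := &queueCollector{
		queueLen: prometheus.NewDesc("work_queue_length", "Current length of the work queue.", nil, nil),
		length:   func() float64 { return 42 }, // placeholder data source
	}
	prometheus.MustRegister(c)
}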
172 vendor/github.com/prometheus/client_golang/prometheus/counter.go generated vendored
@@ -1,172 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"errors"
)

// Counter is a Metric that represents a single numerical value that only ever
// goes up. That implies that it cannot be used to count items whose number can
// also go down, e.g. the number of currently running goroutines. Those
// "counters" are represented by Gauges.
//
// A Counter is typically used to count requests served, tasks completed, errors
// occurred, etc.
//
// To create Counter instances, use NewCounter.
type Counter interface {
	Metric
	Collector

	// Set is used to set the Counter to an arbitrary value. It is only used
	// if you have to transfer a value from an external counter into this
	// Prometheus metric. Do not use it for regular handling of a
	// Prometheus counter (as it can be used to break the contract of
	// monotonically increasing values).
	//
	// Deprecated: Use NewConstMetric to create a counter for an external
	// value. A Counter should never be set.
	Set(float64)
	// Inc increments the counter by 1.
	Inc()
	// Add adds the given value to the counter. It panics if the value is <
	// 0.
	Add(float64)
}

// CounterOpts is an alias for Opts. See there for doc comments.
type CounterOpts Opts

// NewCounter creates a new Counter based on the provided CounterOpts.
func NewCounter(opts CounterOpts) Counter {
	desc := NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		nil,
		opts.ConstLabels,
	)
	result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
	result.init(result) // Init self-collection.
	return result
}

type counter struct {
	value
}

func (c *counter) Add(v float64) {
	if v < 0 {
		panic(errors.New("counter cannot decrease in value"))
	}
	c.value.Add(v)
}

// CounterVec is a Collector that bundles a set of Counters that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. number of HTTP requests, partitioned by response code and
// method). Create instances with NewCounterVec.
//
// CounterVec embeds MetricVec. See there for a full list of methods with
// detailed documentation.
type CounterVec struct {
	*MetricVec
}

// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
// partitioned by the given label names. At least one label name must be
// provided.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
	desc := NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		labelNames,
		opts.ConstLabels,
	)
	return &CounterVec{
		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
			result := &counter{value: value{
				desc:       desc,
				valType:    CounterValue,
				labelPairs: makeLabelPairs(desc, lvs),
			}}
			result.init(result) // Init self-collection.
			return result
		}),
	}
}

// GetMetricWithLabelValues replaces the method of the same name in
// MetricVec. The difference is that this method returns a Counter and not a
// Metric so that no type conversion is required.
func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
	metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
	if metric != nil {
		return metric.(Counter), err
	}
	return nil, err
}

// GetMetricWith replaces the method of the same name in MetricVec. The
// difference is that this method returns a Counter and not a Metric so that no
// type conversion is required.
func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
	metric, err := m.MetricVec.GetMetricWith(labels)
	if metric != nil {
		return metric.(Counter), err
	}
	return nil, err
}

// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
	return m.MetricVec.WithLabelValues(lvs...).(Counter)
}

// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
func (m *CounterVec) With(labels Labels) Counter {
	return m.MetricVec.With(labels).(Counter)
}

// CounterFunc is a Counter whose value is determined at collect time by calling a
// provided function.
//
// To create CounterFunc instances, use NewCounterFunc.
type CounterFunc interface {
	Metric
	Collector
}

// NewCounterFunc creates a new CounterFunc based on the provided
// CounterOpts. The value reported is determined by calling the given function
// from within the Write method. Take into account that metric collection may
// happen concurrently. If that results in concurrent calls to Write, like in
// the case where a CounterFunc is directly registered with Prometheus, the
// provided function must be concurrency-safe. The function should also honor
// the contract for a Counter (values only go up, not down), but compliance will
// not be checked.
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
	return newValueFunc(NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		nil,
		opts.ConstLabels,
	), CounterValue, function)
}
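The CounterVec/WithLabelValues pattern documented above is the typical way to partition a counter by labels. A minimal usage sketch with illustrative metric and label names:

package main

import "github.com/prometheus/client_golang/prometheus"

var httpRequests = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "HTTP requests, partitioned by status code and method.",
	},
	[]string{"code", "method"},
)

func main() {
	prometheus.MustRegister(httpRequests)

	// Panicking shortcut once the label values are known to be valid.
	httpRequests.WithLabelValues("200", "GET").Inc()

	// Error-returning variant for label values coming from untrusted input.
	if c, err := httpRequests.GetMetricWithLabelValues("404", "POST"); err == nil {
		c.Add(3)
	}
}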
205 vendor/github.com/prometheus/client_golang/prometheus/desc.go generated vendored
@@ -1,205 +0,0 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"errors"
	"fmt"
	"regexp"
	"sort"
	"strings"

	"github.com/golang/protobuf/proto"

	dto "github.com/prometheus/client_model/go"
)

var (
	metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
	labelNameRE  = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
)

// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"

// Labels represents a collection of label name -> value mappings. This type is
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
// metric vector Collectors, e.g.:
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
//
// The other use-case is the specification of constant label pairs in Opts or to
// create a Desc.
type Labels map[string]string

// Desc is the descriptor used by every Prometheus Metric. It is essentially
// the immutable meta-data of a Metric. The normal Metric implementations
// included in this package manage their Desc under the hood. Users only have to
// deal with Desc if they use advanced features like the ExpvarCollector or
// custom Collectors and Metrics.
//
// Descriptors registered with the same registry have to fulfill certain
// consistency and uniqueness criteria if they share the same fully-qualified
// name: They must have the same help string and the same label names (aka label
// dimensions) in each, constLabels and variableLabels, but they must differ in
// the values of the constLabels.
//
// Descriptors that share the same fully-qualified names and the same label
// values of their constLabels are considered equal.
//
// Use NewDesc to create new Desc instances.
type Desc struct {
	// fqName has been built from Namespace, Subsystem, and Name.
	fqName string
	// help provides some helpful information about this metric.
	help string
	// constLabelPairs contains precalculated DTO label pairs based on
	// the constant labels.
	constLabelPairs []*dto.LabelPair
	// VariableLabels contains names of labels for which the metric
	// maintains variable values.
	variableLabels []string
	// id is a hash of the values of the ConstLabels and fqName. This
	// must be unique among all registered descriptors and can therefore be
	// used as an identifier of the descriptor.
	id uint64
	// dimHash is a hash of the label names (preset and variable) and the
	// Help string. Each Desc with the same fqName must have the same
	// dimHash.
	dimHash uint64
	// err is an error that occurred during construction. It is reported on
	// registration time.
	err error
}

// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
// and will be reported on registration time. variableLabels and constLabels can
// be nil if no such labels should be set. fqName and help must not be empty.
//
// variableLabels only contain the label names. Their label values are variable
// and therefore not part of the Desc. (They are managed within the Metric.)
//
// For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Opts documentation for the implications of
// constant labels.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
	d := &Desc{
		fqName:         fqName,
		help:           help,
		variableLabels: variableLabels,
	}
	if help == "" {
		d.err = errors.New("empty help string")
		return d
	}
	if !metricNameRE.MatchString(fqName) {
		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
		return d
	}
	// labelValues contains the label values of const labels (in order of
	// their sorted label names) plus the fqName (at position 0).
	labelValues := make([]string, 1, len(constLabels)+1)
	labelValues[0] = fqName
	labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
	labelNameSet := map[string]struct{}{}
	// First add only the const label names and sort them...
	for labelName := range constLabels {
		if !checkLabelName(labelName) {
			d.err = fmt.Errorf("%q is not a valid label name", labelName)
			return d
		}
		labelNames = append(labelNames, labelName)
		labelNameSet[labelName] = struct{}{}
	}
	sort.Strings(labelNames)
	// ... so that we can now add const label values in the order of their names.
	for _, labelName := range labelNames {
		labelValues = append(labelValues, constLabels[labelName])
	}
	// Now add the variable label names, but prefix them with something that
	// cannot be in a regular label name. That prevents matching the label
	// dimension with a different mix between preset and variable labels.
	for _, labelName := range variableLabels {
		if !checkLabelName(labelName) {
			d.err = fmt.Errorf("%q is not a valid label name", labelName)
			return d
		}
		labelNames = append(labelNames, "$"+labelName)
		labelNameSet[labelName] = struct{}{}
	}
	if len(labelNames) != len(labelNameSet) {
		d.err = errors.New("duplicate label names")
		return d
	}
	vh := hashNew()
	for _, val := range labelValues {
		vh = hashAdd(vh, val)
		vh = hashAddByte(vh, separatorByte)
	}
	d.id = vh
	// Sort labelNames so that order doesn't matter for the hash.
	sort.Strings(labelNames)
	// Now hash together (in this order) the help string and the sorted
	// label names.
	lh := hashNew()
	lh = hashAdd(lh, help)
	lh = hashAddByte(lh, separatorByte)
	for _, labelName := range labelNames {
		lh = hashAdd(lh, labelName)
		lh = hashAddByte(lh, separatorByte)
	}
	d.dimHash = lh

	d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
	for n, v := range constLabels {
		d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
			Name:  proto.String(n),
			Value: proto.String(v),
		})
	}
	sort.Sort(LabelPairSorter(d.constLabelPairs))
	return d
}

// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
// provided error set. If a collector returning such a descriptor is registered,
// registration will fail with the provided error. NewInvalidDesc can be used by
// a Collector to signal inability to describe itself.
func NewInvalidDesc(err error) *Desc {
	return &Desc{
		err: err,
	}
}

func (d *Desc) String() string {
	lpStrings := make([]string, 0, len(d.constLabelPairs))
	for _, lp := range d.constLabelPairs {
		lpStrings = append(
			lpStrings,
			fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
		)
	}
	return fmt.Sprintf(
		"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
		d.fqName,
		d.help,
		strings.Join(lpStrings, ","),
		d.variableLabels,
	)
}

func checkLabelName(l string) bool {
	return labelNameRE.MatchString(l) &&
		!strings.HasPrefix(l, reservedLabelPrefix)
}
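NewDesc as defined above is the entry point for custom collectors. A minimal sketch showing a Desc with one variable and one constant label and how it pairs with MustNewConstMetric; the metric and label names are invented for illustration:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// One variable label ("shard") plus one constant label ("region").
	desc := prometheus.NewDesc(
		"jobs_in_flight",
		"Jobs currently being processed.",
		[]string{"shard"},
		prometheus.Labels{"region": "eu-west-1"},
	)

	// The String method reveals how the Desc captured the metadata.
	fmt.Println(desc)

	// A throw-away metric built from the descriptor, as a custom Collector would do.
	m := prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 7, "shard-0")
	_ = m // would be sent on the Collect channel in a real Collector
}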
181 vendor/github.com/prometheus/client_golang/prometheus/doc.go generated vendored
@@ -1,181 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package prometheus provides metrics primitives to instrument code for
// monitoring. It also offers a registry for metrics. Sub-packages allow to
// expose the registered metrics via HTTP (package promhttp) or push them to a
// Pushgateway (package push).
//
// All exported functions and methods are safe to be used concurrently unless
// specified otherwise.
//
// A Basic Example
//
// As a starting point, a very basic usage example:
//
// package main
//
// import (
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
// "github.com/prometheus/client_golang/prometheus/promhttp"
// )
//
// var (
// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
// Name: "cpu_temperature_celsius",
// Help: "Current temperature of the CPU.",
// })
// hdFailures = prometheus.NewCounterVec(
// prometheus.CounterOpts{
// Name: "hd_errors_total",
// Help: "Number of hard-disk errors.",
// },
// []string{"device"},
// )
// )
//
// func init() {
// // Metrics have to be registered to be exposed:
// prometheus.MustRegister(cpuTemp)
// prometheus.MustRegister(hdFailures)
// }
//
// func main() {
// cpuTemp.Set(65.3)
// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
//
// // The Handler function provides a default handler to expose metrics
// // via an HTTP server. "/metrics" is the usual endpoint for that.
// http.Handle("/metrics", promhttp.Handler())
// http.ListenAndServe(":8080", nil)
// }
//
//
// This is a complete program that exports two metrics, a Gauge and a Counter,
// the latter with a label attached to turn it into a (one-dimensional) vector.
//
// Metrics
//
// The number of exported identifiers in this package might appear a bit
// overwhelming. However, in addition to the basic plumbing shown in the example
// above, you only need to understand the different metric types and their
// vector versions for basic usage.
//
// Above, you have already touched the Counter and the Gauge. There are two more
// advanced metric types: the Summary and Histogram. A more thorough description
// of those four metric types can be found in the Prometheus docs:
// https://prometheus.io/docs/concepts/metric_types/
//
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
// Prometheus server not to assume anything about its type.
//
// In addition to the fundamental metric types Gauge, Counter, Summary,
// Histogram, and Untyped, a very important part of the Prometheus data model is
// the partitioning of samples along dimensions called labels, which results in
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
// HistogramVec, and UntypedVec.
//
// While only the fundamental metric types implement the Metric interface, both
// the metrics and their vector versions implement the Collector interface. A
// Collector manages the collection of a number of Metrics, but for convenience,
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
// SummaryVec, HistogramVec, and UntypedVec are not.
//
// To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
// HistogramOpts, or UntypedOpts.
//
// Custom Collectors and constant Metrics
//
// While you could create your own implementations of Metric, most likely you
// will only ever implement the Collector interface on your own. At a first
// glance, a custom Collector seems handy to bundle Metrics for common
// registration (with the prime example of the different metric vectors above,
// which bundle all the metrics of the same name but with different labels).
//
// There is a more involved use case, too: If you already have metrics
// available, created outside of the Prometheus context, you don't need the
// interface of the various Metric types. You essentially want to mirror the
// existing numbers into Prometheus Metrics during collection. An own
// implementation of the Collector interface is perfect for that. You can create
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
// NewConstSummary (and their respective Must… versions). That will happen in
// the Collect method. The Describe method has to return separate Desc
// instances, representative of the “throw-away” metrics to be created
// later. NewDesc comes in handy to create those Desc instances.
//
// The Collector example illustrates the use case. You can also look at the
// source code of the processCollector (mirroring process metrics), the
// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
// metrics) as examples that are used in this package itself.
//
// If you just need to call a function to get a single float value to collect as
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
// shortcuts.
//
// Advanced Uses of the Registry
//
// While MustRegister is the by far most common way of registering a Collector,
// sometimes you might want to handle the errors the registration might
// cause. As suggested by the name, MustRegister panics if an error occurs. With
// the Register function, the error is returned and can be handled.
//
// An error is returned if the registered Collector is incompatible or
// inconsistent with already registered metrics. The registry aims for
// consistency of the collected metrics according to the Prometheus data
// model. Inconsistencies are ideally detected at registration time, not at
// collect time. The former will usually be detected at start-up time of a
// program, while the latter will only happen at scrape time, possibly not even
// on the first scrape if the inconsistency only becomes relevant later. That is
// the main reason why a Collector and a Metric have to describe themselves to
// the registry.
//
// So far, everything we did operated on the so-called default registry, as it
// can be found in the global DefaultRegistry variable. With NewRegistry, you
// can create a custom registry, or you can even implement the Registerer or
// Gatherer interfaces yourself. The methods Register and Unregister work in
// the same way on a custom registry as the global functions Register and
// Unregister on the default registry.
//
// There are a number of uses for custom registries: You can use registries
// with special properties, see NewPedanticRegistry. You can avoid global state,
// as it is imposed by the DefaultRegistry. You can use multiple registries at
// the same time to expose different metrics in different ways. You can use
// separate registries for testing purposes.
//
// Also note that the DefaultRegistry comes registered with a Collector for Go
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
//
// HTTP Exposition
//
// The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp
// sub-package. (The top-level functions in the prometheus package are
// deprecated.)
//
// Pushing to the Pushgateway
//
// Functions for pushing to the Pushgateway can be found in the push sub-package.
//
// Other Means of Exposition
//
// More ways of exposing metrics can easily be added. Sending metrics to
// Graphite would be an example that will soon be implemented.
package prometheus
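The package documentation above describes custom registries and HTTP exposition only in prose. A minimal sketch of that combination, assuming this client_golang version's promhttp sub-package exposes HandlerFor and HandlerOpts (as later versions do); the metric name is illustrative:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A registry independent of the global DefaultRegistry, so only
	// explicitly registered collectors show up.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "app_requests_in_flight",
		Help: "Requests currently being served.",
	})
	reg.MustRegister(inFlight)

	// Expose exactly this registry; the Registry itself is the Gatherer.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}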
119 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go generated vendored
@@ -1,119 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"encoding/json"
	"expvar"
)

type expvarCollector struct {
	exports map[string]*Desc
}

// NewExpvarCollector returns a newly allocated expvar Collector that still has
// to be registered with a Prometheus registry.
//
// An expvar Collector collects metrics from the expvar interface. It provides a
// quick way to expose numeric values that are already exported via expvar as
// Prometheus metrics. Note that the data models of expvar and Prometheus are
// fundamentally different, and that the expvar Collector is inherently slower
// than native Prometheus metrics. Thus, the expvar Collector is probably great
// for experiments and prototyping, but you should seriously consider a more
// direct implementation of Prometheus metrics for monitoring production
// systems.
//
// The exports map has the following meaning:
//
// The keys in the map correspond to expvar keys, i.e. for every expvar key you
// want to export as Prometheus metric, you need an entry in the exports
// map. The descriptor mapped to each key describes how to export the expvar
// value. It defines the name and the help string of the Prometheus metric
// proxying the expvar value. The type will always be Untyped.
//
// For descriptors without variable labels, the expvar value must be a number or
// a bool. The number is then directly exported as the Prometheus sample
// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
// that are not numbers or bools are silently ignored.
//
// If the descriptor has one variable label, the expvar value must be an expvar
// map. The keys in the expvar map become the various values of the one
// Prometheus label. The values in the expvar map must be numbers or bools again
// as above.
//
// For descriptors with more than one variable label, the expvar must be a
// nested expvar map, i.e. where the values of the topmost map are maps again
// etc. until a depth is reached that corresponds to the number of labels. The
// leaves of that structure must be numbers or bools as above to serve as the
// sample values.
//
// Anything that does not fit into the scheme above is silently ignored.
func NewExpvarCollector(exports map[string]*Desc) Collector {
	return &expvarCollector{
		exports: exports,
	}
}

// Describe implements Collector.
func (e *expvarCollector) Describe(ch chan<- *Desc) {
	for _, desc := range e.exports {
		ch <- desc
	}
}

// Collect implements Collector.
func (e *expvarCollector) Collect(ch chan<- Metric) {
	for name, desc := range e.exports {
		var m Metric
		expVar := expvar.Get(name)
		if expVar == nil {
			continue
		}
		var v interface{}
		labels := make([]string, len(desc.variableLabels))
		if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
			ch <- NewInvalidMetric(desc, err)
			continue
		}
		var processValue func(v interface{}, i int)
		processValue = func(v interface{}, i int) {
			if i >= len(labels) {
				copiedLabels := append(make([]string, 0, len(labels)), labels...)
				switch v := v.(type) {
				case float64:
					m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
				case bool:
					if v {
						m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
					} else {
						m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
					}
				default:
					return
				}
				ch <- m
				return
			}
			vm, ok := v.(map[string]interface{})
			if !ok {
				return
			}
			for lv, val := range vm {
				labels[i] = lv
				processValue(val, i+1)
			}
		}
		processValue(v, 0)
	}
}
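A minimal usage sketch for the expvar bridge described above; the expvar key and metric name are invented, and the descriptor has no variable labels, so the expvar value must be a plain number:

package main

import (
	"expvar"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A value some other part of the program already publishes via expvar.
	open := expvar.NewInt("open_connections")
	open.Set(12)

	// Map the expvar key to a Desc that names and documents the proxy metric.
	collector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
		"open_connections": prometheus.NewDesc(
			"expvar_open_connections",
			"Open connections as published via expvar.",
			nil, nil,
		),
	})
	prometheus.MustRegister(collector)
}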
29 vendor/github.com/prometheus/client_golang/prometheus/fnv.go generated vendored
@@ -1,29 +0,0 @@
package prometheus

// Inline and byte-free variant of hash/fnv's fnv64a.

const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

// hashNew initializes a new fnv64a hash value.
func hashNew() uint64 {
	return offset64
}

// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
func hashAddByte(h uint64, b byte) uint64 {
	h ^= uint64(b)
	h *= prime64
	return h
}
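These helpers are unexported, but the fold they implement is easy to restate. A standalone sketch of how desc.go combines fqName and constant label values into an id; the separator byte value and the input strings are assumptions for illustration, not taken from the vendored code:

package main

import "fmt"

const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

// hashAdd folds a string into an FNV-1a hash, mirroring the unexported helper above.
func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

// hashAddByte folds a single byte, used as a separator between values.
func hashAddByte(h uint64, b byte) uint64 {
	h ^= uint64(b)
	h *= prime64
	return h
}

func main() {
	// Roughly how NewDesc derives an id: fqName plus const label values,
	// each terminated by a separator byte.
	h := uint64(offset64)
	for _, v := range []string{"http_requests_total", "eu-west-1"} {
		h = hashAdd(h, v)
		h = hashAddByte(h, 0xff) // assumed separator, for illustration only
	}
	fmt.Printf("id: %x\n", h)
}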
140
vendor/github.com/prometheus/client_golang/prometheus/gauge.go
generated
vendored
140
vendor/github.com/prometheus/client_golang/prometheus/gauge.go
generated
vendored
|
@ -1,140 +0,0 @@
|
|||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
// Gauge is a Metric that represents a single numerical value that can
|
||||
// arbitrarily go up and down.
|
||||
//
|
||||
// A Gauge is typically used for measured values like temperatures or current
|
||||
// memory usage, but also "counts" that can go up and down, like the number of
|
||||
// running goroutines.
|
||||
//
|
||||
// To create Gauge instances, use NewGauge.
|
||||
type Gauge interface {
|
||||
Metric
|
||||
Collector
|
||||
|
||||
// Set sets the Gauge to an arbitrary value.
|
||||
Set(float64)
|
||||
// Inc increments the Gauge by 1.
|
||||
Inc()
|
||||
// Dec decrements the Gauge by 1.
|
||||
Dec()
|
||||
// Add adds the given value to the Gauge. (The value can be
|
||||
// negative, resulting in a decrease of the Gauge.)
|
||||
Add(float64)
|
||||
// Sub subtracts the given value from the Gauge. (The value can be
|
||||
// negative, resulting in an increase of the Gauge.)
|
||||
Sub(float64)
|
||||
}
|
||||
|
||||
// GaugeOpts is an alias for Opts. See there for doc comments.
|
||||
type GaugeOpts Opts
|
||||
|
||||
// NewGauge creates a new Gauge based on the provided GaugeOpts.
|
||||
func NewGauge(opts GaugeOpts) Gauge {
|
||||
return newValue(NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
nil,
|
||||
opts.ConstLabels,
|
||||
), GaugeValue, 0)
|
||||
}
|
||||
|
||||
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
|
||||
// Desc, but have different values for their variable labels. This is used if
|
||||
// you want to count the same thing partitioned by various dimensions
|
||||
// (e.g. number of operations queued, partitioned by user and operation
|
||||
// type). Create instances with NewGaugeVec.
|
||||
type GaugeVec struct {
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
|
||||
// partitioned by the given label names. At least one label name must be
|
||||
// provided.
|
||||
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
|
||||
desc := NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
labelNames,
|
||||
opts.ConstLabels,
|
||||
)
|
||||
return &GaugeVec{
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newValue(desc, GaugeValue, 0, lvs...)
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetricWithLabelValues replaces the method of the same name in
|
||||
// MetricVec. The difference is that this method returns a Gauge and not a
|
||||
// Metric so that no type conversion is required.
|
||||
func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
|
||||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
|
||||
if metric != nil {
|
||||
return metric.(Gauge), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
||||
// difference is that this method returns a Gauge and not a Metric so that no
|
||||
// type conversion is required.
|
||||
func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
|
||||
metric, err := m.MetricVec.GetMetricWith(labels)
|
||||
if metric != nil {
|
||||
return metric.(Gauge), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
||||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
||||
// error, WithLabelValues allows shortcuts like
|
||||
// myVec.WithLabelValues("404", "GET").Add(42)
|
||||
func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
|
||||
return m.MetricVec.WithLabelValues(lvs...).(Gauge)
|
||||
}
|
||||
|
||||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
|
||||
// returned an error. By not returning an error, With allows shortcuts like
|
||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
|
||||
func (m *GaugeVec) With(labels Labels) Gauge {
|
||||
return m.MetricVec.With(labels).(Gauge)
|
||||
}
|
||||
|
||||
// GaugeFunc is a Gauge whose value is determined at collect time by calling a
|
||||
// provided function.
|
||||
//
|
||||
// To create GaugeFunc instances, use NewGaugeFunc.
|
||||
type GaugeFunc interface {
|
||||
Metric
|
||||
Collector
|
||||
}
|
||||
|
||||
// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
|
||||
// value reported is determined by calling the given function from within the
|
||||
// Write method. Take into account that metric collection may happen
|
||||
// concurrently. If that results in concurrent calls to Write, like in the case
|
||||
// where a GaugeFunc is directly registered with Prometheus, the provided
|
||||
// function must be concurrency-safe.
|
||||
func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
|
||||
return newValueFunc(NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
nil,
|
||||
opts.ConstLabels,
|
||||
), GaugeValue, function)
|
||||
}
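As a quick illustration of the gauge API removed above, the following hedged sketch shows a plain Gauge, a GaugeVec and a GaugeFunc in use. It is not part of this repository; the metric names (queue_length, operations_queued, temperature_celsius), the label names and the readTemperature helper are invented for the example.

package main

import "github.com/prometheus/client_golang/prometheus"

func exampleGauges() {
	// Plain Gauge: Set, Inc/Dec, Add/Sub.
	queueLen := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "queue_length",
		Help: "Current number of items in the queue.",
	})
	prometheus.MustRegister(queueLen)
	queueLen.Set(12)
	queueLen.Inc()
	queueLen.Sub(3)

	// GaugeVec partitioned by labels; WithLabelValues panics on a label-count
	// mismatch, while GetMetricWithLabelValues returns an error instead.
	opsQueued := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "operations_queued",
		Help: "Number of operations queued, partitioned by user and type.",
	}, []string{"user", "type"})
	prometheus.MustRegister(opsQueued)
	opsQueued.WithLabelValues("alice", "delete").Add(4)

	// GaugeFunc: the callback runs at collect time and must be concurrency-safe.
	prometheus.MustRegister(prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "temperature_celsius",
		Help: "Temperature reported by a (hypothetical) sensor read.",
	}, readTemperature))
}

// readTemperature is a hypothetical helper standing in for a real sensor read.
func readTemperature() float64 { return 21.5 }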
|
263 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go generated vendored
|
@ -1,263 +0,0 @@
|
|||
package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"time"
|
||||
)
|
||||
|
||||
type goCollector struct {
|
||||
goroutines Gauge
|
||||
gcDesc *Desc
|
||||
|
||||
// metrics to describe and collect
|
||||
metrics memStatsMetrics
|
||||
}
|
||||
|
||||
// NewGoCollector returns a collector which exports metrics about the current
|
||||
// go process.
|
||||
func NewGoCollector() Collector {
|
||||
return &goCollector{
|
||||
goroutines: NewGauge(GaugeOpts{
|
||||
Namespace: "go",
|
||||
Name: "goroutines",
|
||||
Help: "Number of goroutines that currently exist.",
|
||||
}),
|
||||
gcDesc: NewDesc(
|
||||
"go_gc_duration_seconds",
|
||||
"A summary of the GC invocation durations.",
|
||||
nil, nil),
|
||||
metrics: memStatsMetrics{
|
||||
{
|
||||
desc: NewDesc(
|
||||
memstatNamespace("alloc_bytes"),
|
||||
"Number of bytes allocated and still in use.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("alloc_bytes_total"),
|
||||
"Total number of bytes allocated, even if freed.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
|
||||
valType: CounterValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("sys_bytes"),
|
||||
"Number of bytes obtained by system. Sum of all system allocations.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("lookups_total"),
|
||||
"Total number of pointer lookups.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
|
||||
valType: CounterValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("mallocs_total"),
|
||||
"Total number of mallocs.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
|
||||
valType: CounterValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("frees_total"),
|
||||
"Total number of frees.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
|
||||
valType: CounterValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_alloc_bytes"),
|
||||
"Number of heap bytes allocated and still in use.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_sys_bytes"),
|
||||
"Number of heap bytes obtained from system.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_idle_bytes"),
|
||||
"Number of heap bytes waiting to be used.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_inuse_bytes"),
|
||||
"Number of heap bytes that are in use.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_released_bytes_total"),
|
||||
"Total number of heap bytes released to OS.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
|
||||
valType: CounterValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("heap_objects"),
|
||||
"Number of allocated objects.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("stack_inuse_bytes"),
|
||||
"Number of bytes in use by the stack allocator.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("stack_sys_bytes"),
|
||||
"Number of bytes obtained from system for stack allocator.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("mspan_inuse_bytes"),
|
||||
"Number of bytes in use by mspan structures.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("mspan_sys_bytes"),
|
||||
"Number of bytes used for mspan structures obtained from system.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("mcache_inuse_bytes"),
|
||||
"Number of bytes in use by mcache structures.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("mcache_sys_bytes"),
|
||||
"Number of bytes used for mcache structures obtained from system.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("buck_hash_sys_bytes"),
|
||||
"Number of bytes used by the profiling bucket hash table.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("gc_sys_bytes"),
|
||||
"Number of bytes used for garbage collection system metadata.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("other_sys_bytes"),
|
||||
"Number of bytes used for other system allocations.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("next_gc_bytes"),
|
||||
"Number of heap bytes when next garbage collection will take place.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
|
||||
valType: GaugeValue,
|
||||
}, {
|
||||
desc: NewDesc(
|
||||
memstatNamespace("last_gc_time_seconds"),
|
||||
"Number of seconds since 1970 of last garbage collection.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
|
||||
valType: GaugeValue,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func memstatNamespace(s string) string {
|
||||
return fmt.Sprintf("go_memstats_%s", s)
|
||||
}
|
||||
|
||||
// Describe returns all descriptions of the collector.
|
||||
func (c *goCollector) Describe(ch chan<- *Desc) {
|
||||
ch <- c.goroutines.Desc()
|
||||
ch <- c.gcDesc
|
||||
|
||||
for _, i := range c.metrics {
|
||||
ch <- i.desc
|
||||
}
|
||||
}
|
||||
|
||||
// Collect returns the current state of all metrics of the collector.
|
||||
func (c *goCollector) Collect(ch chan<- Metric) {
|
||||
c.goroutines.Set(float64(runtime.NumGoroutine()))
|
||||
ch <- c.goroutines
|
||||
|
||||
var stats debug.GCStats
|
||||
stats.PauseQuantiles = make([]time.Duration, 5)
|
||||
debug.ReadGCStats(&stats)
|
||||
|
||||
quantiles := make(map[float64]float64)
|
||||
for idx, pq := range stats.PauseQuantiles[1:] {
|
||||
quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
|
||||
}
|
||||
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
|
||||
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
|
||||
|
||||
ms := &runtime.MemStats{}
|
||||
runtime.ReadMemStats(ms)
|
||||
for _, i := range c.metrics {
|
||||
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
|
||||
}
|
||||
}
|
||||
|
||||
// memStatsMetrics provide description, value, and value type for memstat metrics.
|
||||
type memStatsMetrics []struct {
|
||||
desc *Desc
|
||||
eval func(*runtime.MemStats) float64
|
||||
valType ValueType
|
||||
}
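A minimal sketch of wiring this collector up, assuming the promhttp sub-package of the same library: it registers NewGoCollector with a fresh registry and serves it over HTTP. The listen address is arbitrary, and note that the library's default registry usually carries a Go collector already, so explicit registration is only needed for custom registries.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// Exposes go_goroutines, go_gc_duration_seconds and the go_memstats_* series.
	reg.MustRegister(prometheus.NewGoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9100", nil))
}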
|
444 vendor/github.com/prometheus/client_golang/prometheus/histogram.go generated vendored
|
@ -1,444 +0,0 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// A Histogram counts individual observations from an event or sample stream in
|
||||
// configurable buckets. Similar to a summary, it also provides a sum of
|
||||
// observations and an observation count.
|
||||
//
|
||||
// On the Prometheus server, quantiles can be calculated from a Histogram using
|
||||
// the histogram_quantile function in the query language.
|
||||
//
|
||||
// Note that Histograms, in contrast to Summaries, can be aggregated with the
|
||||
// Prometheus query language (see the documentation for detailed
|
||||
// procedures). However, Histograms require the user to pre-define suitable
|
||||
// buckets, and they are in general less accurate. The Observe method of a
|
||||
// Histogram has a very low performance overhead in comparison with the Observe
|
||||
// method of a Summary.
|
||||
//
|
||||
// To create Histogram instances, use NewHistogram.
|
||||
type Histogram interface {
|
||||
Metric
|
||||
Collector
|
||||
|
||||
// Observe adds a single observation to the histogram.
|
||||
Observe(float64)
|
||||
}
|
||||
|
||||
// bucketLabel is used for the label that defines the upper bound of a
|
||||
// bucket of a histogram ("le" -> "less or equal").
|
||||
const bucketLabel = "le"
|
||||
|
||||
// DefBuckets are the default Histogram buckets. The default buckets are
|
||||
// tailored to broadly measure the response time (in seconds) of a network
|
||||
// service. Most likely, however, you will be required to define buckets
|
||||
// customized to your use case.
|
||||
var (
|
||||
DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
|
||||
|
||||
errBucketLabelNotAllowed = fmt.Errorf(
|
||||
"%q is not allowed as label name in histograms", bucketLabel,
|
||||
)
|
||||
)
|
||||
|
||||
// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
|
||||
// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
|
||||
// and not included in the returned slice. The returned slice is meant to be
|
||||
// used for the Buckets field of HistogramOpts.
|
||||
//
|
||||
// The function panics if 'count' is zero or negative.
|
||||
func LinearBuckets(start, width float64, count int) []float64 {
|
||||
if count < 1 {
|
||||
panic("LinearBuckets needs a positive count")
|
||||
}
|
||||
buckets := make([]float64, count)
|
||||
for i := range buckets {
|
||||
buckets[i] = start
|
||||
start += width
|
||||
}
|
||||
return buckets
|
||||
}
|
||||
|
||||
// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
|
||||
// upper bound of 'start' and each following bucket's upper bound is 'factor'
|
||||
// times the previous bucket's upper bound. The final +Inf bucket is not counted
|
||||
// and not included in the returned slice. The returned slice is meant to be
|
||||
// used for the Buckets field of HistogramOpts.
|
||||
//
|
||||
// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
|
||||
// or if 'factor' is less than or equal to 1.
|
||||
func ExponentialBuckets(start, factor float64, count int) []float64 {
|
||||
if count < 1 {
|
||||
panic("ExponentialBuckets needs a positive count")
|
||||
}
|
||||
if start <= 0 {
|
||||
panic("ExponentialBuckets needs a positive start value")
|
||||
}
|
||||
if factor <= 1 {
|
||||
panic("ExponentialBuckets needs a factor greater than 1")
|
||||
}
|
||||
buckets := make([]float64, count)
|
||||
for i := range buckets {
|
||||
buckets[i] = start
|
||||
start *= factor
|
||||
}
|
||||
return buckets
|
||||
}
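To make the two bucket helpers concrete, here is a small worked example of the slices they return (the values follow directly from the definitions above; the snippet is written from a user's package, hence the prometheus prefix).

prometheus.LinearBuckets(5, 5, 4)        // returns []float64{5, 10, 15, 20}
prometheus.ExponentialBuckets(0.1, 2, 5) // returns []float64{0.1, 0.2, 0.4, 0.8, 1.6}
// In both cases the +Inf bucket is added implicitly by the histogram later.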
|
||||
|
||||
// HistogramOpts bundles the options for creating a Histogram metric. It is
|
||||
// mandatory to set Name and Help to a non-empty string. All other fields are
|
||||
// optional and can safely be left at their zero value.
|
||||
type HistogramOpts struct {
|
||||
// Namespace, Subsystem, and Name are components of the fully-qualified
|
||||
// name of the Histogram (created by joining these components with
|
||||
// "_"). Only Name is mandatory, the others merely help structuring the
|
||||
// name. Note that the fully-qualified name of the Histogram must be a
|
||||
// valid Prometheus metric name.
|
||||
Namespace string
|
||||
Subsystem string
|
||||
Name string
|
||||
|
||||
// Help provides information about this Histogram. Mandatory!
|
||||
//
|
||||
// Metrics with the same fully-qualified name must have the same Help
|
||||
// string.
|
||||
Help string
|
||||
|
||||
// ConstLabels are used to attach fixed labels to this
|
||||
// Histogram. Histograms with the same fully-qualified name must have the
|
||||
// same label names in their ConstLabels.
|
||||
//
|
||||
// Note that in most cases, labels have a value that varies during the
|
||||
// lifetime of a process. Those labels are usually managed with a
|
||||
// HistogramVec. ConstLabels serve only special purposes. One is for the
|
||||
// special case where the value of a label does not change during the
|
||||
// lifetime of a process, e.g. if the revision of the running binary is
|
||||
// put into a label. Another, more advanced purpose is if more than one
|
||||
// Collector needs to collect Histograms with the same fully-qualified
|
||||
// name. In that case, those Histograms must differ in the values of
|
||||
// their ConstLabels. See the Collector examples.
|
||||
//
|
||||
// If the value of a label never changes (not even between binaries),
|
||||
// that label most likely should not be a label at all (but part of the
|
||||
// metric name).
|
||||
ConstLabels Labels
|
||||
|
||||
// Buckets defines the buckets into which observations are counted. Each
|
||||
// element in the slice is the upper inclusive bound of a bucket. The
|
||||
// values must be sorted in strictly increasing order. There is no need
|
||||
// to add a highest bucket with +Inf bound, it will be added
|
||||
// implicitly. The default value is DefBuckets.
|
||||
Buckets []float64
|
||||
}
|
||||
|
||||
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
|
||||
// panics if the buckets in HistogramOpts are not in strictly increasing order.
|
||||
func NewHistogram(opts HistogramOpts) Histogram {
|
||||
return newHistogram(
|
||||
NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
nil,
|
||||
opts.ConstLabels,
|
||||
),
|
||||
opts,
|
||||
)
|
||||
}
|
||||
|
||||
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
|
||||
if len(desc.variableLabels) != len(labelValues) {
|
||||
panic(errInconsistentCardinality)
|
||||
}
|
||||
|
||||
for _, n := range desc.variableLabels {
|
||||
if n == bucketLabel {
|
||||
panic(errBucketLabelNotAllowed)
|
||||
}
|
||||
}
|
||||
for _, lp := range desc.constLabelPairs {
|
||||
if lp.GetName() == bucketLabel {
|
||||
panic(errBucketLabelNotAllowed)
|
||||
}
|
||||
}
|
||||
|
||||
if len(opts.Buckets) == 0 {
|
||||
opts.Buckets = DefBuckets
|
||||
}
|
||||
|
||||
h := &histogram{
|
||||
desc: desc,
|
||||
upperBounds: opts.Buckets,
|
||||
labelPairs: makeLabelPairs(desc, labelValues),
|
||||
}
|
||||
for i, upperBound := range h.upperBounds {
|
||||
if i < len(h.upperBounds)-1 {
|
||||
if upperBound >= h.upperBounds[i+1] {
|
||||
panic(fmt.Errorf(
|
||||
"histogram buckets must be in increasing order: %f >= %f",
|
||||
upperBound, h.upperBounds[i+1],
|
||||
))
|
||||
}
|
||||
} else {
|
||||
if math.IsInf(upperBound, +1) {
|
||||
// The +Inf bucket is implicit. Remove it here.
|
||||
h.upperBounds = h.upperBounds[:i]
|
||||
}
|
||||
}
|
||||
}
|
||||
// Finally we know the final length of h.upperBounds and can make counts.
|
||||
h.counts = make([]uint64, len(h.upperBounds))
|
||||
|
||||
h.init(h) // Init self-collection.
|
||||
return h
|
||||
}
|
||||
|
||||
type histogram struct {
|
||||
// sumBits contains the bits of the float64 representing the sum of all
|
||||
// observations. sumBits and count have to go first in the struct to
|
||||
// guarantee alignment for atomic operations.
|
||||
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||
sumBits uint64
|
||||
count uint64
|
||||
|
||||
selfCollector
|
||||
// Note that there is no mutex required.
|
||||
|
||||
desc *Desc
|
||||
|
||||
upperBounds []float64
|
||||
counts []uint64
|
||||
|
||||
labelPairs []*dto.LabelPair
|
||||
}
|
||||
|
||||
func (h *histogram) Desc() *Desc {
|
||||
return h.desc
|
||||
}
|
||||
|
||||
func (h *histogram) Observe(v float64) {
|
||||
// TODO(beorn7): For small numbers of buckets (<30), a linear search is
|
||||
// slightly faster than the binary search. If we really care, we could
|
||||
// switch from one search strategy to the other depending on the number
|
||||
// of buckets.
|
||||
//
|
||||
// Microbenchmarks (BenchmarkHistogramNoLabels):
|
||||
// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
|
||||
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
|
||||
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
|
||||
i := sort.SearchFloat64s(h.upperBounds, v)
|
||||
if i < len(h.counts) {
|
||||
atomic.AddUint64(&h.counts[i], 1)
|
||||
}
|
||||
atomic.AddUint64(&h.count, 1)
|
||||
for {
|
||||
oldBits := atomic.LoadUint64(&h.sumBits)
|
||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
|
||||
if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *histogram) Write(out *dto.Metric) error {
|
||||
his := &dto.Histogram{}
|
||||
buckets := make([]*dto.Bucket, len(h.upperBounds))
|
||||
|
||||
his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
|
||||
his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
|
||||
var count uint64
|
||||
for i, upperBound := range h.upperBounds {
|
||||
count += atomic.LoadUint64(&h.counts[i])
|
||||
buckets[i] = &dto.Bucket{
|
||||
CumulativeCount: proto.Uint64(count),
|
||||
UpperBound: proto.Float64(upperBound),
|
||||
}
|
||||
}
|
||||
his.Bucket = buckets
|
||||
out.Histogram = his
|
||||
out.Label = h.labelPairs
|
||||
return nil
|
||||
}
|
||||
|
||||
// HistogramVec is a Collector that bundles a set of Histograms that all share the
|
||||
// same Desc, but have different values for their variable labels. This is used
|
||||
// if you want to count the same thing partitioned by various dimensions
|
||||
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
||||
// instances with NewHistogramVec.
|
||||
type HistogramVec struct {
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
|
||||
// partitioned by the given label names. At least one label name must be
|
||||
// provided.
|
||||
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
|
||||
desc := NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
labelNames,
|
||||
opts.ConstLabels,
|
||||
)
|
||||
return &HistogramVec{
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newHistogram(desc, opts, lvs...)
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetricWithLabelValues replaces the method of the same name in
|
||||
// MetricVec. The difference is that this method returns a Histogram and not a
|
||||
// Metric so that no type conversion is required.
|
||||
func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
|
||||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
|
||||
if metric != nil {
|
||||
return metric.(Histogram), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
||||
// difference is that this method returns a Histogram and not a Metric so that no
|
||||
// type conversion is required.
|
||||
func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
|
||||
metric, err := m.MetricVec.GetMetricWith(labels)
|
||||
if metric != nil {
|
||||
return metric.(Histogram), err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
||||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
||||
// error, WithLabelValues allows shortcuts like
|
||||
// myVec.WithLabelValues("404", "GET").Observe(42.21)
|
||||
func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
|
||||
return m.MetricVec.WithLabelValues(lvs...).(Histogram)
|
||||
}
|
||||
|
||||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
|
||||
// returned an error. By not returning an error, With allows shortcuts like
|
||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
|
||||
func (m *HistogramVec) With(labels Labels) Histogram {
|
||||
return m.MetricVec.With(labels).(Histogram)
|
||||
}
|
||||
|
||||
type constHistogram struct {
|
||||
desc *Desc
|
||||
count uint64
|
||||
sum float64
|
||||
buckets map[float64]uint64
|
||||
labelPairs []*dto.LabelPair
|
||||
}
|
||||
|
||||
func (h *constHistogram) Desc() *Desc {
|
||||
return h.desc
|
||||
}
|
||||
|
||||
func (h *constHistogram) Write(out *dto.Metric) error {
|
||||
his := &dto.Histogram{}
|
||||
buckets := make([]*dto.Bucket, 0, len(h.buckets))
|
||||
|
||||
his.SampleCount = proto.Uint64(h.count)
|
||||
his.SampleSum = proto.Float64(h.sum)
|
||||
|
||||
for upperBound, count := range h.buckets {
|
||||
buckets = append(buckets, &dto.Bucket{
|
||||
CumulativeCount: proto.Uint64(count),
|
||||
UpperBound: proto.Float64(upperBound),
|
||||
})
|
||||
}
|
||||
|
||||
if len(buckets) > 0 {
|
||||
sort.Sort(buckSort(buckets))
|
||||
}
|
||||
his.Bucket = buckets
|
||||
|
||||
out.Histogram = his
|
||||
out.Label = h.labelPairs
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewConstHistogram returns a metric representing a Prometheus histogram with
|
||||
// fixed values for the count, sum, and bucket counts. As those parameters
|
||||
// cannot be changed, the returned value does not implement the Histogram
|
||||
// interface (but only the Metric interface). Users of this package will not
|
||||
// have much use for it in regular operations. However, when implementing custom
|
||||
// Collectors, it is useful as a throw-away metric that is generated on the fly
|
||||
// to send it to Prometheus in the Collect method.
|
||||
//
|
||||
// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
|
||||
// bucket.
|
||||
//
|
||||
// NewConstHistogram returns an error if the length of labelValues is not
|
||||
// consistent with the variable labels in Desc.
|
||||
func NewConstHistogram(
|
||||
desc *Desc,
|
||||
count uint64,
|
||||
sum float64,
|
||||
buckets map[float64]uint64,
|
||||
labelValues ...string,
|
||||
) (Metric, error) {
|
||||
if len(desc.variableLabels) != len(labelValues) {
|
||||
return nil, errInconsistentCardinality
|
||||
}
|
||||
return &constHistogram{
|
||||
desc: desc,
|
||||
count: count,
|
||||
sum: sum,
|
||||
buckets: buckets,
|
||||
labelPairs: makeLabelPairs(desc, labelValues),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MustNewConstHistogram is a version of NewConstHistogram that panics where
|
||||
// NewConstHistogram would have returned an error.
|
||||
func MustNewConstHistogram(
|
||||
desc *Desc,
|
||||
count uint64,
|
||||
sum float64,
|
||||
buckets map[float64]uint64,
|
||||
labelValues ...string,
|
||||
) Metric {
|
||||
m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
type buckSort []*dto.Bucket
|
||||
|
||||
func (s buckSort) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s buckSort) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s buckSort) Less(i, j int) bool {
|
||||
return s[i].GetUpperBound() < s[j].GetUpperBound()
|
||||
}
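A hedged usage sketch of the histogram API above: a HistogramVec for live observations, and NewConstHistogram for emitting a fixed snapshot from a custom Collector. The metric name, buckets, label values and the 42/3.1415 snapshot figures are invented for the example.

package main

import "github.com/prometheus/client_golang/prometheus"

// Illustrative only; request_duration_seconds and its label values are made up.
func exampleHistogram() {
	reqDur := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "request_duration_seconds",
		Help:    "Request latency distribution.",
		Buckets: prometheus.ExponentialBuckets(0.005, 2, 10), // 5ms up to ~2.56s
	}, []string{"method"})
	prometheus.MustRegister(reqDur)
	reqDur.WithLabelValues("get").Observe(0.042)
}

// emitSnapshot sketches NewConstHistogram as called from a custom Collector's
// Collect method: count, sum and cumulative bucket counts are fixed values.
func emitSnapshot(desc *prometheus.Desc, ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstHistogram(
		desc, 42, 3.1415,
		map[float64]uint64{0.05: 20, 0.1: 35, 0.5: 42},
	)
}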
|
490 vendor/github.com/prometheus/client_golang/prometheus/http.go generated vendored
|
@ -1,490 +0,0 @@
|
|||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/common/expfmt"
|
||||
)
|
||||
|
||||
// TODO(beorn7): Remove this whole file. It is a partial mirror of
|
||||
// promhttp/http.go (to avoid circular import chains) where everything HTTP
|
||||
// related should live. The functions here are just for avoiding
|
||||
// breakage. Everything is deprecated.
|
||||
|
||||
const (
|
||||
contentTypeHeader = "Content-Type"
|
||||
contentLengthHeader = "Content-Length"
|
||||
contentEncodingHeader = "Content-Encoding"
|
||||
acceptEncodingHeader = "Accept-Encoding"
|
||||
)
|
||||
|
||||
var bufPool sync.Pool
|
||||
|
||||
func getBuf() *bytes.Buffer {
|
||||
buf := bufPool.Get()
|
||||
if buf == nil {
|
||||
return &bytes.Buffer{}
|
||||
}
|
||||
return buf.(*bytes.Buffer)
|
||||
}
|
||||
|
||||
func giveBuf(buf *bytes.Buffer) {
|
||||
buf.Reset()
|
||||
bufPool.Put(buf)
|
||||
}
|
||||
|
||||
// Handler returns an HTTP handler for the DefaultGatherer. It is
|
||||
// already instrumented with InstrumentHandler (using "prometheus" as handler
|
||||
// name).
|
||||
//
|
||||
// Deprecated: Please note the issues described in the doc comment of
|
||||
// InstrumentHandler. You might want to consider using promhttp.Handler instead
|
||||
// (which is not instrumented).
|
||||
func Handler() http.Handler {
|
||||
return InstrumentHandler("prometheus", UninstrumentedHandler())
|
||||
}
|
||||
|
||||
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
|
||||
//
|
||||
// Deprecated: Use promhttp.Handler instead. See there for further documentation.
|
||||
func UninstrumentedHandler() http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
mfs, err := DefaultGatherer.Gather()
|
||||
if err != nil {
|
||||
http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
contentType := expfmt.Negotiate(req.Header)
|
||||
buf := getBuf()
|
||||
defer giveBuf(buf)
|
||||
writer, encoding := decorateWriter(req, buf)
|
||||
enc := expfmt.NewEncoder(writer, contentType)
|
||||
var lastErr error
|
||||
for _, mf := range mfs {
|
||||
if err := enc.Encode(mf); err != nil {
|
||||
lastErr = err
|
||||
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
if closer, ok := writer.(io.Closer); ok {
|
||||
closer.Close()
|
||||
}
|
||||
if lastErr != nil && buf.Len() == 0 {
|
||||
http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
header := w.Header()
|
||||
header.Set(contentTypeHeader, string(contentType))
|
||||
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
|
||||
if encoding != "" {
|
||||
header.Set(contentEncodingHeader, encoding)
|
||||
}
|
||||
w.Write(buf.Bytes())
|
||||
})
|
||||
}
|
||||
|
||||
// decorateWriter wraps a writer to handle gzip compression if requested. It
|
||||
// returns the decorated writer and the appropriate "Content-Encoding" header
|
||||
// (which is empty if no compression is enabled).
|
||||
func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
|
||||
header := request.Header.Get(acceptEncodingHeader)
|
||||
parts := strings.Split(header, ",")
|
||||
for _, part := range parts {
|
||||
part := strings.TrimSpace(part)
|
||||
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
|
||||
return gzip.NewWriter(writer), "gzip"
|
||||
}
|
||||
}
|
||||
return writer, ""
|
||||
}
|
||||
|
||||
var instLabels = []string{"method", "code"}
|
||||
|
||||
type nower interface {
|
||||
Now() time.Time
|
||||
}
|
||||
|
||||
type nowFunc func() time.Time
|
||||
|
||||
func (n nowFunc) Now() time.Time {
|
||||
return n()
|
||||
}
|
||||
|
||||
var now nower = nowFunc(func() time.Time {
|
||||
return time.Now()
|
||||
})
|
||||
|
||||
func nowSeries(t ...time.Time) nower {
|
||||
return nowFunc(func() time.Time {
|
||||
defer func() {
|
||||
t = t[1:]
|
||||
}()
|
||||
|
||||
return t[0]
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentHandler wraps the given HTTP handler for instrumentation. It
|
||||
// registers four metric collectors (if not already done) and reports HTTP
|
||||
// metrics to the (newly or already) registered collectors: http_requests_total
|
||||
// (CounterVec), http_request_duration_microseconds (Summary),
|
||||
// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
|
||||
// has a constant label named "handler" with the provided handlerName as
|
||||
// value. http_requests_total is a metric vector partitioned by HTTP method
|
||||
// (label name "method") and HTTP status code (label name "code").
|
||||
//
|
||||
// Deprecated: InstrumentHandler has several issues:
|
||||
//
|
||||
// - It uses Summaries rather than Histograms. Summaries are not useful if
|
||||
// aggregation across multiple instances is required.
|
||||
//
|
||||
// - It uses microseconds as unit, which is deprecated and should be replaced by
|
||||
// seconds.
|
||||
//
|
||||
// - The size of the request is calculated in a separate goroutine. Since this
|
||||
// calculator requires access to the request header, it creates a race with
|
||||
// any writes to the header performed during request handling.
|
||||
// httputil.ReverseProxy is a prominent example of a handler
|
||||
// performing such writes.
|
||||
//
|
||||
// Upcoming versions of this package will provide ways of instrumenting HTTP
|
||||
// handlers that are more flexible and have fewer issues. Please prefer direct
|
||||
// instrumentation in the meantime.
|
||||
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
|
||||
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
|
||||
}
|
||||
|
||||
// InstrumentHandlerFunc wraps the given function for instrumentation. It
|
||||
// otherwise works in the same way as InstrumentHandler (and shares the same
|
||||
// issues).
|
||||
//
|
||||
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
|
||||
// InstrumentHandler is.
|
||||
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
||||
return InstrumentHandlerFuncWithOpts(
|
||||
SummaryOpts{
|
||||
Subsystem: "http",
|
||||
ConstLabels: Labels{"handler": handlerName},
|
||||
},
|
||||
handlerFunc,
|
||||
)
|
||||
}
|
||||
|
||||
// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
|
||||
// issues) but provides more flexibility (at the cost of a more complex call
|
||||
// syntax). As InstrumentHandler, this function registers four metric
|
||||
// collectors, but it uses the provided SummaryOpts to create them. However, the
|
||||
// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
|
||||
// by "requests_total", "request_duration_microseconds", "request_size_bytes",
|
||||
// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
|
||||
// help string. The names of the variable labels of the http_requests_total
|
||||
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
|
||||
//
|
||||
// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
|
||||
// behavior of InstrumentHandler:
|
||||
//
|
||||
// prometheus.InstrumentHandlerWithOpts(
|
||||
// prometheus.SummaryOpts{
|
||||
// Subsystem: "http",
|
||||
// ConstLabels: prometheus.Labels{"handler": handlerName},
|
||||
// },
|
||||
// handler,
|
||||
// )
|
||||
//
|
||||
// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
|
||||
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
|
||||
// and all its fields are set to the equally named fields in the provided
|
||||
// SummaryOpts.
|
||||
//
|
||||
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
|
||||
// InstrumentHandler is.
|
||||
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
|
||||
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
|
||||
}
|
||||
|
||||
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
|
||||
// the same issues) but provides more flexibility (at the cost of a more complex
|
||||
// call syntax). See InstrumentHandlerWithOpts for details how the provided
|
||||
// SummaryOpts are used.
|
||||
//
|
||||
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
|
||||
// as InstrumentHandler is.
|
||||
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
||||
reqCnt := NewCounterVec(
|
||||
CounterOpts{
|
||||
Namespace: opts.Namespace,
|
||||
Subsystem: opts.Subsystem,
|
||||
Name: "requests_total",
|
||||
Help: "Total number of HTTP requests made.",
|
||||
ConstLabels: opts.ConstLabels,
|
||||
},
|
||||
instLabels,
|
||||
)
|
||||
|
||||
opts.Name = "request_duration_microseconds"
|
||||
opts.Help = "The HTTP request latencies in microseconds."
|
||||
reqDur := NewSummary(opts)
|
||||
|
||||
opts.Name = "request_size_bytes"
|
||||
opts.Help = "The HTTP request sizes in bytes."
|
||||
reqSz := NewSummary(opts)
|
||||
|
||||
opts.Name = "response_size_bytes"
|
||||
opts.Help = "The HTTP response sizes in bytes."
|
||||
resSz := NewSummary(opts)
|
||||
|
||||
regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
|
||||
regReqDur := MustRegisterOrGet(reqDur).(Summary)
|
||||
regReqSz := MustRegisterOrGet(reqSz).(Summary)
|
||||
regResSz := MustRegisterOrGet(resSz).(Summary)
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
now := time.Now()
|
||||
|
||||
delegate := &responseWriterDelegator{ResponseWriter: w}
|
||||
out := make(chan int)
|
||||
urlLen := 0
|
||||
if r.URL != nil {
|
||||
urlLen = len(r.URL.String())
|
||||
}
|
||||
go computeApproximateRequestSize(r, out, urlLen)
|
||||
|
||||
_, cn := w.(http.CloseNotifier)
|
||||
_, fl := w.(http.Flusher)
|
||||
_, hj := w.(http.Hijacker)
|
||||
_, rf := w.(io.ReaderFrom)
|
||||
var rw http.ResponseWriter
|
||||
if cn && fl && hj && rf {
|
||||
rw = &fancyResponseWriterDelegator{delegate}
|
||||
} else {
|
||||
rw = delegate
|
||||
}
|
||||
handlerFunc(rw, r)
|
||||
|
||||
elapsed := float64(time.Since(now)) / float64(time.Microsecond)
|
||||
|
||||
method := sanitizeMethod(r.Method)
|
||||
code := sanitizeCode(delegate.status)
|
||||
regReqCnt.WithLabelValues(method, code).Inc()
|
||||
regReqDur.Observe(elapsed)
|
||||
regResSz.Observe(float64(delegate.written))
|
||||
regReqSz.Observe(float64(<-out))
|
||||
})
|
||||
}
|
||||
|
||||
func computeApproximateRequestSize(r *http.Request, out chan int, s int) {
|
||||
s += len(r.Method)
|
||||
s += len(r.Proto)
|
||||
for name, values := range r.Header {
|
||||
s += len(name)
|
||||
for _, value := range values {
|
||||
s += len(value)
|
||||
}
|
||||
}
|
||||
s += len(r.Host)
|
||||
|
||||
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
|
||||
|
||||
if r.ContentLength != -1 {
|
||||
s += int(r.ContentLength)
|
||||
}
|
||||
out <- s
|
||||
}
|
||||
|
||||
type responseWriterDelegator struct {
|
||||
http.ResponseWriter
|
||||
|
||||
handler, method string
|
||||
status int
|
||||
written int64
|
||||
wroteHeader bool
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) WriteHeader(code int) {
|
||||
r.status = code
|
||||
r.wroteHeader = true
|
||||
r.ResponseWriter.WriteHeader(code)
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
|
||||
if !r.wroteHeader {
|
||||
r.WriteHeader(http.StatusOK)
|
||||
}
|
||||
n, err := r.ResponseWriter.Write(b)
|
||||
r.written += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
type fancyResponseWriterDelegator struct {
|
||||
*responseWriterDelegator
|
||||
}
|
||||
|
||||
func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
|
||||
return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
|
||||
}
|
||||
|
||||
func (f *fancyResponseWriterDelegator) Flush() {
|
||||
f.ResponseWriter.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
return f.ResponseWriter.(http.Hijacker).Hijack()
|
||||
}
|
||||
|
||||
func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
|
||||
if !f.wroteHeader {
|
||||
f.WriteHeader(http.StatusOK)
|
||||
}
|
||||
n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
|
||||
f.written += n
|
||||
return n, err
|
||||
}
|
||||
|
||||
func sanitizeMethod(m string) string {
|
||||
switch m {
|
||||
case "GET", "get":
|
||||
return "get"
|
||||
case "PUT", "put":
|
||||
return "put"
|
||||
case "HEAD", "head":
|
||||
return "head"
|
||||
case "POST", "post":
|
||||
return "post"
|
||||
case "DELETE", "delete":
|
||||
return "delete"
|
||||
case "CONNECT", "connect":
|
||||
return "connect"
|
||||
case "OPTIONS", "options":
|
||||
return "options"
|
||||
case "NOTIFY", "notify":
|
||||
return "notify"
|
||||
default:
|
||||
return strings.ToLower(m)
|
||||
}
|
||||
}
|
||||
|
||||
func sanitizeCode(s int) string {
|
||||
switch s {
|
||||
case 100:
|
||||
return "100"
|
||||
case 101:
|
||||
return "101"
|
||||
|
||||
case 200:
|
||||
return "200"
|
||||
case 201:
|
||||
return "201"
|
||||
case 202:
|
||||
return "202"
|
||||
case 203:
|
||||
return "203"
|
||||
case 204:
|
||||
return "204"
|
||||
case 205:
|
||||
return "205"
|
||||
case 206:
|
||||
return "206"
|
||||
|
||||
case 300:
|
||||
return "300"
|
||||
case 301:
|
||||
return "301"
|
||||
case 302:
|
||||
return "302"
|
||||
case 304:
|
||||
return "304"
|
||||
case 305:
|
||||
return "305"
|
||||
case 307:
|
||||
return "307"
|
||||
|
||||
case 400:
|
||||
return "400"
|
||||
case 401:
|
||||
return "401"
|
||||
case 402:
|
||||
return "402"
|
||||
case 403:
|
||||
return "403"
|
||||
case 404:
|
||||
return "404"
|
||||
case 405:
|
||||
return "405"
|
||||
case 406:
|
||||
return "406"
|
||||
case 407:
|
||||
return "407"
|
||||
case 408:
|
||||
return "408"
|
||||
case 409:
|
||||
return "409"
|
||||
case 410:
|
||||
return "410"
|
||||
case 411:
|
||||
return "411"
|
||||
case 412:
|
||||
return "412"
|
||||
case 413:
|
||||
return "413"
|
||||
case 414:
|
||||
return "414"
|
||||
case 415:
|
||||
return "415"
|
||||
case 416:
|
||||
return "416"
|
||||
case 417:
|
||||
return "417"
|
||||
case 418:
|
||||
return "418"
|
||||
|
||||
case 500:
|
||||
return "500"
|
||||
case 501:
|
||||
return "501"
|
||||
case 502:
|
||||
return "502"
|
||||
case 503:
|
||||
return "503"
|
||||
case 504:
|
||||
return "504"
|
||||
case 505:
|
||||
return "505"
|
||||
|
||||
case 428:
|
||||
return "428"
|
||||
case 429:
|
||||
return "429"
|
||||
case 431:
|
||||
return "431"
|
||||
case 511:
|
||||
return "511"
|
||||
|
||||
default:
|
||||
return strconv.Itoa(s)
|
||||
}
|
||||
}
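Everything in this file is deprecated in favour of the promhttp sub-package, so a short sketch of the suggested replacement may help: promhttp.Handler for the metrics endpoint plus direct instrumentation with a HistogramVec measured in seconds. The metric name, handler label and listen address are assumptions made for the example, not values taken from this repository.

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var httpDur = prometheus.NewHistogramVec(prometheus.HistogramOpts{
	Name:    "http_request_duration_seconds", // seconds, not microseconds
	Help:    "HTTP request latencies.",
	Buckets: prometheus.DefBuckets,
}, []string{"handler"})

// timed is a hypothetical helper wrapping a handler with a duration histogram.
func timed(name string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		next.ServeHTTP(w, r)
		httpDur.WithLabelValues(name).Observe(time.Since(start).Seconds())
	})
}

func main() {
	prometheus.MustRegister(httpDur)

	http.Handle("/", timed("root", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})))
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}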
|
Some files were not shown because too many files have changed in this diff