mirror of https://github.com/Luzifer/nginx-sso.git synced 2024-10-18 07:34:22 +00:00

Remove vendored libraries

Signed-off-by: Knut Ahlers <knut@ahlers.me>
Knut Ahlers 2020-06-30 00:35:34 +02:00
parent e28a06726a
commit f9118beb79
Signed by: luzifer
GPG key ID: DC2729FDD34BE99E
1036 changed files with 0 additions and 423952 deletions

vendor/cloud.google.com/go/LICENSE generated vendored

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -1,12 +0,0 @@
{
"name": "metadata",
"name_pretty": "Google Compute Engine Metadata API",
"product_documentation": "https://cloud.google.com/compute/docs/storing-retrieving-metadata",
"client_documentation": "https://godoc.org/cloud.google.com/go/compute/metadata",
"release_level": "ga",
"language": "go",
"repo": "googleapis/google-cloud-go",
"distribution_name": "cloud.google.com/go/compute/metadata",
"api_id": "compute:metadata",
"requires_billing": false
}


@@ -1,526 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metadata provides access to Google Compute Engine (GCE)
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
package metadata // import "cloud.google.com/go/compute/metadata"
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"sync"
"time"
)
const (
// metadataIP is the documented metadata server IP address.
metadataIP = "169.254.169.254"
// metadataHostEnv is the environment variable specifying the
// GCE metadata hostname. If empty, the default value of
// metadataIP ("169.254.169.254") is used instead.
// This variable name is not defined by any spec, as far as
// I know; it was made up for the Go package.
metadataHostEnv = "GCE_METADATA_HOST"
userAgent = "gcloud-golang/0.1"
)
type cachedValue struct {
k string
trim bool
mu sync.Mutex
v string
}
var (
projID = &cachedValue{k: "project/project-id", trim: true}
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
instID = &cachedValue{k: "instance/id", trim: true}
)
var (
defaultClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
ResponseHeaderTimeout: 2 * time.Second,
},
}}
subscribeClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
},
}}
)
// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string
func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
func (c *cachedValue) get(cl *Client) (v string, err error) {
defer c.mu.Unlock()
c.mu.Lock()
if c.v != "" {
return c.v, nil
}
if c.trim {
v, err = cl.getTrimmed(c.k)
} else {
v, err = cl.Get(c.k)
}
if err == nil {
c.v = v
}
return
}
var (
onGCEOnce sync.Once
onGCE bool
)
// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
onGCEOnce.Do(initOnGCE)
return onGCE
}
func initOnGCE() {
onGCE = testOnGCE()
}
func testOnGCE() bool {
// The user explicitly said they're on GCE, so trust them.
if os.Getenv(metadataHostEnv) != "" {
return true
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
resc := make(chan bool, 2)
// Try two strategies in parallel.
// See https://github.com/googleapis/google-cloud-go/issues/194
go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent)
res, err := defaultClient.hc.Do(req.WithContext(ctx))
if err != nil {
resc <- false
return
}
defer res.Body.Close()
resc <- res.Header.Get("Metadata-Flavor") == "Google"
}()
go func() {
addrs, err := net.LookupHost("metadata.google.internal")
if err != nil || len(addrs) == 0 {
resc <- false
return
}
resc <- strsContains(addrs, metadataIP)
}()
tryHarder := systemInfoSuggestsGCE()
if tryHarder {
res := <-resc
if res {
// The first strategy succeeded, so let's use it.
return true
}
// Wait for either the DNS or metadata server probe to
// contradict the other one and say we are running on
// GCE. Give it a lot of time to do so, since the system
// info already suggests we're running on a GCE BIOS.
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case res = <-resc:
return res
case <-timer.C:
// Too slow. Who knows what this system is.
return false
}
}
// There's no hint from the system info that we're running on
// GCE, so use the first probe's result as truth, whether it's
// true or false. The goal here is to optimize for speed for
// users who are NOT running on GCE. We can't assume that
// either a DNS lookup or an HTTP request to a blackholed IP
// address is fast. Worst case this should return when the
// metaClient's Transport.ResponseHeaderTimeout or
// Transport.Dial.Timeout fires (in two seconds).
return <-resc
}
// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
if runtime.GOOS != "linux" {
// We don't have any non-Linux clues available, at least yet.
return false
}
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
name := strings.TrimSpace(string(slurp))
return name == "Google" || name == "Google Compute Engine"
}
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
// ResponseHeaderTimeout).
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
return subscribeClient.Subscribe(suffix, fn)
}
// Get calls Client.Get on the default client.
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return defaultClient.ProjectID() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) { return defaultClient.InternalIP() }
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
// Email calls Client.Email on the default client.
func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) }
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) { return defaultClient.Hostname() }
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) { return defaultClient.InstanceID() }
// InstanceName returns the current VM's instance name string.
func InstanceName() (string, error) { return defaultClient.InstanceName() }
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) { return defaultClient.Zone() }
// InstanceAttributes calls Client.InstanceAttributes on the default client.
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
// ProjectAttributes calls Client.ProjectAttributes on the default client.
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
func InstanceAttributeValue(attr string) (string, error) {
return defaultClient.InstanceAttributeValue(attr)
}
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
func ProjectAttributeValue(attr string) (string, error) {
return defaultClient.ProjectAttributeValue(attr)
}
// Scopes calls Client.Scopes on the default client.
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
// A Client provides metadata.
type Client struct {
hc *http.Client
}
// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
// will use the given http.Client instead of the default client.
func NewClient(c *http.Client) *Client {
return &Client{hc: c}
}
// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
u := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", u, nil)
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
res, err := c.hc.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
if res.StatusCode != 200 {
return "", "", &Error{Code: res.StatusCode, Message: string(all)}
}
return string(all), res.Header.Get("Etag"), nil
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func (c *Client) Get(suffix string) (string, error) {
val, _, err := c.getETag(suffix)
return val, err
}
func (c *Client) getTrimmed(suffix string) (s string, err error) {
s, err = c.Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *Client) lines(suffix string) ([]string, error) {
j, err := c.Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// ProjectID returns the current instance's project ID string.
func (c *Client) ProjectID() (string, error) { return projID.get(c) }
// NumericProjectID returns the current instance's numeric project ID.
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
// InstanceID returns the current VM's numeric instance ID.
func (c *Client) InstanceID() (string, error) { return instID.get(c) }
// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/ip")
}
// Email returns the email address associated with the service account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Email(serviceAccount string) (string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email")
}
// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
return c.getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func (c *Client) InstanceTags() ([]string, error) {
var s []string
j, err := c.Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceName returns the current VM's instance name string.
func (c *Client) InstanceName() (string, error) {
host, err := c.Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func (c *Client) Zone() (string, error) {
zone, err := c.getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
return c.Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
return c.Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
const failedSubscribeSleep = time.Second * 5
// First check to see if the metadata value exists at all.
val, lastETag, err := c.getETag(suffix)
if err != nil {
return err
}
if err := fn(val, true); err != nil {
return err
}
ok := true
if strings.ContainsRune(suffix, '?') {
suffix += "&wait_for_change=true&last_etag="
} else {
suffix += "?wait_for_change=true&last_etag="
}
for {
val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
if err != nil {
if _, deleted := err.(NotDefinedError); !deleted {
time.Sleep(failedSubscribeSleep)
continue // Retry on other errors.
}
ok = false
}
lastETag = etag
if err := fn(val, ok); err != nil || !ok {
return err
}
}
}
// Error contains an error response from the server.
type Error struct {
// Code is the HTTP response status code.
Code int
// Message is the server response message.
Message string
}
func (e *Error) Error() string {
return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
}
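
For context, the file above is the GCE metadata client that was vendored under vendor/cloud.google.com/go. The following is a hedged, illustrative sketch of how a consumer typically uses the exported helpers shown in that file (OnGCE, ProjectID, InstanceAttributeValue, NotDefinedError); it assumes the package remains resolvable via Go modules once the vendor/ copy is gone and is not part of the removed code:

```go
// Illustrative only: exercises the exported helpers of the removed metadata package.
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// OnGCE probes the metadata server and DNS to decide whether we run on GCE.
	// For local testing, GCE_METADATA_HOST can point at a fake metadata server.
	if !metadata.OnGCE() {
		log.Println("not running on GCE")
		return
	}

	projectID, err := metadata.ProjectID()
	if err != nil {
		log.Fatalf("reading project ID: %v", err)
	}
	fmt.Println("project:", projectID)

	// Undefined metadata is reported as NotDefinedError rather than an empty value.
	if _, err := metadata.InstanceAttributeValue("does-not-exist"); err != nil {
		if _, ok := err.(metadata.NotDefinedError); ok {
			fmt.Println("attribute is not defined")
		}
	}
}
```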


@@ -1,5 +0,0 @@
/yubigo
/yubigo.sublime-project
/yubigo.sublime-workspace
/test
.apikey


@@ -1,22 +0,0 @@
Copyright (c) 2012, Geert-Johan Riemer
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -1,105 +0,0 @@
## yubigo
Yubigo is a Yubikey client API library that provides an easy way to integrate the Yubikey into any Go application.
## Installation
Installation is simple. Use go get:
`go get github.com/GeertJohan/yubigo`
## Usage
Make sure to import the library: `import "github.com/GeertJohan/yubigo"`
For use with the default Yubico servers, make sure you have an API key. [Request a key][getapikey].
**Basic OTP checking usage:**
```go
// create a new yubiAuth instance with id and key
yubiAuth, err := yubigo.NewYubiAuth("1234", "fdsaffqaf4vrc2q3cds=")
if err != nil {
// probably an invalid key was given
log.Fatalln(err)
}
// verify an OTP string
result, ok, err := yubiAuth.Verify("ccccccbetgjevivbklihljgtbenbfrefccveiglnjfbc")
if err != nil {
log.Fatalln(err)
}
if ok {
// success!! The OTP is valid!
log.Printf("Used query was: %s\n", result.GetRequestQuery()) // this query string includes the url of the api-server that responded first.
} else {
// fail! The OTP is invalid or has been used before.
log.Println("The given OTP is invalid!!!")
}
```
**Use your own HTTP Client with fine-tuned config:**
While the library works out of the box, it's not recommended to use the default http client.
It is better to configure your own http client with useful timeouts.
For example:
```go
yubigo.HTTPClient = &http.Client{
Timeout: time.Second * 15,
Transport: &http.Transport{
MaxConnsPerHost: 20,
MaxIdleConnsPerHost: 5,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 60 * time.Second,
}).DialContext,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
},
}
```
**Do not verify HTTPS certificate:**
```go
// Disable HTTPS cert verification. Use true to enable again.
yubiAuth.HttpsVerifyCertificate(false)
```
**HTTP instead of HTTPS:**
```go
// Disable HTTPS. Use true to enable again.
yubiAuth.UseHttps(false)
```
**Custom API server:**
```go
// Set a list of n servers, each server as host + path.
// Do not prepend with protocol
yubiAuth.SetApiServerList("api0.server.com/api/verify", "api1.server.com/api/verify", "otherserver.com/api/verify")
```
## Licence
This project is licensed under a Simplified BSD license. Please read the [LICENSE file][license].
## Todo
- Test files
- More documentation
- Getters/Setters for some options on the YubiAuth object.
## Protocol & Package documentation
This project implements a pure-Go Yubico OTP Validation Client following the [Yubico Validation Protocol Version 2.0][validationProtocolV20].
You will find "go doc"-like [package documentation at go.pkgdoc.org][pkgdoc].
[license]: https://github.com/GeertJohan/yubigo/blob/master/LICENSE
[getapikey]: https://upgrade.yubico.com/getapikey/
[pkgdoc]: http://go.pkgdoc.org/github.com/GeertJohan/yubigo
[validationProtocolV20]: http://code.google.com/p/yubikey-val-server-php/wiki/ValidationProtocolV20


@@ -1,580 +0,0 @@
package yubigo
import (
"bufio"
"context"
"crypto/hmac"
"crypto/sha1"
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"io"
"log"
"math/rand"
"net/http"
"regexp"
"sort"
"strings"
"sync"
"time"
)
var (
dvorakToQwerty = strings.NewReplacer(
"j", "c", "x", "b", "e", "d", ".", "e", "u", "f", "i", "g", "d", "h", "c", "i",
"h", "j", "t", "k", "n", "l", "b", "n", "p", "r", "y", "t", "g", "u", "k", "v",
"J", "C", "X", "B", "E", "D", ".", "E", "U", "F", "I", "G", "D", "H", "C", "I",
"H", "J", "T", "K", "N", "L", "B", "N", "P", "R", "Y", "T", "G", "U", "K", "V")
matchDvorak = regexp.MustCompile(`^[jxe.uidchtnbpygkJXE.UIDCHTNBPYGK]{32,48}$`)
matchQwerty = regexp.MustCompile(`^[cbdefghijklnrtuvCBDEFGHIJKLNRTUV]{32,48}$`)
signatureUrlFix = regexp.MustCompile(`\+`)
)
// Package variable used to override the http client used for communication
// with Yubico. If nil the standard http.Client will be used - if overriding
// you need to ensure the transport options are set.
var HTTPClient *http.Client = nil
// Parse and verify the given OTP string into prefix (identity) and ciphertext.
// Function returns a non-nil error when given OTP is not in valid format.
// NOTE: This function does NOT verify if the OTP is correct and unused/unique.
func ParseOTP(otp string) (prefix string, ciphertext string, err error) {
if len(otp) < 32 || len(otp) > 48 {
err = errors.New("OTP has wrong length.")
return
}
// When otp matches dvorak-otp, then translate to qwerty.
if matchDvorak.MatchString(otp) {
otp = dvorakToQwerty.Replace(otp)
}
// Verify that otp matches qwerty expectations
if !matchQwerty.MatchString(otp) {
err = errors.New("Given string is not a valid Yubikey OTP. It contains invalid characters and/or the length is wrong.")
return
}
l := len(otp)
prefix = otp[0 : l-32]
ciphertext = otp[l-32 : l]
return
}
type YubiAuth struct {
id string
key []byte
apiServerList []string
protocol string
verifyCertificate bool
workers []*verifyWorker
use sync.Mutex
debug bool
}
type verifyWorker struct {
ya *YubiAuth // YubiAuth this worker belongs to
id int // Worker id
client *http.Client // http client standing by ready for work
apiServer string // API server URL
work chan *workRequest // Channel on which the worker receives work
stop chan bool // Channel for stop signal
}
type workRequest struct {
ctx context.Context
paramString *string
resultChan chan *workResult
}
type workResult struct {
response *http.Response
requestQuery string
err error // indicates a failing server/network. This doesn't mean the OTP is invalid.
}
func (vw *verifyWorker) process() {
if vw.ya.debug {
log.Printf("worker[%d]: Started.\n", vw.id)
}
for {
select {
case w := <-vw.work:
// Create url
url := vw.ya.protocol + vw.apiServer + *w.paramString
if vw.ya.debug {
log.Printf("worker[%d]: Have work. Requesting: %s\n", vw.id, url)
}
// Create request
request, err := http.NewRequest("GET", url, nil)
if err != nil {
w.resultChan <- &workResult{
response: nil,
requestQuery: url,
err: fmt.Errorf("Could not create http request. Error: %s\n", err),
}
continue
}
request.Header.Add("User-Agent", "github.com/GeertJohan/yubigo")
// Call server with cancel context
request = request.WithContext(w.ctx)
response, err := vw.client.Do(request)
// If we received an error from the client, return that (wrapped) on the channel.
if err != nil {
// skip cancellation errors
if w.ctx.Err() == context.Canceled {
continue
}
w.resultChan <- &workResult{
response: nil,
requestQuery: url,
err: fmt.Errorf("Http client error: %s\n", err),
}
if vw.ya.debug {
log.Printf("worker[%d]: Http client error: %s", vw.id, err)
}
continue
}
// It seems everything is ok! return the response (wrapped) on the channel.
if vw.ya.debug {
log.Printf("worker[%d] Received result from api server. Sending on channel.", vw.id)
}
w.resultChan <- &workResult{
response: response,
requestQuery: url,
err: nil,
}
continue
case <-vw.stop:
if vw.ya.debug {
log.Printf("worker[%d]: received stop signal.\n", vw.id)
}
return
}
}
}
// Create a yubiAuth instance with given API-id and API-key.
// Returns an error when the key could not be base64 decoded.
// To use yubigo with the Yubico Web Service (default api servers), create an API id+key here: https://upgrade.yubico.com/getapikey/
// Debugging is disabled. For debugging: use NewYubiAuthDebug(..)
func NewYubiAuth(id string, key string) (auth *YubiAuth, err error) {
return NewYubiAuthDebug(id, key, false)
}
// Create a yubiAuth instance for given API-id and API-key.
// Has third parameter `debug`. When debug is true this YubiAuth instance will spam the console with logging messages.
// Returns an error when the key could not be base64 decoded.
// To use yubigo with the Yubico Web Service (default api servers), create an API id+key here: https://upgrade.yubico.com/getapikey/
func NewYubiAuthDebug(id string, key string, debug bool) (auth *YubiAuth, err error) {
keyBytes, err := base64.StdEncoding.DecodeString(key)
if err != nil {
err = fmt.Errorf("Given key seems to be invalid. Could not base64_decode. Error: %s\n", err)
return
}
if debug {
log.Printf("NewYubiAuthDebug: Given key is base64 decodable. Creating new YubiAuth instance with api id '%s'.\n", id)
}
auth = &YubiAuth{
id: id,
key: keyBytes,
apiServerList: []string{"api.yubico.com/wsapi/2.0/verify",
"api2.yubico.com/wsapi/2.0/verify",
"api3.yubico.com/wsapi/2.0/verify",
"api4.yubico.com/wsapi/2.0/verify",
"api5.yubico.com/wsapi/2.0/verify"},
protocol: "https://",
verifyCertificate: true,
debug: debug,
}
if debug {
log.Printf("NewYubiAuthDebug: Using yubico web servers: %#v\n", auth.apiServerList)
log.Println("NewYubiAuthDebug: Going to build workers.")
}
// Build workers
auth.buildWorkers()
// All done :)
return
}
// Stops existing workers and creates new ones.
func (ya *YubiAuth) buildWorkers() {
// Unexported (internal) method, so no locking.
// create tls config
tlsConfig := &tls.Config{}
if !ya.verifyCertificate {
tlsConfig.InsecureSkipVerify = true
}
// stop all existing workers
for _, worker := range ya.workers {
worker.stop <- true
}
// create new (empty) slice with exact capacity
ya.workers = make([]*verifyWorker, 0, len(ya.apiServerList))
// start new workers. One for each apiServerString
for id, apiServer := range ya.apiServerList {
// create worker instance with new http.Client instance
worker := &verifyWorker{
ya: ya,
id: id,
apiServer: apiServer + "?",
work: make(chan *workRequest),
stop: make(chan bool),
}
if HTTPClient == nil {
worker.client = &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
},
}
} else {
worker.client = HTTPClient
}
ya.workers = append(ya.workers, worker)
// start worker process in new goroutine
go worker.process()
}
}
// Use this method to specify a list of servers for verification.
// Each server string should contain host + path.
// Example: "api.yubico.com/wsapi/2.0/verify".
func (ya *YubiAuth) SetApiServerList(urls ...string) {
// Lock
ya.use.Lock()
defer ya.use.Unlock()
// save setting
ya.apiServerList = urls
// rebuild workers (api server url's have changed)
ya.buildWorkers()
}
// Retrieve the list of servers that are being used for verification.
func (ya *YubiAuth) GetApiServerList() []string {
return ya.apiServerList
}
// Enable or disable the use of https
func (ya *YubiAuth) UseHttps(useHttps bool) {
// Lock
ya.use.Lock()
defer ya.use.Unlock()
// change setting
if useHttps {
ya.protocol = "https://"
} else {
ya.protocol = "http://"
}
// no need to rebuild workers, they re-read ya.protocol on each request.
}
// Enable or disable https certificate verification
// Disable this at your own risk.
func (ya *YubiAuth) HttpsVerifyCertificate(verifyCertificate bool) {
// Lock
ya.use.Lock()
defer ya.use.Unlock()
// save setting
ya.verifyCertificate = verifyCertificate
// rebuild workers (client has to be changed)
ya.buildWorkers()
}
// Verify calls the API with the given OTP and reports whether the OTP is valid.
// This method will return an error if something unexpected happens.
// If no error was returned, the returned 'ok bool' indicates whether the OTP is valid.
// If the 'ok bool' is true, additional information can be found in the returned YubiResponse object.
func (ya *YubiAuth) Verify(otp string) (yr *YubiResponse, ok bool, err error) {
// Lock
ya.use.Lock()
defer ya.use.Unlock()
// check the OTP
_, _, err = ParseOTP(otp)
if err != nil {
return nil, false, err
}
// create slice to store parameters for this verification request
paramSlice := make([]string, 0)
paramSlice = append(paramSlice, "id="+ya.id)
paramSlice = append(paramSlice, "otp="+otp)
// Create 40 characters nonce
rand.Seed(time.Now().UnixNano())
k := make([]rune, 40)
for i := 0; i < 40; i++ {
c := rand.Intn(35)
if c < 10 {
c += 48 // numbers (0-9) (0+48 == 48 == '0', 9+48 == 57 == '9')
} else {
c += 87 // lower case alphabets (a-z) (10+87 == 97 == 'a', 35+87 == 122 = 'z')
}
k[i] = rune(c)
}
nonce := string(k)
paramSlice = append(paramSlice, "nonce="+nonce)
// These settings are hardcoded in the library for now.
//++ TODO(GeertJohan): add these values to the yubiAuth object and create getters/setters
// paramSlice = append(paramSlice, "timestamp=1")
paramSlice = append(paramSlice, "sl=secure")
//++ TODO(GeertJohan): Add timeout support?
//++ //paramSlice = append(paramSlice, "timeout=")
// sort the slice
sort.Strings(paramSlice)
// create parameter string
paramString := strings.Join(paramSlice, "&")
// generate signature
if len(ya.key) > 0 {
hmacenc := hmac.New(sha1.New, ya.key)
_, err := hmacenc.Write([]byte(paramString))
if err != nil {
return nil, false, fmt.Errorf("Could not calculate signature. Error: %s\n", err)
}
signature := base64.StdEncoding.EncodeToString(hmacenc.Sum([]byte{}))
signature = signatureUrlFix.ReplaceAllString(signature, `%2B`)
paramString = paramString + "&h=" + signature
}
// create result channel, buffersize equals the amount of workers.
resultChan := make(chan *workResult, len(ya.workers))
// create a cancel context to cancel http requests while we already have a good result
ctx, cancel := context.WithCancel(context.Background())
// create workRequest instance
wr := &workRequest{
ctx: ctx,
paramString: &paramString,
resultChan: resultChan,
}
// send workRequest to each worker
for _, worker := range ya.workers {
worker.work <- wr
}
// count the errors so we can handle when all servers fail (network fail for instance)
errCount := 0
// local result var, will contain the first result we have
var result *workResult
// keep looping until we have a good result
for {
// listen for result from a worker
result = <-resultChan
// check for error
if result.err != nil {
// increment error counter
errCount++
if ya.debug {
// debug logging
log.Printf("A server (%s) gave error back: %s\n", result.requestQuery, result.err)
}
if errCount == len(ya.apiServerList) {
// All workers are done, there's nothing left to try. we return an error.
return nil, false, errors.New("None of the servers responded properly.")
}
// we have an error, but not all workers responded yet, so let's wait for the next result.
continue
}
// create a yubiResult from the workers response.
yr, err = newYubiResponse(result)
if err != nil {
return nil, false, err
}
// Check for "REPLAYED_REQUEST" result.
if status, _ := yr.resultParameters["status"]; status == "REPLAYED_REQUEST" {
// The result status is "REPLAYED_REQUEST".
// This means that the server handling this request was synced by another server before our request.
// Let's wait for the result from the other server.
// See: https://forum.yubico.com/viewtopic.php?f=3&t=701
// increment error counter
errCount++
if ya.debug {
// debug logging
log.Println("Got replayed request: ", result.response.Body)
}
if errCount == len(ya.apiServerList) {
// All workers are done, there is nothing left to try. We return an error.
return nil, false, errors.New("None of the servers responded properly.")
}
// We have a replayed request, but not all workers responded yet, so let's wait for the next result.
continue
}
// we have a result that satisfies us so we can cancel the other clients
cancel()
// No error or REPLAYED_REQUEST. Seems like we have a proper result.
break
}
// check status
status, ok := yr.resultParameters["status"]
if !ok || status != "OK" {
switch status {
case "BAD_OTP":
return yr, false, nil
case "REPLAYED_OTP":
return yr, false, errors.New("The OTP is valid, but has been used before. If you receive this error, you might be the victim of a man-in-the-middle attack.")
case "BAD_SIGNATURE":
return yr, false, errors.New("Signature verification at the api server failed. The used id/key combination could be invalid or is not activated (yet).")
case "NO_SUCH_CLIENT":
return yr, false, errors.New("The api server does not accept the given id. It might be invalid or is not activated (yet).")
case "OPERATION_NOT_ALLOWED":
return yr, false, errors.New("The api server does not allow the given api id to verify OTPs.")
case "BACKEND_ERROR":
return yr, false, errors.New("The api server seems to be broken. Please contact the api servers system administration (yubico servers? contact yubico).")
case "NOT_ENOUGH_ANSWERS":
return yr, false, errors.New("The api server could not get requested number of syncs during before timeout")
case "REPLAYED_REQUEST":
panic("Unexpected. This status should've been catched in the worker response loop.")
return yr, false, errors.New("The api server has seen this unique request before. If you receive this error, you might be the victim of a man-in-the-middle attack.")
default:
return yr, false, fmt.Errorf("Unknown status parameter (%s) sent by api server.", status)
}
}
// check otp
otpCheck, ok := yr.resultParameters["otp"]
if !ok || otp != otpCheck {
return nil, false, errors.New("Could not validate otp value from server response.")
}
// check nonce
nonceCheck, ok := yr.resultParameters["nonce"]
if !ok || nonce != nonceCheck {
return nil, false, errors.New("Could not validate nonce value from server response.")
}
// check attached signature with remake of that signature, if key is actually in use.
if len(ya.key) > 0 {
receivedSignature, ok := yr.resultParameters["h"]
if !ok || len(receivedSignature) == 0 {
return nil, false, errors.New("No signature hash was attached by the api server, we do expect one though. This might be a hacking attempt.")
}
// create a slice with the same size-1 as the parameters map (we're leaving the hash itself out of its replica calculation)
receivedValuesSlice := make([]string, 0, len(yr.resultParameters)-1)
for key, value := range yr.resultParameters {
if key != "h" {
receivedValuesSlice = append(receivedValuesSlice, key+"="+value)
}
}
sort.Strings(receivedValuesSlice)
receivedValuesString := strings.Join(receivedValuesSlice, "&")
hmacenc := hmac.New(sha1.New, ya.key)
_, err := hmacenc.Write([]byte(receivedValuesString))
if err != nil {
return nil, false, fmt.Errorf("Could not calculate signature replica. Error: %s\n", err)
}
recievedSignatureReplica := base64.StdEncoding.EncodeToString(hmacenc.Sum([]byte{}))
if receivedSignature != recievedSignatureReplica {
return nil, false, errors.New("The received signature hash is not valid. This might be a hacking attempt.")
}
}
// we're done!
yr.validOTP = true
return yr, true, nil
}
// Contains details about yubikey OTP verification.
type YubiResponse struct {
requestQuery string
resultParameters map[string]string
validOTP bool
}
func newYubiResponse(result *workResult) (*YubiResponse, error) {
bodyReader := bufio.NewReader(result.response.Body)
yr := &YubiResponse{}
yr.resultParameters = make(map[string]string)
yr.requestQuery = result.requestQuery
for {
// read through the response lines
line, err := bodyReader.ReadString('\n')
// handle error, which at one point should be an expected io.EOF (end of file)
if err != nil {
if err == io.EOF {
break // successfully done with reading lines, lets break this for loop
}
return nil, fmt.Errorf("Could not read result body from the server. Error: %s\n", err)
}
// parse result lines, split on first '=', trim \n and \r
keyvalue := strings.SplitN(line, "=", 2)
if len(keyvalue) == 2 {
yr.resultParameters[keyvalue[0]] = strings.Trim(keyvalue[1], "\n\r")
}
}
return yr, nil
}
// Returns whether the verification was successful
func (yr *YubiResponse) IsValidOTP() bool {
return yr.validOTP
}
// Get the requestQuery that was used during verification.
func (yr *YubiResponse) GetRequestQuery() string {
return yr.requestQuery
}
// Retrieve a parameter from the api's response
func (yr *YubiResponse) GetResultParameter(key string) (value string) {
value, ok := yr.resultParameters[key]
if !ok {
value = ""
}
return value
}
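
To complement the README examples further up, here is a hedged, illustrative sketch of two entry points defined in this file that the README does not show together: ParseOTP for offline format checking and the result accessors on YubiResponse. The API id, key, and OTP below are the placeholder values from the README, not working credentials:

```go
// Illustrative only: format-checks an OTP locally, then verifies it online.
package main

import (
	"fmt"
	"log"

	"github.com/GeertJohan/yubigo"
)

func main() {
	otp := "ccccccbetgjevivbklihljgtbenbfrefccveiglnjfbc" // placeholder OTP from the README

	// ParseOTP only validates the format; it never contacts a validation server.
	prefix, ciphertext, err := yubigo.ParseOTP(otp)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println("key identity:", prefix)
	fmt.Println("ciphertext length:", len(ciphertext)) // always 32 characters

	// Placeholder API id/key; request real ones at https://upgrade.yubico.com/getapikey/
	auth, err := yubigo.NewYubiAuth("1234", "fdsaffqaf4vrc2q3cds=")
	if err != nil {
		log.Fatalln(err)
	}

	result, ok, err := auth.Verify(otp)
	if err != nil {
		log.Fatalln(err)
	}
	if ok && result.IsValidOTP() {
		// Individual response fields (status, otp, nonce, ...) are exposed as parameters.
		fmt.Println("status:", result.GetResultParameter("status"))
		fmt.Println("query:", result.GetRequestQuery())
	}
}
```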


@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016- Knut Ahlers <knut@ahlers.me>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,21 +0,0 @@
package str
// AppendIfMissing adds a string to a slice when it's not present yet
func AppendIfMissing(slice []string, s string) []string {
for _, e := range slice {
if e == s {
return slice
}
}
return append(slice, s)
}
// StringInSlice checks for the existence of a string in the slice
func StringInSlice(a string, list []string) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}

View file

@ -1,12 +0,0 @@
language: go
go:
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- tip
script: go test -v -race -cover ./...

View file

@ -1,26 +0,0 @@
# 2.2.1 / 2019-02-04
* Add go module information
# 2.2.0 / 2018-09-18
* Add support for time.Time flags
# 2.1.0 / 2018-08-02
* Add AutoEnv feature
# 2.0.0 / 2018-08-02
* Breaking: Ensure an empty default string does not yield a slice with 1 element
Though this is just a tiny change it does change the default behaviour, so I'm marking this as a breaking change. You should ensure your code is fine with the changes.
# 1.2.0 / 2017-06-19
* Add ParseAndValidate method
# 1.1.0 / 2016-06-28
* Support time.Duration config parameters
* Added goreportcard badge
* Added testcase for using bool with ENV and default

View file

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2015- Knut Ahlers <knut@ahlers.me>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,88 +0,0 @@
[![Build Status](https://travis-ci.org/Luzifer/rconfig.svg?branch=master)](https://travis-ci.org/Luzifer/rconfig)
[![Go Report Card](https://goreportcard.com/badge/github.com/Luzifer/rconfig)](https://goreportcard.com/report/github.com/Luzifer/rconfig)
[![Documentation](https://badges.fyi/static/godoc/reference/5272B4)](https://godoc.org/github.com/Luzifer/rconfig)
![](https://badges.fyi/github/license/Luzifer/rconfig)
[![](https://badges.fyi/github/latest-tag/Luzifer/rconfig)](https://gopkg.in/Luzifer/rconfig.v2)
## Description
> Package rconfig implements a CLI configuration reader with struct-embedded defaults, environment variables and posix compatible flag parsing using the [pflag](https://github.com/spf13/pflag) library.
## Installation
Install by running:
```
go get -u github.com/Luzifer/rconfig
```
OR fetch a specific version:
```
go get -u gopkg.in/luzifer/rconfig.v2
```
Run tests by running:
```
go test -v -race -cover github.com/Luzifer/rconfig
```
## Usage
A very simple use case is to just configure a struct inside the vars section of your `main.go` and to parse the command-line flags from the `main()` function:
```go
package main
import (
"fmt"
"github.com/Luzifer/rconfig"
)
var (
cfg = struct {
Username string `default:"unknown" flag:"user" description:"Your name"`
Details struct {
Age int `default:"25" flag:"age" env:"age" description:"Your age"`
}
}{}
)
func main() {
rconfig.Parse(&cfg)
fmt.Printf("Hello %s, happy birthday for your %dth birthday.",
cfg.Username,
cfg.Details.Age)
}
```
### Provide variable defaults by using a file
Given you have a file `~/.myapp.yml` containing some secrets or usernames (for the example below the username is assumed to be "luzifer") as a default configuration for your application, you can use this source code to load the defaults from that file using the `vardefault` tag in your configuration struct.
The order of the directives (lower number = higher precedence):
1. Flags provided in command line
1. Environment variables
1. Variable defaults (`vardefault` tag in the struct)
1. `default` tag in the struct
```go
var cfg = struct {
Username string `vardefault:"username" flag:"username" description:"Your username"`
}{}
func main() {
rconfig.SetVariableDefaults(rconfig.VarDefaultsFromYAMLFile("~/.myapp.yml"))
rconfig.Parse(&cfg)
fmt.Printf("Username = %s", cfg.Username)
// Output: Username = luzifer
}
```
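Since the variable defaults are just a `map[string]string`, they can also be set directly in code; a minimal sketch, equivalent to loading a `~/.myapp.yml` that contains `username: luzifer`:
```go
package main

import (
	"fmt"

	"github.com/Luzifer/rconfig"
)

var cfg = struct {
	Username string `vardefault:"username" flag:"username" description:"Your username"`
}{}

func main() {
	// The map key matches the name used in the `vardefault` struct tag
	rconfig.SetVariableDefaults(map[string]string{"username": "luzifer"})
	rconfig.Parse(&cfg)

	fmt.Printf("Username = %s", cfg.Username)
	// Output: Username = luzifer
}
```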
## More info
You can see the full reference documentation of the rconfig package [at godoc.org](https://godoc.org/github.com/Luzifer/rconfig), or through go's standard documentation system by running `godoc -http=:6060` and browsing to [http://localhost:6060/pkg/github.com/Luzifer/rconfig](http://localhost:6060/pkg/github.com/Luzifer/rconfig) after installation.

View file

@ -1,64 +0,0 @@
package rconfig
import "strings"
type characterClass [2]rune
func (c characterClass) Contains(r rune) bool {
return c[0] <= r && c[1] >= r
}
type characterClasses []characterClass
func (c characterClasses) Contains(r rune) bool {
for _, cc := range c {
if cc.Contains(r) {
return true
}
}
return false
}
var (
charGroupUpperLetter = characterClass{'A', 'Z'}
charGroupLowerLetter = characterClass{'a', 'z'}
charGroupNumber = characterClass{'0', '9'}
charGroupLowerNumber = characterClasses{charGroupLowerLetter, charGroupNumber}
)
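// deriveEnvVarName converts a field name like MyFieldName into an ENV variable name like MY_FIELD_NAME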
func deriveEnvVarName(s string) string {
var (
words []string
word []rune
)
for _, l := range s {
switch {
case charGroupUpperLetter.Contains(l):
if len(word) > 0 && charGroupLowerNumber.Contains(word[len(word)-1]) {
words = append(words, string(word))
word = []rune{}
}
word = append(word, l)
case charGroupLowerLetter.Contains(l):
if len(word) > 1 && charGroupUpperLetter.Contains(word[len(word)-1]) {
words = append(words, string(word[0:len(word)-1]))
word = word[len(word)-1:]
}
word = append(word, l)
case charGroupNumber.Contains(l):
word = append(word, l)
default:
if len(word) > 0 {
words = append(words, string(word))
}
word = []rune{}
}
}
words = append(words, string(word))
return strings.ToUpper(strings.Join(words, "_"))
}

View file

@ -1,456 +0,0 @@
// Package rconfig implements a CLI configuration reader with struct-embedded
// defaults, environment variables and posix compatible flag parsing using
// the pflag library.
package rconfig
import (
"errors"
"fmt"
"os"
"reflect"
"strconv"
"strings"
"time"
"github.com/spf13/pflag"
validator "gopkg.in/validator.v2"
)
type afterFunc func() error
var (
autoEnv bool
fs *pflag.FlagSet
variableDefaults map[string]string
timeParserFormats = []string{
// Default constants
time.RFC3339Nano, time.RFC3339,
time.RFC1123Z, time.RFC1123,
time.RFC822Z, time.RFC822,
time.RFC850, time.RubyDate, time.UnixDate, time.ANSIC,
"2006-01-02 15:04:05.999999999 -0700 MST",
// More uncommon time formats
"2006-01-02 15:04:05", "2006-01-02 15:04:05Z07:00", // Simplified ISO time format
"01/02/2006 15:04:05", "01/02/2006 15:04:05Z07:00", // US time format
"02.01.2006 15:04:05", "02.01.2006 15:04:05Z07:00", // DE time format
}
)
func init() {
variableDefaults = make(map[string]string)
}
// Parse takes the pointer to a struct filled with variables which should be read
// from ENV, default or flag. The precedence in this is flag > ENV > default. So
// if a flag is specified on the CLI it will overwrite the ENV and otherwise ENV
// overwrites the default specified.
//
// For your configuration struct you can use the following struct-tags to control
// the behavior of rconfig:
//
// default: Set a default value
// vardefault: Read the default value from the variable defaults
// env: Read the value from this environment variable
// flag: Flag to read in format "long,short" (for example "listen,l")
// description: A help text for Usage output to guide your users
//
// The format in which you need to specify those values can be seen in the example
// for this function.
//
func Parse(config interface{}) error {
return parse(config, nil)
}
// ParseAndValidate works exactly like Parse but implements an additional run of
// the go-validator package on the configuration struct. Therefore additional struct
// tags are supported as described in the readme file of the go-validator package:
//
// https://github.com/go-validator/validator/tree/v2#usage
func ParseAndValidate(config interface{}) error {
return parseAndValidate(config, nil)
}
// Args returns the non-flag command-line arguments.
func Args() []string {
return fs.Args()
}
// AddTimeParserFormats adds custom formats to parse time.Time fields
func AddTimeParserFormats(f ...string) {
timeParserFormats = append(timeParserFormats, f...)
}
// AutoEnv enables or disables automated env variable guessing. If no `env` struct
// tag was set and AutoEnv is enabled the env variable name is derived from the
// name of the field: `MyFieldName` will get `MY_FIELD_NAME`
func AutoEnv(enable bool) {
autoEnv = enable
}
// Usage prints a basic usage message with the corresponding defaults for the flags
// to os.Stderr. The defaults are derived from the `default` struct-tag and the ENV.
func Usage() {
if fs != nil && fs.Parsed() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
fs.PrintDefaults()
}
}
// SetVariableDefaults presets the parser with a map of default values to be used
// when specifying the vardefault tag
func SetVariableDefaults(defaults map[string]string) {
variableDefaults = defaults
}
func parseAndValidate(in interface{}, args []string) error {
if err := parse(in, args); err != nil {
return err
}
return validator.Validate(in)
}
func parse(in interface{}, args []string) error {
if args == nil {
args = os.Args
}
fs = pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)
afterFuncs, err := execTags(in, fs)
if err != nil {
return err
}
if err := fs.Parse(args); err != nil {
return err
}
if afterFuncs != nil {
for _, f := range afterFuncs {
if err := f(); err != nil {
return err
}
}
}
return nil
}
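// execTags walks the struct fields using reflection, applies default / vardefault / ENV values and registers pflag flags according to the struct tags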
func execTags(in interface{}, fs *pflag.FlagSet) ([]afterFunc, error) {
if reflect.TypeOf(in).Kind() != reflect.Ptr {
return nil, errors.New("Calling parser with non-pointer")
}
if reflect.ValueOf(in).Elem().Kind() != reflect.Struct {
return nil, errors.New("Calling parser with pointer to non-struct")
}
afterFuncs := []afterFunc{}
st := reflect.ValueOf(in).Elem()
for i := 0; i < st.NumField(); i++ {
valField := st.Field(i)
typeField := st.Type().Field(i)
if typeField.Tag.Get("default") == "" && typeField.Tag.Get("env") == "" && typeField.Tag.Get("flag") == "" && typeField.Type.Kind() != reflect.Struct {
// None of our supported tags is present and it's not a sub-struct
continue
}
value := varDefault(typeField.Tag.Get("vardefault"), typeField.Tag.Get("default"))
value = envDefault(typeField, value)
parts := strings.Split(typeField.Tag.Get("flag"), ",")
switch typeField.Type {
case reflect.TypeOf(time.Duration(0)):
v, err := time.ParseDuration(value)
if err != nil {
if value == "" {
v = time.Duration(0)
} else {
return nil, err
}
}
if typeField.Tag.Get("flag") != "" {
if len(parts) == 1 {
fs.DurationVar(valField.Addr().Interface().(*time.Duration), parts[0], v, typeField.Tag.Get("description"))
} else {
fs.DurationVarP(valField.Addr().Interface().(*time.Duration), parts[0], parts[1], v, typeField.Tag.Get("description"))
}
} else {
valField.Set(reflect.ValueOf(v))
}
continue
case reflect.TypeOf(time.Time{}):
var sVar string
if typeField.Tag.Get("flag") != "" {
if len(parts) == 1 {
fs.StringVar(&sVar, parts[0], value, typeField.Tag.Get("description"))
} else {
fs.StringVarP(&sVar, parts[0], parts[1], value, typeField.Tag.Get("description"))
}
} else {
sVar = value
}
afterFuncs = append(afterFuncs, func(valField reflect.Value, sVar *string) func() error {
return func() error {
if *sVar == "" {
// No time, no problem
return nil
}
// Check whether we could have a timestamp
if ts, err := strconv.ParseInt(*sVar, 10, 64); err == nil {
t := time.Unix(ts, 0)
valField.Set(reflect.ValueOf(t))
return nil
}
// We haven't, so let's walk through possible time formats
matched := false
for _, tf := range timeParserFormats {
if t, err := time.Parse(tf, *sVar); err == nil {
matched = true
valField.Set(reflect.ValueOf(t))
return nil
}
}
if !matched {
return fmt.Errorf("Value %q did not match expected time formats", *sVar)
}
return nil
}
}(valField, &sVar))
continue
}
switch typeField.Type.Kind() {
case reflect.String:
if typeField.Tag.Get("flag") != "" {
if len(parts) == 1 {
fs.StringVar(valField.Addr().Interface().(*string), parts[0], value, typeField.Tag.Get("description"))
} else {
fs.StringVarP(valField.Addr().Interface().(*string), parts[0], parts[1], value, typeField.Tag.Get("description"))
}
} else {
valField.SetString(value)
}
case reflect.Bool:
v := value == "true"
if typeField.Tag.Get("flag") != "" {
if len(parts) == 1 {
fs.BoolVar(valField.Addr().Interface().(*bool), parts[0], v, typeField.Tag.Get("description"))
} else {
fs.BoolVarP(valField.Addr().Interface().(*bool), parts[0], parts[1], v, typeField.Tag.Get("description"))
}
} else {
valField.SetBool(v)
}
case reflect.Int, reflect.Int8, reflect.Int32, reflect.Int64:
vt, err := strconv.ParseInt(value, 10, 64)
if err != nil {
if value == "" {
vt = 0
} else {
return nil, err
}
}
if typeField.Tag.Get("flag") != "" {
registerFlagInt(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description"))
} else {
valField.SetInt(vt)
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
vt, err := strconv.ParseUint(value, 10, 64)
if err != nil {
if value == "" {
vt = 0
} else {
return nil, err
}
}
if typeField.Tag.Get("flag") != "" {
registerFlagUint(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description"))
} else {
valField.SetUint(vt)
}
case reflect.Float32, reflect.Float64:
vt, err := strconv.ParseFloat(value, 64)
if err != nil {
if value == "" {
vt = 0.0
} else {
return nil, err
}
}
if typeField.Tag.Get("flag") != "" {
registerFlagFloat(typeField.Type.Kind(), fs, valField.Addr().Interface(), parts, vt, typeField.Tag.Get("description"))
} else {
valField.SetFloat(vt)
}
case reflect.Struct:
afs, err := execTags(valField.Addr().Interface(), fs)
if err != nil {
return nil, err
}
afterFuncs = append(afterFuncs, afs...)
case reflect.Slice:
switch typeField.Type.Elem().Kind() {
case reflect.Int:
def := []int{}
for _, v := range strings.Split(value, ",") {
it, err := strconv.ParseInt(strings.TrimSpace(v), 10, 64)
if err != nil {
return nil, err
}
def = append(def, int(it))
}
if len(parts) == 1 {
fs.IntSliceVar(valField.Addr().Interface().(*[]int), parts[0], def, typeField.Tag.Get("description"))
} else {
fs.IntSliceVarP(valField.Addr().Interface().(*[]int), parts[0], parts[1], def, typeField.Tag.Get("description"))
}
case reflect.String:
del := typeField.Tag.Get("delimiter")
if len(del) == 0 {
del = ","
}
var def = []string{}
if value != "" {
def = strings.Split(value, del)
}
if len(parts) == 1 {
fs.StringSliceVar(valField.Addr().Interface().(*[]string), parts[0], def, typeField.Tag.Get("description"))
} else {
fs.StringSliceVarP(valField.Addr().Interface().(*[]string), parts[0], parts[1], def, typeField.Tag.Get("description"))
}
}
}
}
return afterFuncs, nil
}
func registerFlagFloat(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt float64, desc string) {
switch t {
case reflect.Float32:
if len(parts) == 1 {
fs.Float32Var(field.(*float32), parts[0], float32(vt), desc)
} else {
fs.Float32VarP(field.(*float32), parts[0], parts[1], float32(vt), desc)
}
case reflect.Float64:
if len(parts) == 1 {
fs.Float64Var(field.(*float64), parts[0], float64(vt), desc)
} else {
fs.Float64VarP(field.(*float64), parts[0], parts[1], float64(vt), desc)
}
}
}
func registerFlagInt(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt int64, desc string) {
switch t {
case reflect.Int:
if len(parts) == 1 {
fs.IntVar(field.(*int), parts[0], int(vt), desc)
} else {
fs.IntVarP(field.(*int), parts[0], parts[1], int(vt), desc)
}
case reflect.Int8:
if len(parts) == 1 {
fs.Int8Var(field.(*int8), parts[0], int8(vt), desc)
} else {
fs.Int8VarP(field.(*int8), parts[0], parts[1], int8(vt), desc)
}
case reflect.Int32:
if len(parts) == 1 {
fs.Int32Var(field.(*int32), parts[0], int32(vt), desc)
} else {
fs.Int32VarP(field.(*int32), parts[0], parts[1], int32(vt), desc)
}
case reflect.Int64:
if len(parts) == 1 {
fs.Int64Var(field.(*int64), parts[0], int64(vt), desc)
} else {
fs.Int64VarP(field.(*int64), parts[0], parts[1], int64(vt), desc)
}
}
}
func registerFlagUint(t reflect.Kind, fs *pflag.FlagSet, field interface{}, parts []string, vt uint64, desc string) {
switch t {
case reflect.Uint:
if len(parts) == 1 {
fs.UintVar(field.(*uint), parts[0], uint(vt), desc)
} else {
fs.UintVarP(field.(*uint), parts[0], parts[1], uint(vt), desc)
}
case reflect.Uint8:
if len(parts) == 1 {
fs.Uint8Var(field.(*uint8), parts[0], uint8(vt), desc)
} else {
fs.Uint8VarP(field.(*uint8), parts[0], parts[1], uint8(vt), desc)
}
case reflect.Uint16:
if len(parts) == 1 {
fs.Uint16Var(field.(*uint16), parts[0], uint16(vt), desc)
} else {
fs.Uint16VarP(field.(*uint16), parts[0], parts[1], uint16(vt), desc)
}
case reflect.Uint32:
if len(parts) == 1 {
fs.Uint32Var(field.(*uint32), parts[0], uint32(vt), desc)
} else {
fs.Uint32VarP(field.(*uint32), parts[0], parts[1], uint32(vt), desc)
}
case reflect.Uint64:
if len(parts) == 1 {
fs.Uint64Var(field.(*uint64), parts[0], uint64(vt), desc)
} else {
fs.Uint64VarP(field.(*uint64), parts[0], parts[1], uint64(vt), desc)
}
}
}
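// envDefault returns the value of the field's ENV variable (derived from the field name when AutoEnv is enabled) or the given default if the variable is unset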
func envDefault(field reflect.StructField, def string) string {
value := def
env := field.Tag.Get("env")
if env == "" && autoEnv {
env = deriveEnvVarName(field.Name)
}
if env != "" {
if e := os.Getenv(env); e != "" {
value = e
}
}
return value
}
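// varDefault looks up the named entry in the registered variable defaults and falls back to the given default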
func varDefault(name, def string) string {
value := def
if name != "" {
if v, ok := variableDefaults[name]; ok {
value = v
}
}
return value
}

View file

@ -1,7 +0,0 @@
module github.com/Luzifer/rconfig/v2
require (
github.com/spf13/pflag v1.0.3
gopkg.in/validator.v2 v2.0.0-20180514200540-135c24b11c19
gopkg.in/yaml.v2 v2.2.2
)

View file

@ -1,7 +0,0 @@
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/validator.v2 v2.0.0-20180514200540-135c24b11c19 h1:WB265cn5OpO+hK3pikC9hpP1zI/KTwmyMFKloW9eOVc=
gopkg.in/validator.v2 v2.0.0-20180514200540-135c24b11c19/go.mod h1:o4V0GXN9/CAmCsvJ0oXYZvrZOe7syiDZSN1GWGZTGzc=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View file

@ -1,27 +0,0 @@
package rconfig
import (
"io/ioutil"
"gopkg.in/yaml.v2"
)
// VarDefaultsFromYAMLFile reads contents of a file and calls VarDefaultsFromYAML
func VarDefaultsFromYAMLFile(filename string) map[string]string {
data, err := ioutil.ReadFile(filename)
if err != nil {
return make(map[string]string)
}
return VarDefaultsFromYAML(data)
}
// VarDefaultsFromYAML creates a vardefaults map from YAML raw data
func VarDefaultsFromYAML(in []byte) map[string]string {
out := make(map[string]string)
err := yaml.Unmarshal(in, &out)
if err != nil {
return make(map[string]string)
}
return out
}

View file

@ -1 +0,0 @@
.vscode/

View file

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Florian Sundermann
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -1,53 +0,0 @@
[![Join the chat at https://gitter.im/golang-barcode/Lobby](https://badges.gitter.im/golang-barcode/Lobby.svg)](https://gitter.im/golang-barcode/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
## Introduction ##
This is a package for Go which can be used to create different types of barcodes.
## Supported Barcode Types ##
* 2 of 5
* Aztec Code
* Codabar
* Code 128
* Code 39
* Code 93
* Datamatrix
* EAN 13
* EAN 8
* PDF 417
* QR Code
## Example ##
This is a simple example of how to create a QR code and write it to a PNG file
```go
package main
import (
"image/png"
"os"
"github.com/boombuler/barcode"
"github.com/boombuler/barcode/qr"
)
func main() {
// Create the barcode
qrCode, _ := qr.Encode("Hello World", qr.M, qr.Auto)
// Scale the barcode to 200x200 pixels
qrCode, _ = barcode.Scale(qrCode, 200, 200)
// create the output file
file, _ := os.Create("qrcode.png")
defer file.Close()
// encode the barcode as png
png.Encode(file, qrCode)
}
```
## Documentation ##
See [GoDoc](https://godoc.org/github.com/boombuler/barcode)
To create a barcode use the Encode function from one of the subpackages.

View file

@ -1,42 +0,0 @@
package barcode
import "image"
const (
TypeAztec = "Aztec"
TypeCodabar = "Codabar"
TypeCode128 = "Code 128"
TypeCode39 = "Code 39"
TypeCode93 = "Code 93"
TypeDataMatrix = "DataMatrix"
TypeEAN8 = "EAN 8"
TypeEAN13 = "EAN 13"
TypePDF = "PDF417"
TypeQR = "QR Code"
Type2of5 = "2 of 5"
Type2of5Interleaved = "2 of 5 (interleaved)"
)
// Contains some meta information about a barcode
type Metadata struct {
// the name of the barcode kind
CodeKind string
// contains 1 for 1D barcodes or 2 for 2D barcodes
Dimensions byte
}
// a rendered and encoded barcode
type Barcode interface {
image.Image
// returns some meta information about the barcode
Metadata() Metadata
// the data that was encoded in this barcode
Content() string
}
// Additional interface that some barcodes might implement to provide
// the value of its checksum.
type BarcodeIntCS interface {
Barcode
CheckSum() int
}

View file

@ -1 +0,0 @@
module github.com/boombuler/barcode

View file

@ -1,66 +0,0 @@
package qr
import (
"errors"
"fmt"
"strings"
"github.com/boombuler/barcode/utils"
)
const charSet string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:"
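// stringToAlphaIdx streams the index of each rune within charSet; a negative index signals a character that cannot be encoded in alphanumeric mode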
func stringToAlphaIdx(content string) <-chan int {
result := make(chan int)
go func() {
for _, r := range content {
idx := strings.IndexRune(charSet, r)
result <- idx
if idx < 0 {
break
}
}
close(result)
}()
return result
}
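// encodeAlphaNumeric encodes the content in QR alphanumeric mode: two characters per 11 bits, with a trailing odd character encoded in 6 bits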
func encodeAlphaNumeric(content string, ecl ErrorCorrectionLevel) (*utils.BitList, *versionInfo, error) {
contentLenIsOdd := len(content)%2 == 1
contentBitCount := (len(content) / 2) * 11
if contentLenIsOdd {
contentBitCount += 6
}
vi := findSmallestVersionInfo(ecl, alphaNumericMode, contentBitCount)
if vi == nil {
return nil, nil, errors.New("Too much data to encode")
}
res := new(utils.BitList)
res.AddBits(int(alphaNumericMode), 4)
res.AddBits(len(content), vi.charCountBits(alphaNumericMode))
encoder := stringToAlphaIdx(content)
for idx := 0; idx < len(content)/2; idx++ {
c1 := <-encoder
c2 := <-encoder
if c1 < 0 || c2 < 0 {
return nil, nil, fmt.Errorf("\"%s\" can not be encoded as %s", content, AlphaNumeric)
}
res.AddBits(c1*45+c2, 11)
}
if contentLenIsOdd {
c := <-encoder
if c < 0 {
return nil, nil, fmt.Errorf("\"%s\" can not be encoded as %s", content, AlphaNumeric)
}
res.AddBits(c, 6)
}
addPaddingAndTerminator(res, vi)
return res, vi, nil
}

View file

@ -1,23 +0,0 @@
package qr
import (
"fmt"
"github.com/boombuler/barcode/utils"
)
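// encodeAuto tries the Numeric, AlphaNumeric and Unicode encodings in that order and returns the first one that can represent the content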
func encodeAuto(content string, ecl ErrorCorrectionLevel) (*utils.BitList, *versionInfo, error) {
bits, vi, _ := Numeric.getEncoder()(content, ecl)
if bits != nil && vi != nil {
return bits, vi, nil
}
bits, vi, _ = AlphaNumeric.getEncoder()(content, ecl)
if bits != nil && vi != nil {
return bits, vi, nil
}
bits, vi, _ = Unicode.getEncoder()(content, ecl)
if bits != nil && vi != nil {
return bits, vi, nil
}
return nil, nil, fmt.Errorf("No encoding found to encode \"%s\"", content)
}

View file

@ -1,59 +0,0 @@
package qr
type block struct {
data []byte
ecc []byte
}
type blockList []*block
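// splitToBlocks distributes the data codewords over the block groups defined by the version info and calculates the error correction bytes for every block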
func splitToBlocks(data <-chan byte, vi *versionInfo) blockList {
result := make(blockList, vi.NumberOfBlocksInGroup1+vi.NumberOfBlocksInGroup2)
for b := 0; b < int(vi.NumberOfBlocksInGroup1); b++ {
blk := new(block)
blk.data = make([]byte, vi.DataCodeWordsPerBlockInGroup1)
for cw := 0; cw < int(vi.DataCodeWordsPerBlockInGroup1); cw++ {
blk.data[cw] = <-data
}
blk.ecc = ec.calcECC(blk.data, vi.ErrorCorrectionCodewordsPerBlock)
result[b] = blk
}
for b := 0; b < int(vi.NumberOfBlocksInGroup2); b++ {
blk := new(block)
blk.data = make([]byte, vi.DataCodeWordsPerBlockInGroup2)
for cw := 0; cw < int(vi.DataCodeWordsPerBlockInGroup2); cw++ {
blk.data[cw] = <-data
}
blk.ecc = ec.calcECC(blk.data, vi.ErrorCorrectionCodewordsPerBlock)
result[int(vi.NumberOfBlocksInGroup1)+b] = blk
}
return result
}
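// interleave merges the data codewords of all blocks codeword by codeword, followed by the interleaved error correction codewords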
func (bl blockList) interleave(vi *versionInfo) []byte {
var maxCodewordCount int
if vi.DataCodeWordsPerBlockInGroup1 > vi.DataCodeWordsPerBlockInGroup2 {
maxCodewordCount = int(vi.DataCodeWordsPerBlockInGroup1)
} else {
maxCodewordCount = int(vi.DataCodeWordsPerBlockInGroup2)
}
resultLen := (vi.DataCodeWordsPerBlockInGroup1+vi.ErrorCorrectionCodewordsPerBlock)*vi.NumberOfBlocksInGroup1 +
(vi.DataCodeWordsPerBlockInGroup2+vi.ErrorCorrectionCodewordsPerBlock)*vi.NumberOfBlocksInGroup2
result := make([]byte, 0, resultLen)
for i := 0; i < maxCodewordCount; i++ {
for b := 0; b < len(bl); b++ {
if len(bl[b].data) > i {
result = append(result, bl[b].data[i])
}
}
}
for i := 0; i < int(vi.ErrorCorrectionCodewordsPerBlock); i++ {
for b := 0; b < len(bl); b++ {
result = append(result, bl[b].ecc[i])
}
}
return result
}

View file

@ -1,416 +0,0 @@
// Package qr can be used to create QR barcodes.
package qr
import (
"image"
"github.com/boombuler/barcode"
"github.com/boombuler/barcode/utils"
)
type encodeFn func(content string, eccLevel ErrorCorrectionLevel) (*utils.BitList, *versionInfo, error)
// Encoding mode for QR Codes.
type Encoding byte
const (
// Auto will choose the best matching encoding
Auto Encoding = iota
// Numeric encoding only encodes numbers [0-9]
Numeric
// AlphaNumeric encoding only encodes uppercase letters, numbers and [Space], $, %, *, +, -, ., /, :
AlphaNumeric
// Unicode encoding encodes the string as utf-8
Unicode
// only for testing purpose
unknownEncoding
)
func (e Encoding) getEncoder() encodeFn {
switch e {
case Auto:
return encodeAuto
case Numeric:
return encodeNumeric
case AlphaNumeric:
return encodeAlphaNumeric
case Unicode:
return encodeUnicode
}
return nil
}
func (e Encoding) String() string {
switch e {
case Auto:
return "Auto"
case Numeric:
return "Numeric"
case AlphaNumeric:
return "AlphaNumeric"
case Unicode:
return "Unicode"
}
return ""
}
// Encode returns a QR barcode with the given content, error correction level and uses the given encoding
func Encode(content string, level ErrorCorrectionLevel, mode Encoding) (barcode.Barcode, error) {
bits, vi, err := mode.getEncoder()(content, level)
if err != nil {
return nil, err
}
blocks := splitToBlocks(bits.IterateBytes(), vi)
data := blocks.interleave(vi)
result := render(data, vi)
result.content = content
return result, nil
}
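// render places the function patterns, writes the data once for each of the eight mask patterns and returns the variant with the lowest penalty score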
func render(data []byte, vi *versionInfo) *qrcode {
dim := vi.modulWidth()
results := make([]*qrcode, 8)
for i := 0; i < 8; i++ {
results[i] = newBarcode(dim)
}
occupied := newBarcode(dim)
setAll := func(x int, y int, val bool) {
occupied.Set(x, y, true)
for i := 0; i < 8; i++ {
results[i].Set(x, y, val)
}
}
drawFinderPatterns(vi, setAll)
drawAlignmentPatterns(occupied, vi, setAll)
//Timing Pattern:
var i int
for i = 0; i < dim; i++ {
if !occupied.Get(i, 6) {
setAll(i, 6, i%2 == 0)
}
if !occupied.Get(6, i) {
setAll(6, i, i%2 == 0)
}
}
// Dark Module
setAll(8, dim-8, true)
drawVersionInfo(vi, setAll)
drawFormatInfo(vi, -1, occupied.Set)
for i := 0; i < 8; i++ {
drawFormatInfo(vi, i, results[i].Set)
}
// Write the data
var curBitNo int
for pos := range iterateModules(occupied) {
var curBit bool
if curBitNo < len(data)*8 {
curBit = ((data[curBitNo/8] >> uint(7-(curBitNo%8))) & 1) == 1
} else {
curBit = false
}
for i := 0; i < 8; i++ {
setMasked(pos.X, pos.Y, curBit, i, results[i].Set)
}
curBitNo++
}
lowestPenalty := ^uint(0)
lowestPenaltyIdx := -1
for i := 0; i < 8; i++ {
p := results[i].calcPenalty()
if p < lowestPenalty {
lowestPenalty = p
lowestPenaltyIdx = i
}
}
return results[lowestPenaltyIdx]
}
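// setMasked XORs the module value with the given mask pattern before writing it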
func setMasked(x, y int, val bool, mask int, set func(int, int, bool)) {
switch mask {
case 0:
val = val != (((y + x) % 2) == 0)
break
case 1:
val = val != ((y % 2) == 0)
break
case 2:
val = val != ((x % 3) == 0)
break
case 3:
val = val != (((y + x) % 3) == 0)
break
case 4:
val = val != (((y/2 + x/3) % 2) == 0)
break
case 5:
val = val != (((y*x)%2)+((y*x)%3) == 0)
break
case 6:
val = val != ((((y*x)%2)+((y*x)%3))%2 == 0)
break
case 7:
val = val != ((((y+x)%2)+((y*x)%3))%2 == 0)
}
set(x, y, val)
}
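// iterateModules yields all free module positions in the zig-zag placement order used for data, skipping modules occupied by function patterns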
func iterateModules(occupied *qrcode) <-chan image.Point {
result := make(chan image.Point)
allPoints := make(chan image.Point)
go func() {
curX := occupied.dimension - 1
curY := occupied.dimension - 1
isUpward := true
for true {
if isUpward {
allPoints <- image.Pt(curX, curY)
allPoints <- image.Pt(curX-1, curY)
curY--
if curY < 0 {
curY = 0
curX -= 2
if curX == 6 {
curX--
}
if curX < 0 {
break
}
isUpward = false
}
} else {
allPoints <- image.Pt(curX, curY)
allPoints <- image.Pt(curX-1, curY)
curY++
if curY >= occupied.dimension {
curY = occupied.dimension - 1
curX -= 2
if curX == 6 {
curX--
}
isUpward = true
if curX < 0 {
break
}
}
}
}
close(allPoints)
}()
go func() {
for pt := range allPoints {
if !occupied.Get(pt.X, pt.Y) {
result <- pt
}
}
close(result)
}()
return result
}
func drawFinderPatterns(vi *versionInfo, set func(int, int, bool)) {
dim := vi.modulWidth()
drawPattern := func(xoff int, yoff int) {
for x := -1; x < 8; x++ {
for y := -1; y < 8; y++ {
val := (x == 0 || x == 6 || y == 0 || y == 6 || (x > 1 && x < 5 && y > 1 && y < 5)) && (x <= 6 && y <= 6 && x >= 0 && y >= 0)
if x+xoff >= 0 && x+xoff < dim && y+yoff >= 0 && y+yoff < dim {
set(x+xoff, y+yoff, val)
}
}
}
}
drawPattern(0, 0)
drawPattern(0, dim-7)
drawPattern(dim-7, 0)
}
func drawAlignmentPatterns(occupied *qrcode, vi *versionInfo, set func(int, int, bool)) {
drawPattern := func(xoff int, yoff int) {
for x := -2; x <= 2; x++ {
for y := -2; y <= 2; y++ {
val := x == -2 || x == 2 || y == -2 || y == 2 || (x == 0 && y == 0)
set(x+xoff, y+yoff, val)
}
}
}
positions := vi.alignmentPatternPlacements()
for _, x := range positions {
for _, y := range positions {
if occupied.Get(x, y) {
continue
}
drawPattern(x, y)
}
}
}
var formatInfos = map[ErrorCorrectionLevel]map[int][]bool{
L: {
0: []bool{true, true, true, false, true, true, true, true, true, false, false, false, true, false, false},
1: []bool{true, true, true, false, false, true, false, true, true, true, true, false, false, true, true},
2: []bool{true, true, true, true, true, false, true, true, false, true, false, true, false, true, false},
3: []bool{true, true, true, true, false, false, false, true, false, false, true, true, true, false, true},
4: []bool{true, true, false, false, true, true, false, false, false, true, false, true, true, true, true},
5: []bool{true, true, false, false, false, true, true, false, false, false, true, true, false, false, false},
6: []bool{true, true, false, true, true, false, false, false, true, false, false, false, false, false, true},
7: []bool{true, true, false, true, false, false, true, false, true, true, true, false, true, true, false},
},
M: {
0: []bool{true, false, true, false, true, false, false, false, false, false, true, false, false, true, false},
1: []bool{true, false, true, false, false, false, true, false, false, true, false, false, true, false, true},
2: []bool{true, false, true, true, true, true, false, false, true, true, true, true, true, false, false},
3: []bool{true, false, true, true, false, true, true, false, true, false, false, true, false, true, true},
4: []bool{true, false, false, false, true, false, true, true, true, true, true, true, false, false, true},
5: []bool{true, false, false, false, false, false, false, true, true, false, false, true, true, true, false},
6: []bool{true, false, false, true, true, true, true, true, false, false, true, false, true, true, true},
7: []bool{true, false, false, true, false, true, false, true, false, true, false, false, false, false, false},
},
Q: {
0: []bool{false, true, true, false, true, false, true, false, true, false, true, true, true, true, true},
1: []bool{false, true, true, false, false, false, false, false, true, true, false, true, false, false, false},
2: []bool{false, true, true, true, true, true, true, false, false, true, true, false, false, false, true},
3: []bool{false, true, true, true, false, true, false, false, false, false, false, false, true, true, false},
4: []bool{false, true, false, false, true, false, false, true, false, true, true, false, true, false, false},
5: []bool{false, true, false, false, false, false, true, true, false, false, false, false, false, true, true},
6: []bool{false, true, false, true, true, true, false, true, true, false, true, true, false, true, false},
7: []bool{false, true, false, true, false, true, true, true, true, true, false, true, true, false, true},
},
H: {
0: []bool{false, false, true, false, true, true, false, true, false, false, false, true, false, false, true},
1: []bool{false, false, true, false, false, true, true, true, false, true, true, true, true, true, false},
2: []bool{false, false, true, true, true, false, false, true, true, true, false, false, true, true, true},
3: []bool{false, false, true, true, false, false, true, true, true, false, true, false, false, false, false},
4: []bool{false, false, false, false, true, true, true, false, true, true, false, false, false, true, false},
5: []bool{false, false, false, false, false, true, false, false, true, false, true, false, true, false, true},
6: []bool{false, false, false, true, true, false, true, false, false, false, false, true, true, false, false},
7: []bool{false, false, false, true, false, false, false, false, false, true, true, true, false, true, true},
},
}
func drawFormatInfo(vi *versionInfo, usedMask int, set func(int, int, bool)) {
var formatInfo []bool
if usedMask == -1 {
formatInfo = []bool{true, true, true, true, true, true, true, true, true, true, true, true, true, true, true} // Set all to true because -1 --> occupied mask.
} else {
formatInfo = formatInfos[vi.Level][usedMask]
}
if len(formatInfo) == 15 {
dim := vi.modulWidth()
set(0, 8, formatInfo[0])
set(1, 8, formatInfo[1])
set(2, 8, formatInfo[2])
set(3, 8, formatInfo[3])
set(4, 8, formatInfo[4])
set(5, 8, formatInfo[5])
set(7, 8, formatInfo[6])
set(8, 8, formatInfo[7])
set(8, 7, formatInfo[8])
set(8, 5, formatInfo[9])
set(8, 4, formatInfo[10])
set(8, 3, formatInfo[11])
set(8, 2, formatInfo[12])
set(8, 1, formatInfo[13])
set(8, 0, formatInfo[14])
set(8, dim-1, formatInfo[0])
set(8, dim-2, formatInfo[1])
set(8, dim-3, formatInfo[2])
set(8, dim-4, formatInfo[3])
set(8, dim-5, formatInfo[4])
set(8, dim-6, formatInfo[5])
set(8, dim-7, formatInfo[6])
set(dim-8, 8, formatInfo[7])
set(dim-7, 8, formatInfo[8])
set(dim-6, 8, formatInfo[9])
set(dim-5, 8, formatInfo[10])
set(dim-4, 8, formatInfo[11])
set(dim-3, 8, formatInfo[12])
set(dim-2, 8, formatInfo[13])
set(dim-1, 8, formatInfo[14])
}
}
var versionInfoBitsByVersion = map[byte][]bool{
7: []bool{false, false, false, true, true, true, true, true, false, false, true, false, false, true, false, true, false, false},
8: []bool{false, false, true, false, false, false, false, true, false, true, true, false, true, true, true, true, false, false},
9: []bool{false, false, true, false, false, true, true, false, true, false, true, false, false, true, true, false, false, true},
10: []bool{false, false, true, false, true, false, false, true, false, false, true, true, false, true, false, false, true, true},
11: []bool{false, false, true, false, true, true, true, false, true, true, true, true, true, true, false, true, true, false},
12: []bool{false, false, true, true, false, false, false, true, true, true, false, true, true, false, false, false, true, false},
13: []bool{false, false, true, true, false, true, true, false, false, false, false, true, false, false, false, true, true, true},
14: []bool{false, false, true, true, true, false, false, true, true, false, false, false, false, false, true, true, false, true},
15: []bool{false, false, true, true, true, true, true, false, false, true, false, false, true, false, true, false, false, false},
16: []bool{false, true, false, false, false, false, true, false, true, true, false, true, true, true, true, false, false, false},
17: []bool{false, true, false, false, false, true, false, true, false, false, false, true, false, true, true, true, false, true},
18: []bool{false, true, false, false, true, false, true, false, true, false, false, false, false, true, false, true, true, true},
19: []bool{false, true, false, false, true, true, false, true, false, true, false, false, true, true, false, false, true, false},
20: []bool{false, true, false, true, false, false, true, false, false, true, true, false, true, false, false, true, true, false},
21: []bool{false, true, false, true, false, true, false, true, true, false, true, false, false, false, false, false, true, true},
22: []bool{false, true, false, true, true, false, true, false, false, false, true, true, false, false, true, false, false, true},
23: []bool{false, true, false, true, true, true, false, true, true, true, true, true, true, false, true, true, false, false},
24: []bool{false, true, true, false, false, false, true, true, true, false, true, true, false, false, false, true, false, false},
25: []bool{false, true, true, false, false, true, false, false, false, true, true, true, true, false, false, false, false, true},
26: []bool{false, true, true, false, true, false, true, true, true, true, true, false, true, false, true, false, true, true},
27: []bool{false, true, true, false, true, true, false, false, false, false, true, false, false, false, true, true, true, false},
28: []bool{false, true, true, true, false, false, true, true, false, false, false, false, false, true, true, false, true, false},
29: []bool{false, true, true, true, false, true, false, false, true, true, false, false, true, true, true, true, true, true},
30: []bool{false, true, true, true, true, false, true, true, false, true, false, true, true, true, false, true, false, true},
31: []bool{false, true, true, true, true, true, false, false, true, false, false, true, false, true, false, false, false, false},
32: []bool{true, false, false, false, false, false, true, false, false, true, true, true, false, true, false, true, false, true},
33: []bool{true, false, false, false, false, true, false, true, true, false, true, true, true, true, false, false, false, false},
34: []bool{true, false, false, false, true, false, true, false, false, false, true, false, true, true, true, false, true, false},
35: []bool{true, false, false, false, true, true, false, true, true, true, true, false, false, true, true, true, true, true},
36: []bool{true, false, false, true, false, false, true, false, true, true, false, false, false, false, true, false, true, true},
37: []bool{true, false, false, true, false, true, false, true, false, false, false, false, true, false, true, true, true, false},
38: []bool{true, false, false, true, true, false, true, false, true, false, false, true, true, false, false, true, false, false},
39: []bool{true, false, false, true, true, true, false, true, false, true, false, true, false, false, false, false, false, true},
40: []bool{true, false, true, false, false, false, true, true, false, false, false, true, true, false, true, false, false, true},
}
func drawVersionInfo(vi *versionInfo, set func(int, int, bool)) {
versionInfoBits, ok := versionInfoBitsByVersion[vi.Version]
if ok && len(versionInfoBits) > 0 {
for i := 0; i < len(versionInfoBits); i++ {
x := (vi.modulWidth() - 11) + i%3
y := i / 3
set(x, y, versionInfoBits[len(versionInfoBits)-i-1])
set(y, x, versionInfoBits[len(versionInfoBits)-i-1])
}
}
}
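// addPaddingAndTerminator appends the terminator bits, pads to a full byte and fills the remaining data capacity with the alternating pad bytes 236 and 17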
func addPaddingAndTerminator(bl *utils.BitList, vi *versionInfo) {
for i := 0; i < 4 && bl.Len() < vi.totalDataBytes()*8; i++ {
bl.AddBit(false)
}
for bl.Len()%8 != 0 {
bl.AddBit(false)
}
for i := 0; bl.Len() < vi.totalDataBytes()*8; i++ {
if i%2 == 0 {
bl.AddByte(236)
} else {
bl.AddByte(17)
}
}
}

View file

@ -1,29 +0,0 @@
package qr
import (
"github.com/boombuler/barcode/utils"
)
type errorCorrection struct {
rs *utils.ReedSolomonEncoder
}
var ec = newErrorCorrection()
func newErrorCorrection() *errorCorrection {
fld := utils.NewGaloisField(285, 256, 0)
return &errorCorrection{utils.NewReedSolomonEncoder(fld)}
}
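// calcECC calculates eccCount Reed-Solomon error correction codewords for the given data block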
func (ec *errorCorrection) calcECC(data []byte, eccCount byte) []byte {
dataInts := make([]int, len(data))
for i := 0; i < len(data); i++ {
dataInts[i] = int(data[i])
}
res := ec.rs.Encode(dataInts, int(eccCount))
result := make([]byte, len(res))
for i := 0; i < len(res); i++ {
result[i] = byte(res[i])
}
return result
}

View file

@ -1,56 +0,0 @@
package qr
import (
"errors"
"fmt"
"strconv"
"github.com/boombuler/barcode/utils"
)
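// encodeNumeric encodes the content in QR numeric mode: three digits per 10 bits, with 4 or 7 bits for a trailing remainder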
func encodeNumeric(content string, ecl ErrorCorrectionLevel) (*utils.BitList, *versionInfo, error) {
contentBitCount := (len(content) / 3) * 10
switch len(content) % 3 {
case 1:
contentBitCount += 4
case 2:
contentBitCount += 7
}
vi := findSmallestVersionInfo(ecl, numericMode, contentBitCount)
if vi == nil {
return nil, nil, errors.New("Too much data to encode")
}
res := new(utils.BitList)
res.AddBits(int(numericMode), 4)
res.AddBits(len(content), vi.charCountBits(numericMode))
for pos := 0; pos < len(content); pos += 3 {
var curStr string
if pos+3 <= len(content) {
curStr = content[pos : pos+3]
} else {
curStr = content[pos:]
}
i, err := strconv.Atoi(curStr)
if err != nil || i < 0 {
return nil, nil, fmt.Errorf("\"%s\" can not be encoded as %s", content, Numeric)
}
var bitCnt byte
switch len(curStr) % 3 {
case 0:
bitCnt = 10
case 1:
bitCnt = 4
break
case 2:
bitCnt = 7
break
}
res.AddBits(i, bitCnt)
}
addPaddingAndTerminator(res, vi)
return res, vi, nil
}

View file

@ -1,166 +0,0 @@
package qr
import (
"image"
"image/color"
"math"
"github.com/boombuler/barcode"
"github.com/boombuler/barcode/utils"
)
type qrcode struct {
dimension int
data *utils.BitList
content string
}
func (qr *qrcode) Content() string {
return qr.content
}
func (qr *qrcode) Metadata() barcode.Metadata {
return barcode.Metadata{barcode.TypeQR, 2}
}
func (qr *qrcode) ColorModel() color.Model {
return color.Gray16Model
}
func (qr *qrcode) Bounds() image.Rectangle {
return image.Rect(0, 0, qr.dimension, qr.dimension)
}
func (qr *qrcode) At(x, y int) color.Color {
if qr.Get(x, y) {
return color.Black
}
return color.White
}
func (qr *qrcode) Get(x, y int) bool {
return qr.data.GetBit(x*qr.dimension + y)
}
func (qr *qrcode) Set(x, y int, val bool) {
qr.data.SetBit(x*qr.dimension+y, val)
}
func (qr *qrcode) calcPenalty() uint {
return qr.calcPenaltyRule1() + qr.calcPenaltyRule2() + qr.calcPenaltyRule3() + qr.calcPenaltyRule4()
}
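// calcPenaltyRule1 penalizes runs of five or more identically colored modules in a row or column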
func (qr *qrcode) calcPenaltyRule1() uint {
var result uint
for x := 0; x < qr.dimension; x++ {
checkForX := false
var cntX uint
checkForY := false
var cntY uint
for y := 0; y < qr.dimension; y++ {
if qr.Get(x, y) == checkForX {
cntX++
} else {
checkForX = !checkForX
if cntX >= 5 {
result += cntX - 2
}
cntX = 1
}
if qr.Get(y, x) == checkForY {
cntY++
} else {
checkForY = !checkForY
if cntY >= 5 {
result += cntY - 2
}
cntY = 1
}
}
if cntX >= 5 {
result += cntX - 2
}
if cntY >= 5 {
result += cntY - 2
}
}
return result
}
func (qr *qrcode) calcPenaltyRule2() uint {
var result uint
for x := 0; x < qr.dimension-1; x++ {
for y := 0; y < qr.dimension-1; y++ {
check := qr.Get(x, y)
if qr.Get(x, y+1) == check && qr.Get(x+1, y) == check && qr.Get(x+1, y+1) == check {
result += 3
}
}
}
return result
}
func (qr *qrcode) calcPenaltyRule3() uint {
pattern1 := []bool{true, false, true, true, true, false, true, false, false, false, false}
pattern2 := []bool{false, false, false, false, true, false, true, true, true, false, true}
var result uint
for x := 0; x <= qr.dimension-len(pattern1); x++ {
for y := 0; y < qr.dimension; y++ {
pattern1XFound := true
pattern2XFound := true
pattern1YFound := true
pattern2YFound := true
for i := 0; i < len(pattern1); i++ {
iv := qr.Get(x+i, y)
if iv != pattern1[i] {
pattern1XFound = false
}
if iv != pattern2[i] {
pattern2XFound = false
}
iv = qr.Get(y, x+i)
if iv != pattern1[i] {
pattern1YFound = false
}
if iv != pattern2[i] {
pattern2YFound = false
}
}
if pattern1XFound || pattern2XFound {
result += 40
}
if pattern1YFound || pattern2YFound {
result += 40
}
}
}
return result
}
func (qr *qrcode) calcPenaltyRule4() uint {
totalNum := qr.data.Len()
trueCnt := 0
for i := 0; i < totalNum; i++ {
if qr.data.GetBit(i) {
trueCnt++
}
}
percDark := float64(trueCnt) * 100 / float64(totalNum)
floor := math.Abs(math.Floor(percDark/5) - 10)
ceil := math.Abs(math.Ceil(percDark/5) - 10)
return uint(math.Min(floor, ceil) * 10)
}
func newBarcode(dim int) *qrcode {
res := new(qrcode)
res.dimension = dim
res.data = utils.NewBitList(dim * dim)
return res
}
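calcPenaltyRule4 above scores how far the dark-module ratio strays from 50%; a small hedged sketch, using nothing beyond the arithmetic shown, to sanity-check a few values:

```go
package main

import (
	"fmt"
	"math"
)

// penaltyRule4 mirrors calcPenaltyRule4 for a given dark-module percentage.
func penaltyRule4(percDark float64) uint {
	floor := math.Abs(math.Floor(percDark/5) - 10)
	ceil := math.Abs(math.Ceil(percDark/5) - 10)
	return uint(math.Min(floor, ceil) * 10)
}

func main() {
	fmt.Println(penaltyRule4(50)) // 0: exactly half the modules are dark
	fmt.Println(penaltyRule4(43)) // 10
	fmt.Println(penaltyRule4(30)) // 40: 30% dark is four 5% steps from 50%
}
```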


@ -1,27 +0,0 @@
package qr
import (
"errors"
"github.com/boombuler/barcode/utils"
)
func encodeUnicode(content string, ecl ErrorCorrectionLevel) (*utils.BitList, *versionInfo, error) {
data := []byte(content)
vi := findSmallestVersionInfo(ecl, byteMode, len(data)*8)
if vi == nil {
return nil, nil, errors.New("too much data to encode")
}
// It's not strictly correct to add the raw UTF-8 bytes to the result directly, but most readers can't handle the
// required ECI header...
res := new(utils.BitList)
res.AddBits(int(byteMode), 4)
res.AddBits(len(content), vi.charCountBits(byteMode))
for _, b := range data {
res.AddByte(b)
}
addPaddingAndTerminator(res, vi)
return res, vi, nil
}


@ -1,310 +0,0 @@
package qr
import "math"
// ErrorCorrectionLevel indicates the amount of "backup data" stored in the QR code
type ErrorCorrectionLevel byte
const (
// L recovers 7% of data
L ErrorCorrectionLevel = iota
// M recovers 15% of data
M
// Q recovers 25% of data
Q
// H recovers 30% of data
H
)
func (ecl ErrorCorrectionLevel) String() string {
switch ecl {
case L:
return "L"
case M:
return "M"
case Q:
return "Q"
case H:
return "H"
}
return "unknown"
}
type encodingMode byte
const (
numericMode encodingMode = 1
alphaNumericMode encodingMode = 2
byteMode encodingMode = 4
kanjiMode encodingMode = 8
)
type versionInfo struct {
Version byte
Level ErrorCorrectionLevel
ErrorCorrectionCodewordsPerBlock byte
NumberOfBlocksInGroup1 byte
DataCodeWordsPerBlockInGroup1 byte
NumberOfBlocksInGroup2 byte
DataCodeWordsPerBlockInGroup2 byte
}
var versionInfos = []*versionInfo{
&versionInfo{1, L, 7, 1, 19, 0, 0},
&versionInfo{1, M, 10, 1, 16, 0, 0},
&versionInfo{1, Q, 13, 1, 13, 0, 0},
&versionInfo{1, H, 17, 1, 9, 0, 0},
&versionInfo{2, L, 10, 1, 34, 0, 0},
&versionInfo{2, M, 16, 1, 28, 0, 0},
&versionInfo{2, Q, 22, 1, 22, 0, 0},
&versionInfo{2, H, 28, 1, 16, 0, 0},
&versionInfo{3, L, 15, 1, 55, 0, 0},
&versionInfo{3, M, 26, 1, 44, 0, 0},
&versionInfo{3, Q, 18, 2, 17, 0, 0},
&versionInfo{3, H, 22, 2, 13, 0, 0},
&versionInfo{4, L, 20, 1, 80, 0, 0},
&versionInfo{4, M, 18, 2, 32, 0, 0},
&versionInfo{4, Q, 26, 2, 24, 0, 0},
&versionInfo{4, H, 16, 4, 9, 0, 0},
&versionInfo{5, L, 26, 1, 108, 0, 0},
&versionInfo{5, M, 24, 2, 43, 0, 0},
&versionInfo{5, Q, 18, 2, 15, 2, 16},
&versionInfo{5, H, 22, 2, 11, 2, 12},
&versionInfo{6, L, 18, 2, 68, 0, 0},
&versionInfo{6, M, 16, 4, 27, 0, 0},
&versionInfo{6, Q, 24, 4, 19, 0, 0},
&versionInfo{6, H, 28, 4, 15, 0, 0},
&versionInfo{7, L, 20, 2, 78, 0, 0},
&versionInfo{7, M, 18, 4, 31, 0, 0},
&versionInfo{7, Q, 18, 2, 14, 4, 15},
&versionInfo{7, H, 26, 4, 13, 1, 14},
&versionInfo{8, L, 24, 2, 97, 0, 0},
&versionInfo{8, M, 22, 2, 38, 2, 39},
&versionInfo{8, Q, 22, 4, 18, 2, 19},
&versionInfo{8, H, 26, 4, 14, 2, 15},
&versionInfo{9, L, 30, 2, 116, 0, 0},
&versionInfo{9, M, 22, 3, 36, 2, 37},
&versionInfo{9, Q, 20, 4, 16, 4, 17},
&versionInfo{9, H, 24, 4, 12, 4, 13},
&versionInfo{10, L, 18, 2, 68, 2, 69},
&versionInfo{10, M, 26, 4, 43, 1, 44},
&versionInfo{10, Q, 24, 6, 19, 2, 20},
&versionInfo{10, H, 28, 6, 15, 2, 16},
&versionInfo{11, L, 20, 4, 81, 0, 0},
&versionInfo{11, M, 30, 1, 50, 4, 51},
&versionInfo{11, Q, 28, 4, 22, 4, 23},
&versionInfo{11, H, 24, 3, 12, 8, 13},
&versionInfo{12, L, 24, 2, 92, 2, 93},
&versionInfo{12, M, 22, 6, 36, 2, 37},
&versionInfo{12, Q, 26, 4, 20, 6, 21},
&versionInfo{12, H, 28, 7, 14, 4, 15},
&versionInfo{13, L, 26, 4, 107, 0, 0},
&versionInfo{13, M, 22, 8, 37, 1, 38},
&versionInfo{13, Q, 24, 8, 20, 4, 21},
&versionInfo{13, H, 22, 12, 11, 4, 12},
&versionInfo{14, L, 30, 3, 115, 1, 116},
&versionInfo{14, M, 24, 4, 40, 5, 41},
&versionInfo{14, Q, 20, 11, 16, 5, 17},
&versionInfo{14, H, 24, 11, 12, 5, 13},
&versionInfo{15, L, 22, 5, 87, 1, 88},
&versionInfo{15, M, 24, 5, 41, 5, 42},
&versionInfo{15, Q, 30, 5, 24, 7, 25},
&versionInfo{15, H, 24, 11, 12, 7, 13},
&versionInfo{16, L, 24, 5, 98, 1, 99},
&versionInfo{16, M, 28, 7, 45, 3, 46},
&versionInfo{16, Q, 24, 15, 19, 2, 20},
&versionInfo{16, H, 30, 3, 15, 13, 16},
&versionInfo{17, L, 28, 1, 107, 5, 108},
&versionInfo{17, M, 28, 10, 46, 1, 47},
&versionInfo{17, Q, 28, 1, 22, 15, 23},
&versionInfo{17, H, 28, 2, 14, 17, 15},
&versionInfo{18, L, 30, 5, 120, 1, 121},
&versionInfo{18, M, 26, 9, 43, 4, 44},
&versionInfo{18, Q, 28, 17, 22, 1, 23},
&versionInfo{18, H, 28, 2, 14, 19, 15},
&versionInfo{19, L, 28, 3, 113, 4, 114},
&versionInfo{19, M, 26, 3, 44, 11, 45},
&versionInfo{19, Q, 26, 17, 21, 4, 22},
&versionInfo{19, H, 26, 9, 13, 16, 14},
&versionInfo{20, L, 28, 3, 107, 5, 108},
&versionInfo{20, M, 26, 3, 41, 13, 42},
&versionInfo{20, Q, 30, 15, 24, 5, 25},
&versionInfo{20, H, 28, 15, 15, 10, 16},
&versionInfo{21, L, 28, 4, 116, 4, 117},
&versionInfo{21, M, 26, 17, 42, 0, 0},
&versionInfo{21, Q, 28, 17, 22, 6, 23},
&versionInfo{21, H, 30, 19, 16, 6, 17},
&versionInfo{22, L, 28, 2, 111, 7, 112},
&versionInfo{22, M, 28, 17, 46, 0, 0},
&versionInfo{22, Q, 30, 7, 24, 16, 25},
&versionInfo{22, H, 24, 34, 13, 0, 0},
&versionInfo{23, L, 30, 4, 121, 5, 122},
&versionInfo{23, M, 28, 4, 47, 14, 48},
&versionInfo{23, Q, 30, 11, 24, 14, 25},
&versionInfo{23, H, 30, 16, 15, 14, 16},
&versionInfo{24, L, 30, 6, 117, 4, 118},
&versionInfo{24, M, 28, 6, 45, 14, 46},
&versionInfo{24, Q, 30, 11, 24, 16, 25},
&versionInfo{24, H, 30, 30, 16, 2, 17},
&versionInfo{25, L, 26, 8, 106, 4, 107},
&versionInfo{25, M, 28, 8, 47, 13, 48},
&versionInfo{25, Q, 30, 7, 24, 22, 25},
&versionInfo{25, H, 30, 22, 15, 13, 16},
&versionInfo{26, L, 28, 10, 114, 2, 115},
&versionInfo{26, M, 28, 19, 46, 4, 47},
&versionInfo{26, Q, 28, 28, 22, 6, 23},
&versionInfo{26, H, 30, 33, 16, 4, 17},
&versionInfo{27, L, 30, 8, 122, 4, 123},
&versionInfo{27, M, 28, 22, 45, 3, 46},
&versionInfo{27, Q, 30, 8, 23, 26, 24},
&versionInfo{27, H, 30, 12, 15, 28, 16},
&versionInfo{28, L, 30, 3, 117, 10, 118},
&versionInfo{28, M, 28, 3, 45, 23, 46},
&versionInfo{28, Q, 30, 4, 24, 31, 25},
&versionInfo{28, H, 30, 11, 15, 31, 16},
&versionInfo{29, L, 30, 7, 116, 7, 117},
&versionInfo{29, M, 28, 21, 45, 7, 46},
&versionInfo{29, Q, 30, 1, 23, 37, 24},
&versionInfo{29, H, 30, 19, 15, 26, 16},
&versionInfo{30, L, 30, 5, 115, 10, 116},
&versionInfo{30, M, 28, 19, 47, 10, 48},
&versionInfo{30, Q, 30, 15, 24, 25, 25},
&versionInfo{30, H, 30, 23, 15, 25, 16},
&versionInfo{31, L, 30, 13, 115, 3, 116},
&versionInfo{31, M, 28, 2, 46, 29, 47},
&versionInfo{31, Q, 30, 42, 24, 1, 25},
&versionInfo{31, H, 30, 23, 15, 28, 16},
&versionInfo{32, L, 30, 17, 115, 0, 0},
&versionInfo{32, M, 28, 10, 46, 23, 47},
&versionInfo{32, Q, 30, 10, 24, 35, 25},
&versionInfo{32, H, 30, 19, 15, 35, 16},
&versionInfo{33, L, 30, 17, 115, 1, 116},
&versionInfo{33, M, 28, 14, 46, 21, 47},
&versionInfo{33, Q, 30, 29, 24, 19, 25},
&versionInfo{33, H, 30, 11, 15, 46, 16},
&versionInfo{34, L, 30, 13, 115, 6, 116},
&versionInfo{34, M, 28, 14, 46, 23, 47},
&versionInfo{34, Q, 30, 44, 24, 7, 25},
&versionInfo{34, H, 30, 59, 16, 1, 17},
&versionInfo{35, L, 30, 12, 121, 7, 122},
&versionInfo{35, M, 28, 12, 47, 26, 48},
&versionInfo{35, Q, 30, 39, 24, 14, 25},
&versionInfo{35, H, 30, 22, 15, 41, 16},
&versionInfo{36, L, 30, 6, 121, 14, 122},
&versionInfo{36, M, 28, 6, 47, 34, 48},
&versionInfo{36, Q, 30, 46, 24, 10, 25},
&versionInfo{36, H, 30, 2, 15, 64, 16},
&versionInfo{37, L, 30, 17, 122, 4, 123},
&versionInfo{37, M, 28, 29, 46, 14, 47},
&versionInfo{37, Q, 30, 49, 24, 10, 25},
&versionInfo{37, H, 30, 24, 15, 46, 16},
&versionInfo{38, L, 30, 4, 122, 18, 123},
&versionInfo{38, M, 28, 13, 46, 32, 47},
&versionInfo{38, Q, 30, 48, 24, 14, 25},
&versionInfo{38, H, 30, 42, 15, 32, 16},
&versionInfo{39, L, 30, 20, 117, 4, 118},
&versionInfo{39, M, 28, 40, 47, 7, 48},
&versionInfo{39, Q, 30, 43, 24, 22, 25},
&versionInfo{39, H, 30, 10, 15, 67, 16},
&versionInfo{40, L, 30, 19, 118, 6, 119},
&versionInfo{40, M, 28, 18, 47, 31, 48},
&versionInfo{40, Q, 30, 34, 24, 34, 25},
&versionInfo{40, H, 30, 20, 15, 61, 16},
}
func (vi *versionInfo) totalDataBytes() int {
g1Data := int(vi.NumberOfBlocksInGroup1) * int(vi.DataCodeWordsPerBlockInGroup1)
g2Data := int(vi.NumberOfBlocksInGroup2) * int(vi.DataCodeWordsPerBlockInGroup2)
return (g1Data + g2Data)
}
func (vi *versionInfo) charCountBits(m encodingMode) byte {
switch m {
case numericMode:
if vi.Version < 10 {
return 10
} else if vi.Version < 27 {
return 12
}
return 14
case alphaNumericMode:
if vi.Version < 10 {
return 9
} else if vi.Version < 27 {
return 11
}
return 13
case byteMode:
if vi.Version < 10 {
return 8
}
return 16
case kanjiMode:
if vi.Version < 10 {
return 8
} else if vi.Version < 27 {
return 10
}
return 12
default:
return 0
}
}
func (vi *versionInfo) modulWidth() int {
return ((int(vi.Version) - 1) * 4) + 21
}
func (vi *versionInfo) alignmentPatternPlacements() []int {
if vi.Version == 1 {
return make([]int, 0)
}
first := 6
last := vi.modulWidth() - 7
space := float64(last - first)
count := int(math.Ceil(space/28)) + 1
result := make([]int, count)
result[0] = first
result[len(result)-1] = last
if count > 2 {
step := int(math.Ceil(float64(last-first) / float64(count-1)))
if step%2 == 1 {
frac := float64(last-first) / float64(count-1)
_, x := math.Modf(frac)
if x >= 0.5 {
frac = math.Ceil(frac)
} else {
frac = math.Floor(frac)
}
if int(frac)%2 == 0 {
step--
} else {
step++
}
}
for i := 1; i <= count-2; i++ {
result[i] = last - (step * (count - 1 - i))
}
}
return result
}
func findSmallestVersionInfo(ecl ErrorCorrectionLevel, mode encodingMode, dataBits int) *versionInfo {
dataBits = dataBits + 4 // mode indicator
for _, vi := range versionInfos {
if vi.Level == ecl {
if (vi.totalDataBytes() * 8) >= (dataBits + int(vi.charCountBits(mode))) {
return vi
}
}
}
return nil
}
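As a rough illustration of how findSmallestVersionInfo picks a symbol version (hedged; the numbers only restate the table and formulas above): 20 bytes in byte mode at level M need 4 mode bits + 8 character-count bits + 160 data bits = 172 bits, which doesn't fit version 1-M (16 data codewords = 128 bits) but does fit version 2-M (28 data codewords = 224 bits).

```go
package main

import "fmt"

// requiredBits restates the check in findSmallestVersionInfo for byte mode
// at versions below 10 (mode indicator + char count field + payload).
func requiredBits(dataBytes int) int {
	return 4 + 8 + dataBytes*8
}

func main() {
	need := requiredBits(20)
	fmt.Println(need)         // 172
	fmt.Println(16*8 >= need) // false: version 1-M (16 data codewords)
	fmt.Println(28*8 >= need) // true:  version 2-M (28 data codewords)
}
```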


@ -1,134 +0,0 @@
package barcode
import (
"errors"
"fmt"
"image"
"image/color"
"math"
)
type wrapFunc func(x, y int) color.Color
type scaledBarcode struct {
wrapped Barcode
wrapperFunc wrapFunc
rect image.Rectangle
}
type intCSscaledBC struct {
scaledBarcode
}
func (bc *scaledBarcode) Content() string {
return bc.wrapped.Content()
}
func (bc *scaledBarcode) Metadata() Metadata {
return bc.wrapped.Metadata()
}
func (bc *scaledBarcode) ColorModel() color.Model {
return bc.wrapped.ColorModel()
}
func (bc *scaledBarcode) Bounds() image.Rectangle {
return bc.rect
}
func (bc *scaledBarcode) At(x, y int) color.Color {
return bc.wrapperFunc(x, y)
}
func (bc *intCSscaledBC) CheckSum() int {
if cs, ok := bc.wrapped.(BarcodeIntCS); ok {
return cs.CheckSum()
}
return 0
}
// Scale returns a resized barcode with the given width and height.
func Scale(bc Barcode, width, height int) (Barcode, error) {
switch bc.Metadata().Dimensions {
case 1:
return scale1DCode(bc, width, height)
case 2:
return scale2DCode(bc, width, height)
}
return nil, errors.New("unsupported barcode format")
}
func newScaledBC(wrapped Barcode, wrapperFunc wrapFunc, rect image.Rectangle) Barcode {
result := &scaledBarcode{
wrapped: wrapped,
wrapperFunc: wrapperFunc,
rect: rect,
}
if _, ok := wrapped.(BarcodeIntCS); ok {
return &intCSscaledBC{*result}
}
return result
}
func scale2DCode(bc Barcode, width, height int) (Barcode, error) {
orgBounds := bc.Bounds()
orgWidth := orgBounds.Max.X - orgBounds.Min.X
orgHeight := orgBounds.Max.Y - orgBounds.Min.Y
factor := int(math.Min(float64(width)/float64(orgWidth), float64(height)/float64(orgHeight)))
if factor <= 0 {
return nil, fmt.Errorf("can not scale barcode to an image smaller than %dx%d", orgWidth, orgHeight)
}
offsetX := (width - (orgWidth * factor)) / 2
offsetY := (height - (orgHeight * factor)) / 2
wrap := func(x, y int) color.Color {
if x < offsetX || y < offsetY {
return color.White
}
x = (x - offsetX) / factor
y = (y - offsetY) / factor
if x >= orgWidth || y >= orgHeight {
return color.White
}
return bc.At(x, y)
}
return newScaledBC(
bc,
wrap,
image.Rect(0, 0, width, height),
), nil
}
func scale1DCode(bc Barcode, width, height int) (Barcode, error) {
orgBounds := bc.Bounds()
orgWidth := orgBounds.Max.X - orgBounds.Min.X
factor := int(float64(width) / float64(orgWidth))
if factor <= 0 {
return nil, fmt.Errorf("can not scale barcode to an image smaller than %dx1", orgWidth)
}
offsetX := (width - (orgWidth * factor)) / 2
wrap := func(x, y int) color.Color {
if x < offsetX {
return color.White
}
x = (x - offsetX) / factor
if x >= orgWidth {
return color.White
}
return bc.At(x, 0)
}
return newScaledBC(
bc,
wrap,
image.Rect(0, 0, width, height),
), nil
}
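A hedged usage sketch for Scale, assuming the qr encoder from the same removed boombuler/barcode module (its Encode function is not shown in this hunk):

```go
package main

import (
	"image/png"
	"os"

	"github.com/boombuler/barcode"
	"github.com/boombuler/barcode/qr"
)

func main() {
	// Encode some content, then integer-scale (and pad) it to 256x256 pixels.
	code, err := qr.Encode("https://example.com", qr.M, qr.Auto)
	if err != nil {
		panic(err)
	}
	scaled, err := barcode.Scale(code, 256, 256)
	if err != nil {
		panic(err)
	}
	f, err := os.Create("qr.png")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// a Barcode is an image.Image, so it encodes directly
	if err := png.Encode(f, scaled); err != nil {
		panic(err)
	}
}
```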


@ -1,57 +0,0 @@
// Package utils contains some utilities which are needed to create barcodes
package utils
import (
"image"
"image/color"
"github.com/boombuler/barcode"
)
type base1DCode struct {
*BitList
kind string
content string
}
type base1DCodeIntCS struct {
base1DCode
checksum int
}
func (c *base1DCode) Content() string {
return c.content
}
func (c *base1DCode) Metadata() barcode.Metadata {
return barcode.Metadata{c.kind, 1}
}
func (c *base1DCode) ColorModel() color.Model {
return color.Gray16Model
}
func (c *base1DCode) Bounds() image.Rectangle {
return image.Rect(0, 0, c.Len(), 1)
}
func (c *base1DCode) At(x, y int) color.Color {
if c.GetBit(x) {
return color.Black
}
return color.White
}
func (c *base1DCodeIntCS) CheckSum() int {
return c.checksum
}
// New1DCodeIntCheckSum creates a new 1D barcode where the bars are represented by the bits in the bars BitList
func New1DCodeIntCheckSum(codeKind, content string, bars *BitList, checksum int) barcode.BarcodeIntCS {
return &base1DCodeIntCS{base1DCode{bars, codeKind, content}, checksum}
}
// New1DCode creates a new 1D barcode where the bars are represented by the bits in the bars BitList
func New1DCode(codeKind, content string, bars *BitList) barcode.Barcode {
return &base1DCode{bars, codeKind, content}
}


@ -1,119 +0,0 @@
package utils
// BitList is a list that contains bits
type BitList struct {
count int
data []int32
}
// NewBitList returns a new BitList with the given length,
// all bits initialized to false
func NewBitList(capacity int) *BitList {
bl := new(BitList)
bl.count = capacity
x := 0
if capacity%32 != 0 {
x = 1
}
bl.data = make([]int32, capacity/32+x)
return bl
}
// Len returns the number of contained bits
func (bl *BitList) Len() int {
return bl.count
}
func (bl *BitList) grow() {
growBy := len(bl.data)
if growBy < 128 {
growBy = 128
} else if growBy >= 1024 {
growBy = 1024
}
nd := make([]int32, len(bl.data)+growBy)
copy(nd, bl.data)
bl.data = nd
}
// AddBit appends the given bits to the end of the list
func (bl *BitList) AddBit(bits ...bool) {
for _, bit := range bits {
itmIndex := bl.count / 32
for itmIndex >= len(bl.data) {
bl.grow()
}
bl.SetBit(bl.count, bit)
bl.count++
}
}
// SetBit sets the bit at the given index to the given value
func (bl *BitList) SetBit(index int, value bool) {
itmIndex := index / 32
itmBitShift := 31 - (index % 32)
if value {
bl.data[itmIndex] = bl.data[itmIndex] | 1<<uint(itmBitShift)
} else {
bl.data[itmIndex] = bl.data[itmIndex] & ^(1 << uint(itmBitShift))
}
}
// GetBit returns the bit at the given index
func (bl *BitList) GetBit(index int) bool {
itmIndex := index / 32
itmBitShift := 31 - (index % 32)
return ((bl.data[itmIndex] >> uint(itmBitShift)) & 1) == 1
}
// AddByte appends all 8 bits of the given byte to the end of the list
func (bl *BitList) AddByte(b byte) {
for i := 7; i >= 0; i-- {
bl.AddBit(((b >> uint(i)) & 1) == 1)
}
}
// AddBits appends the last (LSB) 'count' bits of 'b' to the end of the list
func (bl *BitList) AddBits(b int, count byte) {
for i := int(count) - 1; i >= 0; i-- {
bl.AddBit(((b >> uint(i)) & 1) == 1)
}
}
// GetBytes returns all bits of the BitList as a []byte
func (bl *BitList) GetBytes() []byte {
len := bl.count >> 3
if (bl.count % 8) != 0 {
len++
}
result := make([]byte, len)
for i := 0; i < len; i++ {
shift := (3 - (i % 4)) * 8
result[i] = (byte)((bl.data[i/4] >> uint(shift)) & 0xFF)
}
return result
}
// IterateBytes iterates through all bytes contained in the BitList
func (bl *BitList) IterateBytes() <-chan byte {
res := make(chan byte)
go func() {
c := bl.count
shift := 24
i := 0
for c > 0 {
res <- byte((bl.data[i] >> uint(shift)) & 0xFF)
shift -= 8
if shift < 0 {
shift = 24
i++
}
c -= 8
}
close(res)
}()
return res
}
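A short hedged sketch of how the encoders above drive BitList (the values are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/boombuler/barcode/utils"
)

func main() {
	bl := new(utils.BitList)
	bl.AddBits(4, 4) // four bits: 0100 (e.g. a QR mode indicator)
	bl.AddByte('A')  // eight more bits: 01000001
	fmt.Println(bl.Len())              // 12
	fmt.Println(bl.GetBit(1))          // true (the second bit of 0100)
	fmt.Printf("% x\n", bl.GetBytes()) // 44 10 (12 bits padded to 2 bytes)
}
```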


@ -1,65 +0,0 @@
package utils
// GaloisField encapsulates Galois field arithmetic
type GaloisField struct {
Size int
Base int
ALogTbl []int
LogTbl []int
}
// NewGaloisField creates a new galois field
func NewGaloisField(pp, fieldSize, b int) *GaloisField {
result := new(GaloisField)
result.Size = fieldSize
result.Base = b
result.ALogTbl = make([]int, fieldSize)
result.LogTbl = make([]int, fieldSize)
x := 1
for i := 0; i < fieldSize; i++ {
result.ALogTbl[i] = x
x = x * 2
if x >= fieldSize {
x = (x ^ pp) & (fieldSize - 1)
}
}
for i := 0; i < fieldSize; i++ {
result.LogTbl[result.ALogTbl[i]] = int(i)
}
return result
}
func (gf *GaloisField) Zero() *GFPoly {
return NewGFPoly(gf, []int{0})
}
// AddOrSub adds or subtracts two numbers
func (gf *GaloisField) AddOrSub(a, b int) int {
return a ^ b
}
// Multiply multiplies two numbers
func (gf *GaloisField) Multiply(a, b int) int {
if a == 0 || b == 0 {
return 0
}
return gf.ALogTbl[(gf.LogTbl[a]+gf.LogTbl[b])%(gf.Size-1)]
}
// Divide divides two numbers
func (gf *GaloisField) Divide(a, b int) int {
if b == 0 {
panic("divide by zero")
} else if a == 0 {
return 0
}
return gf.ALogTbl[(gf.LogTbl[a]-gf.LogTbl[b])%(gf.Size-1)]
}
func (gf *GaloisField) Invers(num int) int {
return gf.ALogTbl[(gf.Size-1)-gf.LogTbl[num]]
}
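A hedged sketch of basic arithmetic with the GaloisField above; the field parameters (primitive polynomial 0x11d, size 256, base 0) are what the removed qr package is believed to use, so treat them as an assumption:

```go
package main

import (
	"fmt"

	"github.com/boombuler/barcode/utils"
)

func main() {
	gf := utils.NewGaloisField(0x11d, 256, 0)
	p := gf.Multiply(7, 13)
	fmt.Println(p)                 // product of 7 and 13 in GF(256)
	fmt.Println(gf.Divide(p, 13))  // 7: Divide undoes Multiply
	fmt.Println(gf.AddOrSub(5, 5)) // 0: addition is XOR, so x + x = 0
}
```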


@ -1,103 +0,0 @@
package utils
type GFPoly struct {
gf *GaloisField
Coefficients []int
}
func (gp *GFPoly) Degree() int {
return len(gp.Coefficients) - 1
}
func (gp *GFPoly) Zero() bool {
return gp.Coefficients[0] == 0
}
// GetCoefficient returns the coefficient of x ^ degree
func (gp *GFPoly) GetCoefficient(degree int) int {
return gp.Coefficients[gp.Degree()-degree]
}
func (gp *GFPoly) AddOrSubstract(other *GFPoly) *GFPoly {
if gp.Zero() {
return other
} else if other.Zero() {
return gp
}
smallCoeff := gp.Coefficients
largeCoeff := other.Coefficients
if len(smallCoeff) > len(largeCoeff) {
largeCoeff, smallCoeff = smallCoeff, largeCoeff
}
sumDiff := make([]int, len(largeCoeff))
lenDiff := len(largeCoeff) - len(smallCoeff)
copy(sumDiff, largeCoeff[:lenDiff])
for i := lenDiff; i < len(largeCoeff); i++ {
sumDiff[i] = int(gp.gf.AddOrSub(int(smallCoeff[i-lenDiff]), int(largeCoeff[i])))
}
return NewGFPoly(gp.gf, sumDiff)
}
func (gp *GFPoly) MultByMonominal(degree int, coeff int) *GFPoly {
if coeff == 0 {
return gp.gf.Zero()
}
size := len(gp.Coefficients)
result := make([]int, size+degree)
for i := 0; i < size; i++ {
result[i] = int(gp.gf.Multiply(int(gp.Coefficients[i]), int(coeff)))
}
return NewGFPoly(gp.gf, result)
}
func (gp *GFPoly) Multiply(other *GFPoly) *GFPoly {
if gp.Zero() || other.Zero() {
return gp.gf.Zero()
}
aCoeff := gp.Coefficients
aLen := len(aCoeff)
bCoeff := other.Coefficients
bLen := len(bCoeff)
product := make([]int, aLen+bLen-1)
for i := 0; i < aLen; i++ {
ac := int(aCoeff[i])
for j := 0; j < bLen; j++ {
bc := int(bCoeff[j])
product[i+j] = int(gp.gf.AddOrSub(int(product[i+j]), gp.gf.Multiply(ac, bc)))
}
}
return NewGFPoly(gp.gf, product)
}
func (gp *GFPoly) Divide(other *GFPoly) (quotient *GFPoly, remainder *GFPoly) {
quotient = gp.gf.Zero()
remainder = gp
fld := gp.gf
denomLeadTerm := other.GetCoefficient(other.Degree())
inversDenomLeadTerm := fld.Invers(int(denomLeadTerm))
for remainder.Degree() >= other.Degree() && !remainder.Zero() {
degreeDiff := remainder.Degree() - other.Degree()
scale := int(fld.Multiply(int(remainder.GetCoefficient(remainder.Degree())), inversDenomLeadTerm))
term := other.MultByMonominal(degreeDiff, scale)
itQuot := NewMonominalPoly(fld, degreeDiff, scale)
quotient = quotient.AddOrSubstract(itQuot)
remainder = remainder.AddOrSubstract(term)
}
return
}
func NewMonominalPoly(field *GaloisField, degree int, coeff int) *GFPoly {
if coeff == 0 {
return field.Zero()
}
result := make([]int, degree+1)
result[0] = coeff
return NewGFPoly(field, result)
}
func NewGFPoly(field *GaloisField, coefficients []int) *GFPoly {
for len(coefficients) > 1 && coefficients[0] == 0 {
coefficients = coefficients[1:]
}
return &GFPoly{field, coefficients}
}


@ -1,44 +0,0 @@
package utils
import (
"sync"
)
type ReedSolomonEncoder struct {
gf *GaloisField
polynomes []*GFPoly
m *sync.Mutex
}
func NewReedSolomonEncoder(gf *GaloisField) *ReedSolomonEncoder {
return &ReedSolomonEncoder{
gf, []*GFPoly{NewGFPoly(gf, []int{1})}, new(sync.Mutex),
}
}
func (rs *ReedSolomonEncoder) getPolynomial(degree int) *GFPoly {
rs.m.Lock()
defer rs.m.Unlock()
if degree >= len(rs.polynomes) {
last := rs.polynomes[len(rs.polynomes)-1]
for d := len(rs.polynomes); d <= degree; d++ {
next := last.Multiply(NewGFPoly(rs.gf, []int{1, rs.gf.ALogTbl[d-1+rs.gf.Base]}))
rs.polynomes = append(rs.polynomes, next)
last = next
}
}
return rs.polynomes[degree]
}
func (rs *ReedSolomonEncoder) Encode(data []int, eccCount int) []int {
generator := rs.getPolynomial(eccCount)
info := NewGFPoly(rs.gf, data)
info = info.MultByMonominal(eccCount, 1)
_, remainder := info.Divide(generator)
result := make([]int, eccCount)
numZero := int(eccCount) - len(remainder.Coefficients)
copy(result[numZero:], remainder.Coefficients)
return result
}
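A hedged sketch of generating error-correction codewords with the ReedSolomonEncoder above; the field parameters are assumed to match QR usage and the data codewords are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/boombuler/barcode/utils"
)

func main() {
	gf := utils.NewGaloisField(0x11d, 256, 0)
	rs := utils.NewReedSolomonEncoder(gf)

	data := []int{32, 91, 11, 120, 209, 114, 220, 77} // example data codewords
	ecc := rs.Encode(data, 10)                        // returns the 10 ECC codewords to append after the data
	fmt.Println(len(ecc), ecc)
}
```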


@ -1,19 +0,0 @@
package utils
// RuneToInt converts a rune between '0' and '9' to an integer between 0 and 9
// If the rune is outside of this range -1 is returned.
func RuneToInt(r rune) int {
if r >= '0' && r <= '9' {
return int(r - '0')
}
return -1
}
// IntToRune converts a digit 0 - 9 to the rune '0' - '9'. If the given int is outside
// of this range 'F' is returned!
func IntToRune(i int) rune {
if i >= 0 && i <= 9 {
return rune(i + '0')
}
return 'F'
}


@ -1,2 +0,0 @@
/bin
/gopath


@ -1,16 +0,0 @@
language: go
go:
- "1.9"
- "1.10"
install:
- go get -v -t github.com/coreos/go-oidc/...
- go get golang.org/x/tools/cmd/cover
- go get github.com/golang/lint/golint
script:
- ./test
notifications:
email: false


@ -1,71 +0,0 @@
# How to Contribute
CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via
GitHub pull requests. This document outlines some of the conventions on
development workflow, commit message formatting, contact points and other
resources to make it easier to get your contribution accepted.
# Certificate of Origin
By contributing to this project you agree to the Developer Certificate of
Origin (DCO). This document was created by the Linux Kernel community and is a
simple statement that you, as a contributor, have the legal right to make the
contribution. See the [DCO](DCO) file for details.
# Email and Chat
The project currently uses the general CoreOS email list and IRC channel:
- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev)
- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org
Please avoid emailing maintainers found in the MAINTAINERS file directly. They
are very busy and read the mailing lists.
## Getting Started
- Fork the repository on GitHub
- Read the [README](README.md) for build and test instructions
- Play with the project, submit bugs, submit patches!
## Contribution Flow
This is a rough outline of what a contributor's workflow looks like:
- Create a topic branch from where you want to base your work (usually master).
- Make commits of logical units.
- Make sure your commit messages are in the proper format (see below).
- Push your changes to a topic branch in your fork of the repository.
- Make sure the tests pass, and add any new tests as appropriate.
- Submit a pull request to the original repository.
Thanks for your contributions!
### Format of the Commit Message
We follow a rough convention for commit messages that is designed to answer two
questions: what changed and why. The subject line should feature the what and
the body of the commit should describe the why.
```
scripts: add the test-cluster command
this uses tmux to setup a test cluster that you can easily kill and
start for debugging.
Fixes #38
```
The format can be described more formally as follows:
```
<subsystem>: <what changed>
<BLANK LINE>
<why this change was made>
<BLANK LINE>
<footer>
```
The first line is the subject and should be no longer than 70 characters, the
second line is always blank, and other lines should be wrapped at 80 characters.
This allows the message to be easier to read on GitHub as well as in various
git tools.

36
vendor/github.com/coreos/go-oidc/DCO generated vendored

@ -1,36 +0,0 @@
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.


@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1,3 +0,0 @@
Eric Chiang <ericchiang@google.com> (@ericchiang)
Mike Danese <mikedanese@google.com> (@mikedanese)
Rithu Leena John <rjohn@redhat.com> (@rithujohn191)


@ -1,5 +0,0 @@
CoreOS Project
Copyright 2014 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).


@ -1,72 +0,0 @@
# go-oidc
[![GoDoc](https://godoc.org/github.com/coreos/go-oidc?status.svg)](https://godoc.org/github.com/coreos/go-oidc)
[![Build Status](https://travis-ci.org/coreos/go-oidc.png?branch=master)](https://travis-ci.org/coreos/go-oidc)
## OpenID Connect support for Go
This package enables OpenID Connect support for the [golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2) package.
```go
provider, err := oidc.NewProvider(ctx, "https://accounts.google.com")
if err != nil {
// handle error
}
// Configure an OpenID Connect aware OAuth2 client.
oauth2Config := oauth2.Config{
ClientID: clientID,
ClientSecret: clientSecret,
RedirectURL: redirectURL,
// Discovery returns the OAuth2 endpoints.
Endpoint: provider.Endpoint(),
// "openid" is a required scope for OpenID Connect flows.
Scopes: []string{oidc.ScopeOpenID, "profile", "email"},
}
```
OAuth2 redirects are unchanged.
```go
func handleRedirect(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, oauth2Config.AuthCodeURL(state), http.StatusFound)
}
```
On responses, the provider can be used to verify ID Tokens.
```go
var verifier = provider.Verifier(&oidc.Config{ClientID: clientID})
func handleOAuth2Callback(w http.ResponseWriter, r *http.Request) {
// Verify state and errors.
oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
if err != nil {
// handle error
}
// Extract the ID Token from OAuth2 token.
rawIDToken, ok := oauth2Token.Extra("id_token").(string)
if !ok {
// handle missing token
}
// Parse and verify ID Token payload.
idToken, err := verifier.Verify(ctx, rawIDToken)
if err != nil {
// handle error
}
// Extract custom claims
var claims struct {
Email string `json:"email"`
Verified bool `json:"email_verified"`
}
if err := idToken.Claims(&claims); err != nil {
// handle error
}
}
```


@ -1,61 +0,0 @@
## CoreOS Community Code of Conduct
### Contributor Code of Conduct
As contributors and maintainers of this project, and in the interest of
fostering an open and welcoming community, we pledge to respect all people who
contribute through reporting issues, posting feature requests, updating
documentation, submitting pull requests or patches, and other activities.
We are committed to making participation in this project a harassment-free
experience for everyone, regardless of level of experience, gender, gender
identity and expression, sexual orientation, disability, personal appearance,
body size, race, ethnicity, age, religion, or nationality.
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing others' private information, such as physical or electronic addresses, without explicit permission
* Other unethical or unprofessional conduct.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct. By adopting this Code of Conduct,
project maintainers commit themselves to fairly and consistently applying these
principles to every aspect of managing this project. Project maintainers who do
not follow or enforce the Code of Conduct may be permanently removed from the
project team.
This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting a project maintainer, Brandon Philips
<brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.
This Code of Conduct is adapted from the Contributor Covenant
(http://contributor-covenant.org), version 1.2.0, available at
http://contributor-covenant.org/version/1/2/0/
### CoreOS Events Code of Conduct
CoreOS events are working conferences intended for professional networking and
collaboration in the CoreOS community. Attendees are expected to behave
according to professional standards and in accordance with their employer's
policies on appropriate workplace behavior.
While at CoreOS events or related social networking opportunities, attendees
should not engage in discriminatory or offensive speech or actions including
but not limited to gender, sexuality, race, age, disability, or religion.
Speakers should be especially aware of these concerns.
CoreOS does not condone any statements by speakers contrary to these standards.
CoreOS reserves the right to deny entrance and/or eject from an event (without
refund) any individual found to be engaging in discriminatory or offensive
speech or actions.
Please bring any concerns to the immediate attention of designated on-site
staff, Brandon Philips <brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.


@ -1,20 +0,0 @@
// +build !golint
// Don't lint this file. We don't want to have to add a comment to each constant.
package oidc
const (
// JOSE asymmetric signing algorithm values as defined by RFC 7518
//
// see: https://tools.ietf.org/html/rfc7518#section-3.1
RS256 = "RS256" // RSASSA-PKCS-v1.5 using SHA-256
RS384 = "RS384" // RSASSA-PKCS-v1.5 using SHA-384
RS512 = "RS512" // RSASSA-PKCS-v1.5 using SHA-512
ES256 = "ES256" // ECDSA using P-256 and SHA-256
ES384 = "ES384" // ECDSA using P-384 and SHA-384
ES512 = "ES512" // ECDSA using P-521 and SHA-512
PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256
PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384
PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512
)


@ -1,228 +0,0 @@
package oidc
import (
"context"
"errors"
"fmt"
"io/ioutil"
"net/http"
"sync"
"time"
"github.com/pquerna/cachecontrol"
jose "gopkg.in/square/go-jose.v2"
)
// keysExpiryDelta is the allowed clock skew between a client and the OpenID Connect
// server.
//
// When keys expire, they are valid for this amount of time after.
//
// If the keys have not expired, and an ID Token claims it was signed by a key not in
// the cache, the keys will only be refreshed if they expire within this amount of
// time.
const keysExpiryDelta = 30 * time.Second
// NewRemoteKeySet returns a KeySet that can validate JSON web tokens by using HTTP
// GETs to fetch JSON web token sets hosted at a remote URL. This is automatically
// used by NewProvider using the URLs returned by OpenID Connect discovery, but is
// exposed for providers that don't support discovery or to prevent round trips to the
// discovery URL.
//
// The returned KeySet is a long lived verifier that caches keys based on cache-control
// headers. Reuse a common remote key set instead of creating new ones as needed.
//
// The behavior of the returned KeySet is undefined once the context is canceled.
func NewRemoteKeySet(ctx context.Context, jwksURL string) KeySet {
return newRemoteKeySet(ctx, jwksURL, time.Now)
}
func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time) *remoteKeySet {
if now == nil {
now = time.Now
}
return &remoteKeySet{jwksURL: jwksURL, ctx: ctx, now: now}
}
type remoteKeySet struct {
jwksURL string
ctx context.Context
now func() time.Time
// guard all other fields
mu sync.Mutex
// inflight suppresses parallel execution of updateKeys and allows
// multiple goroutines to wait for its result.
inflight *inflight
// A set of cached keys and their expiry.
cachedKeys []jose.JSONWebKey
expiry time.Time
}
// inflight is used to wait on some in-flight request from multiple goroutines.
type inflight struct {
doneCh chan struct{}
keys []jose.JSONWebKey
err error
}
func newInflight() *inflight {
return &inflight{doneCh: make(chan struct{})}
}
// wait returns a channel that multiple goroutines can receive on. Once it returns
// a value, the inflight request is done and result() can be inspected.
func (i *inflight) wait() <-chan struct{} {
return i.doneCh
}
// done can only be called by a single goroutine. It records the result of the
// inflight request and signals other goroutines that the result is safe to
// inspect.
func (i *inflight) done(keys []jose.JSONWebKey, err error) {
i.keys = keys
i.err = err
close(i.doneCh)
}
// result cannot be called until the wait() channel has returned a value.
func (i *inflight) result() ([]jose.JSONWebKey, error) {
return i.keys, i.err
}
func (r *remoteKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
jws, err := jose.ParseSigned(jwt)
if err != nil {
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
}
return r.verify(ctx, jws)
}
func (r *remoteKeySet) verify(ctx context.Context, jws *jose.JSONWebSignature) ([]byte, error) {
// We don't support JWTs signed with multiple signatures.
keyID := ""
for _, sig := range jws.Signatures {
keyID = sig.Header.KeyID
break
}
keys, expiry := r.keysFromCache()
// Don't check expiry yet. This optimizes for when the provider is unavailable.
for _, key := range keys {
if keyID == "" || key.KeyID == keyID {
if payload, err := jws.Verify(&key); err == nil {
return payload, nil
}
}
}
if !r.now().Add(keysExpiryDelta).After(expiry) {
// Keys haven't expired, don't refresh.
return nil, errors.New("failed to verify id token signature")
}
keys, err := r.keysFromRemote(ctx)
if err != nil {
return nil, fmt.Errorf("fetching keys %v", err)
}
for _, key := range keys {
if keyID == "" || key.KeyID == keyID {
if payload, err := jws.Verify(&key); err == nil {
return payload, nil
}
}
}
return nil, errors.New("failed to verify id token signature")
}
func (r *remoteKeySet) keysFromCache() (keys []jose.JSONWebKey, expiry time.Time) {
r.mu.Lock()
defer r.mu.Unlock()
return r.cachedKeys, r.expiry
}
// keysFromRemote syncs the key set from the remote set, records the values in the
// cache, and returns the key set.
func (r *remoteKeySet) keysFromRemote(ctx context.Context) ([]jose.JSONWebKey, error) {
// Need to lock to inspect the inflight request field.
r.mu.Lock()
// If there's not a current inflight request, create one.
if r.inflight == nil {
r.inflight = newInflight()
// This goroutine has exclusive ownership over the current inflight
// request. It releases the resource by nil'ing the inflight field
// once the goroutine is done.
go func() {
// Sync keys and finish inflight when that's done.
keys, expiry, err := r.updateKeys()
r.inflight.done(keys, err)
// Lock to update the keys and indicate that there is no longer an
// inflight request.
r.mu.Lock()
defer r.mu.Unlock()
if err == nil {
r.cachedKeys = keys
r.expiry = expiry
}
// Free inflight so a different request can run.
r.inflight = nil
}()
}
inflight := r.inflight
r.mu.Unlock()
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-inflight.wait():
return inflight.result()
}
}
func (r *remoteKeySet) updateKeys() ([]jose.JSONWebKey, time.Time, error) {
req, err := http.NewRequest("GET", r.jwksURL, nil)
if err != nil {
return nil, time.Time{}, fmt.Errorf("oidc: can't create request: %v", err)
}
resp, err := doRequest(r.ctx, req)
if err != nil {
return nil, time.Time{}, fmt.Errorf("oidc: get keys failed %v", err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, time.Time{}, fmt.Errorf("unable to read response body: %v", err)
}
if resp.StatusCode != http.StatusOK {
return nil, time.Time{}, fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body)
}
var keySet jose.JSONWebKeySet
err = unmarshalResp(resp, body, &keySet)
if err != nil {
return nil, time.Time{}, fmt.Errorf("oidc: failed to decode keys: %v %s", err, body)
}
// If the server doesn't provide cache control headers, assume the
// keys expire immediately.
expiry := r.now()
_, e, err := cachecontrol.CachableResponse(req, resp, cachecontrol.Options{})
if err == nil && e.After(expiry) {
expiry = e
}
return keySet.Keys, expiry, nil
}
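A hedged usage sketch for the remote key set above; the JWKS URL and token value are placeholders:

```go
package main

import (
	"context"
	"fmt"

	oidc "github.com/coreos/go-oidc"
)

func main() {
	ctx := context.Background()
	// Reuse one key set per issuer; it caches keys based on cache-control headers.
	keySet := oidc.NewRemoteKeySet(ctx, "https://accounts.example.com/keys")

	rawJWT := "eyJ..." // placeholder compact JWS issued by the provider
	payload, err := keySet.VerifySignature(ctx, rawJWT)
	if err != nil {
		fmt.Println("verify failed:", err) // bad signature or keys unavailable
		return
	}
	fmt.Printf("verified payload: %s\n", payload)
}
```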


@ -1,385 +0,0 @@
// Package oidc implements OpenID Connect client logic for the golang.org/x/oauth2 package.
package oidc
import (
"context"
"crypto/sha256"
"crypto/sha512"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"hash"
"io/ioutil"
"mime"
"net/http"
"strings"
"time"
"golang.org/x/oauth2"
jose "gopkg.in/square/go-jose.v2"
)
const (
// ScopeOpenID is the mandatory scope for all OpenID Connect OAuth2 requests.
ScopeOpenID = "openid"
// ScopeOfflineAccess is an optional scope defined by OpenID Connect for requesting
// OAuth2 refresh tokens.
//
// Support for this scope differs between OpenID Connect providers. For instance
// Google rejects it, favoring appending "access_type=offline" as part of the
// authorization request instead.
//
// See: https://openid.net/specs/openid-connect-core-1_0.html#OfflineAccess
ScopeOfflineAccess = "offline_access"
)
var (
errNoAtHash = errors.New("id token did not have an access token hash")
errInvalidAtHash = errors.New("access token hash does not match value in ID token")
)
// ClientContext returns a new Context that carries the provided HTTP client.
//
// This method sets the same context key used by the golang.org/x/oauth2 package,
// so the returned context works for that package too.
//
// myClient := &http.Client{}
// ctx := oidc.ClientContext(parentContext, myClient)
//
// // This will use the custom client
// provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
//
func ClientContext(ctx context.Context, client *http.Client) context.Context {
return context.WithValue(ctx, oauth2.HTTPClient, client)
}
func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
client := http.DefaultClient
if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
client = c
}
return client.Do(req.WithContext(ctx))
}
// Provider represents an OpenID Connect server's configuration.
type Provider struct {
issuer string
authURL string
tokenURL string
userInfoURL string
// Raw claims returned by the server.
rawClaims []byte
remoteKeySet KeySet
}
type cachedKeys struct {
keys []jose.JSONWebKey
expiry time.Time
}
type providerJSON struct {
Issuer string `json:"issuer"`
AuthURL string `json:"authorization_endpoint"`
TokenURL string `json:"token_endpoint"`
JWKSURL string `json:"jwks_uri"`
UserInfoURL string `json:"userinfo_endpoint"`
}
// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider.
//
// The issuer is the URL identifier for the service. For example: "https://accounts.google.com"
// or "https://login.salesforce.com".
func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration"
req, err := http.NewRequest("GET", wellKnown, nil)
if err != nil {
return nil, err
}
resp, err := doRequest(ctx, req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("unable to read response body: %v", err)
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("%s: %s", resp.Status, body)
}
var p providerJSON
err = unmarshalResp(resp, body, &p)
if err != nil {
return nil, fmt.Errorf("oidc: failed to decode provider discovery object: %v", err)
}
if p.Issuer != issuer {
return nil, fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", issuer, p.Issuer)
}
return &Provider{
issuer: p.Issuer,
authURL: p.AuthURL,
tokenURL: p.TokenURL,
userInfoURL: p.UserInfoURL,
rawClaims: body,
remoteKeySet: NewRemoteKeySet(ctx, p.JWKSURL),
}, nil
}
// Claims unmarshals raw fields returned by the server during discovery.
//
// var claims struct {
// ScopesSupported []string `json:"scopes_supported"`
// ClaimsSupported []string `json:"claims_supported"`
// }
//
// if err := provider.Claims(&claims); err != nil {
// // handle unmarshaling error
// }
//
// For a list of fields defined by the OpenID Connect spec see:
// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
func (p *Provider) Claims(v interface{}) error {
if p.rawClaims == nil {
return errors.New("oidc: claims not set")
}
return json.Unmarshal(p.rawClaims, v)
}
// Endpoint returns the OAuth2 auth and token endpoints for the given provider.
func (p *Provider) Endpoint() oauth2.Endpoint {
return oauth2.Endpoint{AuthURL: p.authURL, TokenURL: p.tokenURL}
}
// UserInfo represents the OpenID Connect userinfo claims.
type UserInfo struct {
Subject string `json:"sub"`
Profile string `json:"profile"`
Email string `json:"email"`
EmailVerified bool `json:"email_verified"`
claims []byte
}
// Claims unmarshals the raw JSON object claims into the provided object.
func (u *UserInfo) Claims(v interface{}) error {
if u.claims == nil {
return errors.New("oidc: claims not set")
}
return json.Unmarshal(u.claims, v)
}
// UserInfo uses the token source to query the provider's user info endpoint.
func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource) (*UserInfo, error) {
if p.userInfoURL == "" {
return nil, errors.New("oidc: user info endpoint is not supported by this provider")
}
req, err := http.NewRequest("GET", p.userInfoURL, nil)
if err != nil {
return nil, fmt.Errorf("oidc: create GET request: %v", err)
}
token, err := tokenSource.Token()
if err != nil {
return nil, fmt.Errorf("oidc: get access token: %v", err)
}
token.SetAuthHeader(req)
resp, err := doRequest(ctx, req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("%s: %s", resp.Status, body)
}
var userInfo UserInfo
if err := json.Unmarshal(body, &userInfo); err != nil {
return nil, fmt.Errorf("oidc: failed to decode userinfo: %v", err)
}
userInfo.claims = body
return &userInfo, nil
}
// IDToken is an OpenID Connect extension that provides a predictable representation
// of an authorization event.
//
// The ID Token only holds fields OpenID Connect requires. To access additional
// claims returned by the server, use the Claims method.
type IDToken struct {
// The URL of the server which issued this token. OpenID Connect
// requires this value always be identical to the URL used for
// initial discovery.
//
// Note: Because of a known issue with Google Accounts' implementation
// this value may differ when using Google.
//
// See: https://developers.google.com/identity/protocols/OpenIDConnect#obtainuserinfo
Issuer string
// The client ID, or set of client IDs, that this token is issued for. For
// common uses, this is the client that initialized the auth flow.
//
// This package ensures the audience contains an expected value.
Audience []string
// A unique string which identifies the end user.
Subject string
// Expiry of the token. This package will not process tokens that have
// expired unless that validation is explicitly turned off.
Expiry time.Time
// When the token was issued by the provider.
IssuedAt time.Time
// Initial nonce provided during the authentication redirect.
//
// This package does NOT provide verification of the value of this field
// and it's the user's responsibility to ensure it contains a valid value.
Nonce string
// at_hash claim, if set in the ID token. Callers can verify an access token
// that corresponds to the ID token using the VerifyAccessToken method.
AccessTokenHash string
// signature algorithm used for ID token, needed to compute a verification hash of an
// access token
sigAlgorithm string
// Raw payload of the id_token.
claims []byte
// Map of distributed claim names to claim sources
distributedClaims map[string]claimSource
}
// Claims unmarshals the raw JSON payload of the ID Token into a provided struct.
//
// idToken, err := idTokenVerifier.Verify(rawIDToken)
// if err != nil {
// // handle error
// }
// var claims struct {
// Email string `json:"email"`
// EmailVerified bool `json:"email_verified"`
// }
// if err := idToken.Claims(&claims); err != nil {
// // handle error
// }
//
func (i *IDToken) Claims(v interface{}) error {
if i.claims == nil {
return errors.New("oidc: claims not set")
}
return json.Unmarshal(i.claims, v)
}
// VerifyAccessToken verifies that the hash of the access token that corresponds to the ID token
// matches the hash in the ID token. It returns an error if the hashes don't match.
// It is the caller's responsibility to ensure that the optional access token hash is present for the ID token
// before calling this method. See https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
func (i *IDToken) VerifyAccessToken(accessToken string) error {
if i.AccessTokenHash == "" {
return errNoAtHash
}
var h hash.Hash
switch i.sigAlgorithm {
case RS256, ES256, PS256:
h = sha256.New()
case RS384, ES384, PS384:
h = sha512.New384()
case RS512, ES512, PS512:
h = sha512.New()
default:
return fmt.Errorf("oidc: unsupported signing algorithm %q", i.sigAlgorithm)
}
h.Write([]byte(accessToken)) // hash documents that Write will never return an error
sum := h.Sum(nil)[:h.Size()/2]
actual := base64.RawURLEncoding.EncodeToString(sum)
if actual != i.AccessTokenHash {
return errInvalidAtHash
}
return nil
}
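// A minimal usage sketch (illustrative only; rawIDToken and accessToken are
// assumed to come from a prior OAuth2 code exchange, and verifier from
// Provider.Verifier or NewVerifier):
//
//	idToken, err := verifier.Verify(ctx, rawIDToken)
//	if err != nil {
//		// handle error
//	}
//	if err := idToken.VerifyAccessToken(accessToken); err != nil {
//		// at_hash missing or mismatched
//	}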
type idToken struct {
Issuer string `json:"iss"`
Subject string `json:"sub"`
Audience audience `json:"aud"`
Expiry jsonTime `json:"exp"`
IssuedAt jsonTime `json:"iat"`
NotBefore *jsonTime `json:"nbf"`
Nonce string `json:"nonce"`
AtHash string `json:"at_hash"`
ClaimNames map[string]string `json:"_claim_names"`
ClaimSources map[string]claimSource `json:"_claim_sources"`
}
type claimSource struct {
Endpoint string `json:"endpoint"`
AccessToken string `json:"access_token"`
}
type audience []string
func (a *audience) UnmarshalJSON(b []byte) error {
var s string
if json.Unmarshal(b, &s) == nil {
*a = audience{s}
return nil
}
var auds []string
if err := json.Unmarshal(b, &auds); err != nil {
return err
}
*a = audience(auds)
return nil
}
type jsonTime time.Time
func (j *jsonTime) UnmarshalJSON(b []byte) error {
var n json.Number
if err := json.Unmarshal(b, &n); err != nil {
return err
}
var unix int64
if t, err := n.Int64(); err == nil {
unix = t
} else {
f, err := n.Float64()
if err != nil {
return err
}
unix = int64(f)
}
*j = jsonTime(time.Unix(unix, 0))
return nil
}
func unmarshalResp(r *http.Response, body []byte, v interface{}) error {
err := json.Unmarshal(body, &v)
if err == nil {
return nil
}
ct := r.Header.Get("Content-Type")
mediaType, _, parseErr := mime.ParseMediaType(ct)
if parseErr == nil && mediaType == "application/json" {
return fmt.Errorf("got Content-Type = application/json, but could not unmarshal as JSON: %v", err)
}
return fmt.Errorf("expected Content-Type = application/json, got %q: %v", ct, err)
}

View file

@ -1,16 +0,0 @@
#!/bin/bash
set -e
# Filter out any files with a !golint build tag.
LINTABLE=$( go list -tags=golint -f '
{{- range $i, $file := .GoFiles -}}
{{ $file }} {{ end }}
{{ range $i, $file := .TestGoFiles -}}
{{ $file }} {{ end }}' github.com/coreos/go-oidc )
go test -v -i -race github.com/coreos/go-oidc/...
go test -v -race github.com/coreos/go-oidc/...
golint -set_exit_status $LINTABLE
go vet github.com/coreos/go-oidc/...
go build -v ./example/...

View file

@ -1,327 +0,0 @@
package oidc
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
"golang.org/x/oauth2"
jose "gopkg.in/square/go-jose.v2"
)
const (
issuerGoogleAccounts = "https://accounts.google.com"
issuerGoogleAccountsNoScheme = "accounts.google.com"
)
// KeySet is a set of public JSON Web Keys that can be used to validate the signature
// of JSON web tokens. This is expected to be backed by a remote key set through
// provider metadata discovery or an in-memory set of keys delivered out-of-band.
type KeySet interface {
// VerifySignature parses the JSON web token, verifies the signature, and returns
// the raw payload. Header and claim fields are validated by other parts of the
// package. For example, the KeySet does not need to check values such as signature
// algorithm, issuer, and audience since the IDTokenVerifier validates these values
// independently.
//
// If VerifySignature makes HTTP requests to verify the token, it's expected to
// use any HTTP client associated with the context through ClientContext.
VerifySignature(ctx context.Context, jwt string) (payload []byte, err error)
}
// IDTokenVerifier provides verification for ID Tokens.
type IDTokenVerifier struct {
keySet KeySet
config *Config
issuer string
}
// NewVerifier returns a verifier manually constructed from a key set and issuer URL.
//
// It's easier to use provider discovery to construct an IDTokenVerifier than to create
// one directly. This method is intended to be used with providers that don't support
// metadata discovery, or to avoid round trips when the key set URL is already known.
//
// This constructor can be used to create a verifier directly using the issuer URL and
// JSON Web Key Set URL without using discovery:
//
// keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs")
// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
//
// Since KeySet is an interface, this constructor can also be used to supply custom
// public key sources. For example, if a user wanted to supply public keys out-of-band
// and hold them statically in-memory:
//
// // Custom KeySet implementation.
// keySet := newStaticKeySet(publicKeys...)
//
// // Verifier uses the custom KeySet implementation.
// verifier := oidc.NewVerifier("https://auth.example.com", keySet, config)
//
func NewVerifier(issuerURL string, keySet KeySet, config *Config) *IDTokenVerifier {
return &IDTokenVerifier{keySet: keySet, config: config, issuer: issuerURL}
}
// Config is the configuration for an IDTokenVerifier.
type Config struct {
// Expected audience of the token. For a majority of the cases this is expected to be
// the ID of the client that initialized the login flow. It may occasionally differ if
// the provider supports the authorizing party (azp) claim.
//
// If not provided, users must explicitly set SkipClientIDCheck.
ClientID string
// If specified, only this set of algorithms may be used to sign the JWT.
//
// Since many providers only support RS256, SupportedSigningAlgs defaults to this value.
SupportedSigningAlgs []string
// If true, no ClientID check is performed. Must be true if the ClientID field is empty.
SkipClientIDCheck bool
// If true, token expiry is not checked.
SkipExpiryCheck bool
// SkipIssuerCheck is intended for specialized cases where the caller wishes to
// defer issuer validation. When enabled, callers MUST independently verify the Token's
// Issuer is a known good value.
//
// Mismatched issuers often indicate client mis-configuration. If mismatches are
// unexpected, evaluate if the provided issuer URL is incorrect instead of enabling
// this option.
SkipIssuerCheck bool
// Time function to check Token expiry. Defaults to time.Now
Now func() time.Time
}
// Verifier returns an IDTokenVerifier that uses the provider's key set to verify JWTs.
//
// The returned IDTokenVerifier is tied to the Provider's context and its behavior is
// undefined once the Provider's context is canceled.
func (p *Provider) Verifier(config *Config) *IDTokenVerifier {
return NewVerifier(p.issuer, p.remoteKeySet, config)
}
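// A minimal sketch of putting Config and Verifier together from a caller's
// perspective (illustrative only; clientID is a placeholder for the
// application's own OAuth2 client ID):
//
//	cfg := &oidc.Config{
//		ClientID:             clientID,
//		SupportedSigningAlgs: []string{oidc.RS256},
//	}
//	verifier := provider.Verifier(cfg)
//	idToken, err := verifier.Verify(ctx, rawIDToken)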
func parseJWT(p string) ([]byte, error) {
parts := strings.Split(p, ".")
if len(parts) < 2 {
return nil, fmt.Errorf("oidc: malformed jwt, expected 3 parts got %d", len(parts))
}
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
if err != nil {
return nil, fmt.Errorf("oidc: malformed jwt payload: %v", err)
}
return payload, nil
}
func contains(sli []string, ele string) bool {
for _, s := range sli {
if s == ele {
return true
}
}
return false
}
// Returns the Claims from the distributed JWT token
func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src claimSource) ([]byte, error) {
req, err := http.NewRequest("GET", src.Endpoint, nil)
if err != nil {
return nil, fmt.Errorf("malformed request: %v", err)
}
if src.AccessToken != "" {
req.Header.Set("Authorization", "Bearer "+src.AccessToken)
}
resp, err := doRequest(ctx, req)
if err != nil {
return nil, fmt.Errorf("oidc: Request to endpoint failed: %v", err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("unable to read response body: %v", err)
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("oidc: request failed: %v", resp.StatusCode)
}
token, err := verifier.Verify(ctx, string(body))
if err != nil {
return nil, fmt.Errorf("malformed response body: %v", err)
}
return token.claims, nil
}
func parseClaim(raw []byte, name string, v interface{}) error {
var parsed map[string]json.RawMessage
if err := json.Unmarshal(raw, &parsed); err != nil {
return err
}
val, ok := parsed[name]
if !ok {
return fmt.Errorf("claim doesn't exist: %s", name)
}
return json.Unmarshal([]byte(val), v)
}
// Verify parses a raw ID Token, verifies it's been signed by the provider, performs
// any additional checks depending on the Config, and returns the payload.
//
// Verify does NOT do nonce validation, which is the caller's responsibility.
//
// See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
//
// oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
// if err != nil {
// // handle error
// }
//
// // Extract the ID Token from oauth2 token.
// rawIDToken, ok := oauth2Token.Extra("id_token").(string)
// if !ok {
// // handle error
// }
//
// token, err := verifier.Verify(ctx, rawIDToken)
//
func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDToken, error) {
jws, err := jose.ParseSigned(rawIDToken)
if err != nil {
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
}
// Throw out tokens with invalid claims before trying to verify the token. This lets
// us do cheap checks before possibly re-syncing keys.
payload, err := parseJWT(rawIDToken)
if err != nil {
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
}
var token idToken
if err := json.Unmarshal(payload, &token); err != nil {
return nil, fmt.Errorf("oidc: failed to unmarshal claims: %v", err)
}
distributedClaims := make(map[string]claimSource)
// Step through the token to map claim names to claim sources.
for cn, src := range token.ClaimNames {
if src == "" {
return nil, fmt.Errorf("oidc: failed to obtain source from claim name")
}
s, ok := token.ClaimSources[src]
if !ok {
return nil, fmt.Errorf("oidc: source does not exist")
}
distributedClaims[cn] = s
}
t := &IDToken{
Issuer: token.Issuer,
Subject: token.Subject,
Audience: []string(token.Audience),
Expiry: time.Time(token.Expiry),
IssuedAt: time.Time(token.IssuedAt),
Nonce: token.Nonce,
AccessTokenHash: token.AtHash,
claims: payload,
distributedClaims: distributedClaims,
}
// Check issuer.
if !v.config.SkipIssuerCheck && t.Issuer != v.issuer {
// Google sometimes returns "accounts.google.com" as the issuer claim instead of
// the required "https://accounts.google.com". Detect this case and allow it only
// for Google.
//
// We will not add hooks to let other providers go off spec like this.
if !(v.issuer == issuerGoogleAccounts && t.Issuer == issuerGoogleAccountsNoScheme) {
return nil, fmt.Errorf("oidc: id token issued by a different provider, expected %q got %q", v.issuer, t.Issuer)
}
}
// If a client ID has been provided, make sure it's part of the audience. SkipClientIDCheck must be true if ClientID is empty.
//
// This check DOES NOT ensure that the ClientID is the party to which the ID Token was issued (i.e. Authorized party).
if !v.config.SkipClientIDCheck {
if v.config.ClientID != "" {
if !contains(t.Audience, v.config.ClientID) {
return nil, fmt.Errorf("oidc: expected audience %q got %q", v.config.ClientID, t.Audience)
}
} else {
return nil, fmt.Errorf("oidc: invalid configuration, clientID must be provided or SkipClientIDCheck must be set")
}
}
// If SkipExpiryCheck is false, make sure the token is not expired.
if !v.config.SkipExpiryCheck {
now := time.Now
if v.config.Now != nil {
now = v.config.Now
}
nowTime := now()
if t.Expiry.Before(nowTime) {
return nil, fmt.Errorf("oidc: token is expired (Token Expiry: %v)", t.Expiry)
}
// If nbf claim is provided in token, ensure that it is indeed in the past.
if token.NotBefore != nil {
nbfTime := time.Time(*token.NotBefore)
leeway := 1 * time.Minute
if nowTime.Add(leeway).Before(nbfTime) {
return nil, fmt.Errorf("oidc: current time %v before the nbf (not before) time: %v", nowTime, nbfTime)
}
}
}
switch len(jws.Signatures) {
case 0:
return nil, fmt.Errorf("oidc: id token not signed")
case 1:
default:
return nil, fmt.Errorf("oidc: multiple signatures on id token not supported")
}
sig := jws.Signatures[0]
supportedSigAlgs := v.config.SupportedSigningAlgs
if len(supportedSigAlgs) == 0 {
supportedSigAlgs = []string{RS256}
}
if !contains(supportedSigAlgs, sig.Header.Algorithm) {
return nil, fmt.Errorf("oidc: id token signed with unsupported algorithm, expected %q got %q", supportedSigAlgs, sig.Header.Algorithm)
}
t.sigAlgorithm = sig.Header.Algorithm
gotPayload, err := v.keySet.VerifySignature(ctx, rawIDToken)
if err != nil {
return nil, fmt.Errorf("failed to verify signature: %v", err)
}
// Ensure that the payload returned by the square/go-jose signature verification actually matches the payload parsed earlier.
if !bytes.Equal(gotPayload, payload) {
return nil, errors.New("oidc: internal error, payload parsed did not match previous payload")
}
return t, nil
}
// Nonce returns an auth code option which requires the ID Token created by the
// OpenID Connect provider to contain the specified nonce.
func Nonce(nonce string) oauth2.AuthCodeOption {
return oauth2.SetAuthURLParam("nonce", nonce)
}

View file

@ -1 +0,0 @@
.idea/

View file

@ -1,25 +0,0 @@
Copyright (c) 2015, Duo Security, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,19 +0,0 @@
# Overview
**duo_api_golang** - Go language bindings for the Duo APIs (both auth and admin).
## Duo Auth API
The Auth API is a low-level, RESTful API for adding strong two-factor authentication to your website or application.
This module's API client implementation is *complete*; corresponding methods are exported for all available endpoints.
For more information see the [Auth API guide](https://duo.com/docs/authapi).
## Duo Admin API
The Admin API provides programmatic access to the administrative functionality of Duo Security's two-factor authentication platform.
This module's API client implementation is *incomplete*; methods for fetching most entity types are exported, but methods that modify entities have (mostly) not yet been implemented. PRs welcome!
For more information see the [Admin API guide](https://duo.com/docs/adminapi).
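As a quick orientation, the sketch below shows what a minimal Auth API client could look like. It is illustrative only: the ikey, skey and host values are placeholders, and the `authapi` import path is assumed to follow this repository's package layout.

```go
package main

import (
	"fmt"
	"time"

	"github.com/duosecurity/duo_api_golang"
	"github.com/duosecurity/duo_api_golang/authapi"
)

func main() {
	api := authapi.NewAuthApi(*duoapi.NewDuoApi(
		"DIXXXXXXXXXXXXXXXXXX",         // integration key (placeholder)
		"secret",                       // secret key (placeholder)
		"api-XXXXXXXX.duosecurity.com", // API hostname (placeholder)
		"example-client",               // user agent
		duoapi.SetTimeout(10*time.Second),
	))

	// Check verifies the keys, host and local clock against Duo.
	res, err := api.Check()
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Stat, res.Response.Time)
}
```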

View file

@ -1,381 +0,0 @@
package authapi
import (
"encoding/json"
"net/url"
"strconv"
"github.com/duosecurity/duo_api_golang"
)
type AuthApi struct {
duoapi.DuoApi
}
// Build a new Duo Auth API object.
// api is a duoapi.DuoApi object used to make the Duo Rest API calls.
// Example: authapi.NewAuthApi(*duoapi.NewDuoApi(ikey,skey,host,userAgent,duoapi.SetTimeout(10*time.Second)))
func NewAuthApi(api duoapi.DuoApi) *AuthApi {
return &AuthApi{api}
}
// Left in place for backwards compatibility.
// The struct in use has been moved to the duoapi package, to be shared between the admin and authapi packages.
type StatResult struct {
Stat string
Code *int32
Message *string
Message_Detail *string
}
// Return object for the 'Ping' API call.
type PingResult struct {
duoapi.StatResult
Response struct {
Time int64
}
}
// Duo's Ping method. https://www.duosecurity.com/docs/authapi#/ping
// This is an unsigned Duo Rest API call which returns the Duo system's time.
// Use this method to determine whether your system time is in sync with Duo's.
func (api *AuthApi) Ping() (*PingResult, error) {
_, body, err := api.Call("GET", "/auth/v2/ping", nil, duoapi.UseTimeout)
if err != nil {
return nil, err
}
ret := &PingResult{}
if err = json.Unmarshal(body, ret); err != nil {
return nil, err
}
return ret, nil
}
// Return object for the 'Check' API call.
type CheckResult struct {
duoapi.StatResult
Response struct {
Time int64
}
}
// Call Duo's Check method. https://www.duosecurity.com/docs/authapi#/check
// Check is a signed Duo API call, which returns the Duo system's time.
// Use this method to determine whether your ikey, skey and host are correct,
// and whether your system time is in sync with Duo's.
func (api *AuthApi) Check() (*CheckResult, error) {
_, body, err := api.SignedCall("GET", "/auth/v2/check", nil, duoapi.UseTimeout)
if err != nil {
return nil, err
}
ret := &CheckResult{}
if err = json.Unmarshal(body, ret); err != nil {
return nil, err
}
return ret, nil
}
// Return object for the 'Logo' API call.
type LogoResult struct {
duoapi.StatResult
png *[]byte
}
// Duo's Logo method. https://www.duosecurity.com/docs/authapi#/logo
// If the API call is successful, the configured logo png is returned. Otherwise,
// error information is returned in the LogoResult return value.
func (api *AuthApi) Logo() (*LogoResult, error) {
resp, body, err := api.SignedCall("GET", "/auth/v2/logo", nil, duoapi.UseTimeout)
if err != nil {
return nil, err
}
if resp.StatusCode == 200 {
ret := &LogoResult{StatResult: duoapi.StatResult{Stat: "OK"},
png: &body}
return ret, nil
}
ret := &LogoResult{}
if err = json.Unmarshal(body, ret); err != nil {
return nil, err
}
return ret, nil
}
// Optional parameter for the Enroll method.
func EnrollUsername(username string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("username", username)
}
}
// Optional parameter for the Enroll method.
func EnrollValidSeconds(secs uint64) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("valid_secs", strconv.FormatUint(secs, 10))
}
}
// Enroll return type.
type EnrollResult struct {
duoapi.StatResult
Response struct {
Activation_Barcode string
Activation_Code string
Expiration int64
User_Id string
Username string
}
}
// Duo's Enroll method. https://www.duosecurity.com/docs/authapi#/enroll
// Use EnrollUsername() to include the optional username parameter.
// Use EnrollValidSeconds() to change the default validation time limit that the
// user has to complete enrollment.
func (api *AuthApi) Enroll(options ...func(*url.Values)) (*EnrollResult, error) {
opts := url.Values{}
for _, o := range options {
o(&opts)
}
_, body, err := api.SignedCall("POST", "/auth/v2/enroll", opts, duoapi.UseTimeout)
if err != nil {
return nil, err
}
ret := &EnrollResult{}
if err = json.Unmarshal(body, ret); err != nil {
return nil, err
}
return ret, nil
}
// Response is "success", "invalid" or "waiting".
type EnrollStatusResult struct {
duoapi.StatResult
Response string
}
// Duo's EnrollStatus method. https://www.duosecurity.com/docs/authapi#/enroll_status
// Return the status of an outstanding Enrollment.
func (api *AuthApi) EnrollStatus(userid string,
activationCode string) (*EnrollStatusResult, error) {
queryArgs := url.Values{}
queryArgs.Set("user_id", userid)
queryArgs.Set("activation_code", activationCode)
_, body, err := api.SignedCall("POST",
"/auth/v2/enroll_status",
queryArgs,
duoapi.UseTimeout)
if err != nil {
return nil, err
}
ret := &EnrollStatusResult{}
if err = json.Unmarshal(body, ret); err != nil {
return nil, err
}
return ret, nil
}
// Preauth return type.
type PreauthResult struct {
duoapi.StatResult
Response struct {
Result string
Status_Msg string
Enroll_Portal_Url string
Devices []struct {
Device string
Type string
Name string
Number string
Capabilities []string
}
}
}
func PreauthUserId(userid string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("user_id", userid)
}
}
func PreauthUsername(username string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("username", username)
}
}
func PreauthIpAddr(ip string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("ipaddr", ip)
}
}
func PreauthTrustedToken(trustedtoken string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("trusted_device_token", trustedtoken)
}
}
// Duo's Preauth method. https://www.duosecurity.com/docs/authapi#/preauth
// options Optional values to include in the preauth call.
// Use PreauthUserId to specify the user_id parameter.
// Use PreauthUsername to specify the username parameter. You must
// specify PreauthUserId or PreauthUsername, but not both.
// Use PreauthIpAddr to include the ipaddr parameter, the IP address
// of the client attempting authorization.
// Use PreauthTrustedToken to specify the trusted_device_token parameter.
func (api *AuthApi) Preauth(options ...func(*url.Values)) (*PreauthResult, error) {
opts := url.Values{}
for _, o := range options {
o(&opts)
}
_, body, err := api.SignedCall("POST", "/auth/v2/preauth", opts, duoapi.UseTimeout)
if err != nil {
return nil, err
}
ret := &PreauthResult{}
if err = json.Unmarshal(body, ret); err != nil {
return nil, err
}
return ret, nil
}
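// A minimal usage sketch from a caller's perspective (illustrative only;
// "someuser" is a placeholder username):
//
//	pre, err := api.Preauth(authapi.PreauthUsername("someuser"))
//	if err != nil {
//		// handle error
//	}
//	// pre.Response.Result indicates whether the user may authenticate
//	// (for example "auth" or "allow") and pre.Response.Devices lists the
//	// user's registered devices.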
func AuthUserId(userid string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("user_id", userid)
}
}
func AuthUsername(username string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("username", username)
}
}
func AuthIpAddr(ip string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("ipaddr", ip)
}
}
func AuthAsync() func(*url.Values) {
return func(opts *url.Values) {
opts.Set("async", "1")
}
}
func AuthDevice(device string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("device", device)
}
}
func AuthType(type_ string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("type", type_)
}
}
func AuthDisplayUsername(username string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("display_username", username)
}
}
func AuthPushinfo(pushinfo string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("pushinfo", pushinfo)
}
}
func AuthPasscode(passcode string) func(*url.Values) {
return func(opts *url.Values) {
opts.Set("passcode", passcode)
}
}
// Auth return type.
type AuthResult struct {
duoapi.StatResult
Response struct {
// Synchronous
Result string
Status string
Status_Msg string
Trusted_Device_Token string
// Asynchronous
Txid string
}
}
// Duo's Auth method. https://www.duosecurity.com/docs/authapi#/auth
// Factor must be one of 'auto', 'push', 'passcode', 'sms' or 'phone'.
// Use AuthUserId to specify the user_id.
// Use AuthUsername to specify the username. You must specify either AuthUserId
// or AuthUsername, but not both.
// Use AuthIpAddr to include the client's IP address.
// Use AuthAsync to toggle whether the call blocks for the user's response or not.
// If used asynchronously, get the auth status with the AuthStatus method.
// When using factor 'push', use AuthDevice to specify the device ID to push to.
// When using factor 'push', use AuthType to display some extra auth text to the user.
// When using factor 'push', use AuthDisplayUsername to display some extra text
// to the user.
// When using factor 'push', use AuthPushinfo to include some URL-encoded key/value
// pairs to display to the user.
// When using factor 'passcode', use AuthPasscode to specify the passcode entered
// by the user.
// When using factor 'sms' or 'phone', use AuthDevice to specify which device
// should receive the SMS or phone call.
func (api *AuthApi) Auth(factor string, options ...func(*url.Values)) (*AuthResult, error) {
params := url.Values{}
for _, o := range options {
o(&params)
}
params.Set("factor", factor)
var apiOps []duoapi.DuoApiOption
if _, ok := params["async"]; ok {
apiOps = append(apiOps, duoapi.UseTimeout)
}
_, body, err := api.SignedCall("POST", "/auth/v2/auth", params, apiOps...)
if err != nil {
return nil, err
}
ret := &AuthResult{}
if err = json.Unmarshal(body, ret); err != nil {
return nil, err
}
return ret, nil
}
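// A minimal usage sketch for a synchronous push authentication from a
// caller's perspective (illustrative only; "someuser" is a placeholder and
// "auto" is assumed to let Duo pick the user's default device):
//
//	result, err := api.Auth("push",
//		authapi.AuthUsername("someuser"),
//		authapi.AuthDevice("auto"),
//	)
//	if err != nil {
//		// handle error
//	}
//	// result.Response.Result is typically "allow" or "deny" once the user
//	// has responded to the push.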
// AuthStatus return type.
type AuthStatusResult struct {
duoapi.StatResult
Response struct {
Result string
Status string
Status_Msg string
Trusted_Device_Token string
}
}
// Duo's auth_status method. https://www.duosecurity.com/docs/authapi#/auth_status
// When using the Auth call in async mode, use this method to retrieve the
// result of the authentication attempt.
// txid is returned by the Auth call.
func (api *AuthApi) AuthStatus(txid string) (*AuthStatusResult, error) {
opts := url.Values{}
opts.Set("txid", txid)
_, body, err := api.SignedCall("GET", "/auth/v2/auth_status", opts)
if err != nil {
return nil, err
}
ret := &AuthStatusResult{}
if err = json.Unmarshal(body, ret); err != nil {
return nil, err
}
return ret, nil
}

View file

@ -1,428 +0,0 @@
package duoapi
import (
"crypto/hmac"
"crypto/sha1"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/hex"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"sort"
"strings"
"time"
)
const (
initialBackoffMS = 1000
maxBackoffMS = 32000
backoffFactor = 2
rateLimitHttpCode = 429
)
var spaceReplacer *strings.Replacer = strings.NewReplacer("+", "%20")
func canonParams(params url.Values) string {
// Values must be in sorted order
for key, val := range params {
sort.Strings(val)
params[key] = val
}
// Encode will place Keys in sorted order
ordered_params := params.Encode()
// Encoder turns spaces into +, but we need %XX escaping
return spaceReplacer.Replace(ordered_params)
}
func canonicalize(method string,
host string,
uri string,
params url.Values,
date string) string {
var canon [5]string
canon[0] = date
canon[1] = strings.ToUpper(method)
canon[2] = strings.ToLower(host)
canon[3] = uri
canon[4] = canonParams(params)
return strings.Join(canon[:], "\n")
}
func sign(ikey string,
skey string,
method string,
host string,
uri string,
date string,
params url.Values) string {
canon := canonicalize(method, host, uri, params, date)
mac := hmac.New(sha1.New, []byte(skey))
mac.Write([]byte(canon))
sig := hex.EncodeToString(mac.Sum(nil))
auth := ikey + ":" + sig
return "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
}
type DuoApi struct {
ikey string
skey string
host string
userAgent string
apiClient httpClient
authClient httpClient
sleepSvc sleepService
}
type httpClient interface {
Do(req *http.Request) (*http.Response, error)
}
type sleepService interface {
Sleep(duration time.Duration)
}
type timeSleepService struct{}
func (svc timeSleepService) Sleep(duration time.Duration) {
time.Sleep(duration + (time.Duration(rand.Intn(1000)) * time.Millisecond))
}
type apiOptions struct {
timeout time.Duration
insecure bool
proxy func(*http.Request) (*url.URL, error)
}
// Optional parameter for NewDuoApi, used to configure timeouts on API calls.
func SetTimeout(timeout time.Duration) func(*apiOptions) {
return func(opts *apiOptions) {
opts.timeout = timeout
}
}
// Optional parameter for testing only. Bypasses all TLS certificate validation.
func SetInsecure() func(*apiOptions) {
return func(opts *apiOptions) {
opts.insecure = true
}
}
// Optional parameter for NewDuoApi, used to configure an HTTP Connect proxy
// server for all outbound communications.
func SetProxy(proxy func(*http.Request) (*url.URL, error)) func(*apiOptions) {
return func(opts *apiOptions) {
opts.proxy = proxy
}
}
// Build and return a DuoApi struct.
// ikey is your Duo integration key
// skey is your Duo integration secret key
// host is your Duo host
// userAgent allows you to specify the user agent string used when making
// the web request to Duo.
// options are optional parameters. Use SetTimeout() to specify a timeout value
// for Rest API calls. Use SetProxy() to specify proxy settings for Duo API calls.
//
// Example: duoapi.NewDuoApi(ikey,skey,host,userAgent,duoapi.SetTimeout(10*time.Second))
func NewDuoApi(ikey string,
skey string,
host string,
userAgent string,
options ...func(*apiOptions)) *DuoApi {
opts := apiOptions{proxy: http.ProxyFromEnvironment}
for _, o := range options {
o(&opts)
}
// Certificate pinning
certPool := x509.NewCertPool()
certPool.AppendCertsFromPEM([]byte(duoPinnedCert))
tr := &http.Transport{
Proxy: opts.proxy,
TLSClientConfig: &tls.Config{
RootCAs: certPool,
InsecureSkipVerify: opts.insecure,
},
}
return &DuoApi{
ikey: ikey,
skey: skey,
host: host,
userAgent: userAgent,
apiClient: &http.Client{
Timeout: opts.timeout,
Transport: tr,
},
authClient: &http.Client{
Transport: tr,
},
sleepSvc: timeSleepService{},
}
}
type requestOptions struct {
timeout bool
}
type DuoApiOption func(*requestOptions)
// Pass to Request or SignedRequest to configure a timeout on the request
func UseTimeout(opts *requestOptions) {
opts.timeout = true
}
func (duoapi *DuoApi) buildOptions(options ...DuoApiOption) *requestOptions {
opts := &requestOptions{}
for _, o := range options {
o(opts)
}
return opts
}
// API calls will return a StatResult object. On success, Stat is 'OK'.
// On error, Stat is 'FAIL', and Code, Message, and Message_Detail
// contain error information.
type StatResult struct {
Stat string
Code *int32
Message *string
Message_Detail *string
}
// Make an unsigned Duo Rest API call. See Duo's online documentation
// for the available REST API's.
// method is POST or GET
// uri is the URI of the Duo Rest call
// params HTTP query parameters to include in the call.
// options Optional parameters. Use UseTimeout to toggle whether the
// Duo Rest API call should timeout or not.
//
// Example: duo.Call("GET", "/auth/v2/ping", nil, duoapi.UseTimeout)
func (duoapi *DuoApi) Call(method string,
uri string,
params url.Values,
options ...DuoApiOption) (*http.Response, []byte, error) {
url := url.URL{
Scheme: "https",
Host: duoapi.host,
Path: uri,
RawQuery: params.Encode(),
}
return duoapi.makeRetryableHttpCall(method, url, nil, nil, options...)
}
// Make a signed Duo Rest API call. See Duo's online documentation
// for the available REST API's.
// method is POST or GET
// uri is the URI of the Duo Rest call
// params HTTP query parameters to include in the call.
// options Optional parameters. Use UseTimeout to toggle whether the
// Duo Rest API call should timeout or not.
//
// Example: duo.SignedCall("GET", "/auth/v2/check", nil, duoapi.UseTimeout)
func (duoapi *DuoApi) SignedCall(method string,
uri string,
params url.Values,
options ...DuoApiOption) (*http.Response, []byte, error) {
now := time.Now().UTC().Format(time.RFC1123Z)
auth_sig := sign(duoapi.ikey, duoapi.skey, method, duoapi.host, uri, now, params)
url := url.URL{
Scheme: "https",
Host: duoapi.host,
Path: uri,
}
method = strings.ToUpper(method)
if method == "GET" {
url.RawQuery = params.Encode()
}
headers := make(map[string]string)
headers["Authorization"] = auth_sig
headers["Date"] = now
var requestBody io.ReadCloser = nil
if method == "POST" || method == "PUT" {
headers["Content-Type"] = "application/x-www-form-urlencoded"
requestBody = ioutil.NopCloser(strings.NewReader(params.Encode()))
}
return duoapi.makeRetryableHttpCall(method, url, headers, requestBody, options...)
}
func (duoapi *DuoApi) makeRetryableHttpCall(
method string,
url url.URL,
headers map[string]string,
body io.ReadCloser,
options ...DuoApiOption) (*http.Response, []byte, error) {
opts := duoapi.buildOptions(options...)
client := duoapi.authClient
if opts.timeout {
client = duoapi.apiClient
}
backoffMs := initialBackoffMS
for {
request, err := http.NewRequest(method, url.String(), nil)
if err != nil {
return nil, nil, err
}
if headers != nil {
for k, v := range headers {
request.Header.Set(k, v)
}
}
if body != nil {
request.Body = body
}
resp, err := client.Do(request)
var body []byte
if err != nil {
return resp, body, err
}
if backoffMs > maxBackoffMS || resp.StatusCode != rateLimitHttpCode {
body, err = ioutil.ReadAll(resp.Body)
resp.Body.Close()
return resp, body, err
}
duoapi.sleepSvc.Sleep(time.Millisecond * time.Duration(backoffMs))
backoffMs *= backoffFactor
}
}
const duoPinnedCert string = `
subject= /C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert Assured ID Root CA
-----BEGIN CERTIFICATE-----
MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG
EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c
JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP
mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+
wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4
VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/
AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB
AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun
pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC
dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf
fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm
NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx
H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe
+o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g==
-----END CERTIFICATE-----
subject= /C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert Global Root CA
-----BEGIN CERTIFICATE-----
MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT
MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG
9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB
CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97
nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt
43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P
T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4
gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO
BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR
TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw
DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr
hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg
06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF
PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls
YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk
CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4=
-----END CERTIFICATE-----
subject= /C=US/O=DigiCert Inc/OU=www.digicert.com/CN=DigiCert High Assurance EV Root CA
-----BEGIN CERTIFICATE-----
MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j
ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL
MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3
LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug
RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm
+9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW
PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM
xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB
Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3
hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg
EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF
MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA
FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec
nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z
eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF
hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2
Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe
vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep
+OkuE6N36B9K
-----END CERTIFICATE-----
subject= /C=US/O=SecureTrust Corporation/CN=SecureTrust CA
-----BEGIN CERTIFICATE-----
MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI
MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz
MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv
cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz
Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO
0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao
wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj
7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS
8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT
BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg
JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC
NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3
6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/
3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm
D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS
CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR
3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE=
-----END CERTIFICATE-----
subject= /C=US/O=SecureTrust Corporation/CN=Secure Global CA
-----BEGIN CERTIFICATE-----
MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK
MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx
MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg
Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG
SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ
iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa
/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ
jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI
HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7
sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w
gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF
MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw
KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG
AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L
URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO
H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm
I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY
iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc
f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW
-----END CERTIFICATE-----`

View file

@ -1 +0,0 @@
module github.com/duosecurity/duo_api_golang

View file

@ -1 +0,0 @@
* text eol=lf

View file

@ -1,42 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
.idea
.vscode
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
.project
EBNF.txt
test1.tpl
pongo2_internal_test.go
tpl-error.out
/count.out
/cover.out
*.swp
*.iml
/cpu.out
/mem.out
/pongo2.test
*.error
/profile
/coverage.out
/pongo2_internal_test.ignore
go.sum

View file

@ -1,8 +0,0 @@
language: go
os:
- linux
- osx
go:
- 1.12
script:
- go test -v

View file

@ -1,11 +0,0 @@
Main author and maintainer of pongo2:
* Florian Schlachter <flori@n-schlachter.de>
Contributors (in no specific order):
* @romanoaugusto88
* @vitalbh
* @blaubaer
Feel free to add yourself to the list or to modify your entry if you made a contribution.

View file

@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2013-2014 Florian Schlachter
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View file

@ -1,273 +0,0 @@
# [pongo](https://en.wikipedia.org/wiki/Pongo_%28genus%29)2
[![Join the chat at https://gitter.im/flosch/pongo2](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/flosch/pongo2)
[![GoDoc](https://godoc.org/github.com/flosch/pongo2?status.svg)](https://godoc.org/github.com/flosch/pongo2)
[![Build Status](https://travis-ci.org/flosch/pongo2.svg?branch=master)](https://travis-ci.org/flosch/pongo2)
[![Backers on Open Collective](https://opencollective.com/pongo2/backers/badge.svg)](#backers)
[![Sponsors on Open Collective](https://opencollective.com/pongo2/sponsors/badge.svg)](#sponsors)
pongo2 is the successor of [pongo](https://github.com/flosch/pongo), a Django-syntax like templating-language.
Install/update using `go get` (no dependencies required by pongo2):
```
go get -u github.com/flosch/pongo2
```
Please use the [issue tracker](https://github.com/flosch/pongo2/issues) if you're encountering any problems with pongo2 or if you need help with implementing tags or filters ([create a ticket!](https://github.com/flosch/pongo2/issues/new)).
## First impression of a template
```HTML+Django
<html><head><title>Our admins and users</title></head>
{# This is a short example to give you a quick overview of pongo2's syntax. #}
{% macro user_details(user, is_admin=false) %}
<div class="user_item">
<!-- Let's indicate a user's good karma -->
<h2 {% if (user.karma >= 40) || (user.karma > calc_avg_karma(userlist)+5) %}
class="karma-good"{% endif %}>
<!-- This will call user.String() automatically if available: -->
{{ user }}
</h2>
<!-- Will print a human-readable time duration like "3 weeks ago" -->
<p>This user registered {{ user.register_date|naturaltime }}.</p>
<!-- Let's allow the users to write down their biography using markdown;
we will only show the first 15 words as a preview -->
<p>The user's biography:</p>
<p>{{ user.biography|markdown|truncatewords_html:15 }}
<a href="/user/{{ user.id }}/">read more</a></p>
{% if is_admin %}<p>This user is an admin!</p>{% endif %}
</div>
{% endmacro %}
<body>
<!-- Make use of the macro defined above to avoid repetitive HTML code
since we want to use the same code for admins AND members -->
<h1>Our admins</h1>
{% for admin in adminlist %}
{{ user_details(admin, true) }}
{% endfor %}
<h1>Our members</h1>
{% for user in userlist %}
{{ user_details(user) }}
{% endfor %}
</body>
</html>
```
## Development status
**Latest stable release**: v3.0 (`go get -u gopkg.in/flosch/pongo2.v3` / [`v3`](https://github.com/flosch/pongo2/tree/v3)-branch)
**Current development**: v4 (`master`-branch)
*Note*: With the release of pongo v4 the branch v2 will be deprecated.
**Deprecated versions** (not supported anymore): v1
| Topic | Status |
| ------------------------------------ | -------------------------------------------------------------------------------------- |
| Django version compatibility: | [1.7](https://docs.djangoproject.com/en/1.7/ref/templates/builtins/) |
| *Missing* (planned) **filters**: | none ([hints](https://github.com/flosch/pongo2/blob/master/filters_builtin.go#L3)) |
| *Missing* (planned) **tags**: | none ([hints](https://github.com/flosch/pongo2/blob/master/tags.go#L3)) |
Please also have a look at the [caveats](https://github.com/flosch/pongo2#caveats) and at the [official add-ons](https://github.com/flosch/pongo2#official).
## Features (and new in pongo2)
* Entirely rewritten from the ground-up.
* [Advanced C-like expressions](https://github.com/flosch/pongo2/blob/master/template_tests/expressions.tpl).
* [Complex function calls within expressions](https://github.com/flosch/pongo2/blob/master/template_tests/function_calls_wrapper.tpl).
* [Easy API to create new filters and tags](http://godoc.org/github.com/flosch/pongo2#RegisterFilter) ([including parsing arguments](http://godoc.org/github.com/flosch/pongo2#Parser))
* Additional features:
* Macros including importing macros from other files (see [template_tests/macro.tpl](https://github.com/flosch/pongo2/blob/master/template_tests/macro.tpl))
* [Template sandboxing](https://godoc.org/github.com/flosch/pongo2#TemplateSet) ([directory patterns](http://golang.org/pkg/path/filepath/#Match), banned tags/filters)
## Recent API changes within pongo2
If you're using the `master`-branch of pongo2, you might be interested in this section. Since pongo2 is still in development (even though there is a first stable release!), there could be (backwards-incompatible) API changes over time. To keep track of these and therefore make it painless for you to adapt your codebase, I'll list them here.
* Function signature for tag execution changed: not taking a `bytes.Buffer` anymore; instead `Execute()`-functions are now taking a `TemplateWriter` interface.
* Function signature for tag and filter parsing/execution changed (`error` return type changed to `*Error`).
* `INodeEvaluator` has been removed and got replaced by `IEvaluator`. You can change your existing tags/filters by simply replacing the interface.
* Two new helper functions: [`RenderTemplateFile()`](https://godoc.org/github.com/flosch/pongo2#RenderTemplateFile) and [`RenderTemplateString()`](https://godoc.org/github.com/flosch/pongo2#RenderTemplateString).
* `Template.ExecuteRW()` is now [`Template.ExecuteWriter()`](https://godoc.org/github.com/flosch/pongo2#Template.ExecuteWriter)
* `Template.Execute*()` functions do now take a `pongo2.Context` directly (no pointer anymore).
## How you can help
* Write [filters](https://github.com/flosch/pongo2/blob/master/filters_builtin.go#L3) / [tags](https://github.com/flosch/pongo2/blob/master/tags.go#L3) by forking pongo2 and sending pull requests
* Write/improve code tests (use the following command to see what tests are missing: `go test -v -cover -covermode=count -coverprofile=cover.out && go tool cover -html=cover.out` or have a look at [gocover.io/github.com/flosch/pongo2](http://gocover.io/github.com/flosch/pongo2))
* Write/improve template tests (see the `template_tests/` directory)
* Write middleware, libraries and websites using pongo2. :-)
# Documentation
For documentation on how the templating language works you can [head over to the Django documentation](https://docs.djangoproject.com/en/dev/topics/templates/). pongo2 aims to be compatible with it.
You can access pongo2's API documentation on [godoc](https://godoc.org/github.com/flosch/pongo2).
## Caveats
### Filters
* **date** / **time**: The `date` and `time` filters currently take the Go-specific time and date format (not Django's). [Take a look at the format here](http://golang.org/pkg/time/#Time.Format).
* **stringformat**: `stringformat` does **not** take Python's string format syntax as a parameter, instead it takes Go's. Essentially `{{ 3.14|stringformat:"pi is %.2f" }}` is `fmt.Sprintf("pi is %.2f", 3.14)`.
* **escape** / **force_escape**: Unlike Django's behaviour, the `escape`-filter is applied immediately. Therefore there is no need for a `force_escape`-filter yet.
### Tags
* **for**: All the `forloop` fields (like `forloop.counter`) are written with a capital letter at the beginning. For example, the `counter` can be accessed by `forloop.Counter` and the parentloop by `forloop.Parentloop`.
* **now**: takes Go's time format (see **date** and **time**-filter).
### Misc
* **not in-operator**: You can check whether a map/struct/string contains a key/field/substring by using the in-operator (or the negation of it):
`{% if key in map %}Key is in map{% else %}Key not in map{% endif %}` or `{% if !(key in map) %}Key is NOT in map{% else %}Key is in map{% endif %}`.
# Add-ons, libraries and helpers
## Official
* [ponginae](https://github.com/flosch/ponginae) - A web-framework for Go (using pongo2).
* [pongo2-tools](https://github.com/flosch/pongo2-tools) - Official tools and helpers for pongo2
* [pongo2-addons](https://github.com/flosch/pongo2-addons) - Official additional filters/tags for pongo2 (for example a **markdown**-filter). They are in their own repository because they're relying on 3rd-party-libraries.
## 3rd-party
* [beego-pongo2](https://github.com/oal/beego-pongo2) - A tiny little helper for using Pongo2 with [Beego](https://github.com/astaxie/beego).
* [beego-pongo2.v2](https://github.com/ipfans/beego-pongo2.v2) - Same as `beego-pongo2`, but for pongo2 v2.
* [macaron-pongo2](https://github.com/macaron-contrib/pongo2) - pongo2 support for [Macaron](https://github.com/Unknwon/macaron), a modular web framework.
* [ginpongo2](https://github.com/ngerakines/ginpongo2) - middleware for [gin](github.com/gin-gonic/gin) to use pongo2 templates
* [Build'n support for Iris' template engine](https://github.com/kataras/iris)
* [pongo2gin](https://gitlab.com/go-box/pongo2gin) - alternative renderer for [gin](github.com/gin-gonic/gin) to use pongo2 templates
* [pongo2-trans](https://github.com/digitalcrab/pongo2trans) - `trans`-tag implementation for internationalization
* [tpongo2](https://github.com/tango-contrib/tpongo2) - pongo2 support for [Tango](https://github.com/lunny/tango), a micro-kernel & pluggable web framework.
* [p2cli](https://github.com/wrouesnel/p2cli) - command line templating utility based on pongo2
Please add your project to this list and send me a pull request when you've developed something nice for pongo2.
# API-usage examples
Please see the documentation for a full list of provided API methods.
## A tiny example (template string)
```Go
// Compile the template first (i. e. creating the AST)
tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
if err != nil {
panic(err)
}
// Now you can render the template with the given
// pongo2.Context how often you want to.
out, err := tpl.Execute(pongo2.Context{"name": "florian"})
if err != nil {
panic(err)
}
fmt.Println(out) // Output: Hello Florian!
```
## Example server-usage (template file)
```Go
package main
import (
"github.com/flosch/pongo2"
"net/http"
)
// Pre-compiling the templates at application startup using the
// little Must()-helper function (Must() will panic if FromFile()
// or FromString() will return with an error - that's it).
// It's faster to pre-compile it anywhere at startup and only
// execute the template later.
var tplExample = pongo2.Must(pongo2.FromFile("example.html"))
func examplePage(w http.ResponseWriter, r *http.Request) {
// Execute the template per HTTP request
err := tplExample.ExecuteWriter(pongo2.Context{"query": r.FormValue("query")}, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
func main() {
http.HandleFunc("/", examplePage)
http.ListenAndServe(":8080", nil)
}
```
# Benchmark
The benchmarks have been run on my machine (`Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz`) using the command:
go test -bench . -cpu 1,2,4,8
All benchmarks are compiling (depends on the benchmark) and executing the `template_tests/complex.tpl` template.
The results are:
BenchmarkExecuteComplexWithSandboxActive 50000 60450 ns/op
BenchmarkExecuteComplexWithSandboxActive-2 50000 56998 ns/op
BenchmarkExecuteComplexWithSandboxActive-4 50000 60343 ns/op
BenchmarkExecuteComplexWithSandboxActive-8 50000 64229 ns/op
BenchmarkCompileAndExecuteComplexWithSandboxActive 10000 164410 ns/op
BenchmarkCompileAndExecuteComplexWithSandboxActive-2 10000 156682 ns/op
BenchmarkCompileAndExecuteComplexWithSandboxActive-4 10000 164821 ns/op
BenchmarkCompileAndExecuteComplexWithSandboxActive-8 10000 171806 ns/op
BenchmarkParallelExecuteComplexWithSandboxActive 50000 60428 ns/op
BenchmarkParallelExecuteComplexWithSandboxActive-2 50000 31887 ns/op
BenchmarkParallelExecuteComplexWithSandboxActive-4 100000 22810 ns/op
BenchmarkParallelExecuteComplexWithSandboxActive-8 100000 18820 ns/op
BenchmarkExecuteComplexWithoutSandbox 50000 56942 ns/op
BenchmarkExecuteComplexWithoutSandbox-2 50000 56168 ns/op
BenchmarkExecuteComplexWithoutSandbox-4 50000 57838 ns/op
BenchmarkExecuteComplexWithoutSandbox-8 50000 60539 ns/op
BenchmarkCompileAndExecuteComplexWithoutSandbox 10000 162086 ns/op
BenchmarkCompileAndExecuteComplexWithoutSandbox-2 10000 159771 ns/op
BenchmarkCompileAndExecuteComplexWithoutSandbox-4 10000 163826 ns/op
BenchmarkCompileAndExecuteComplexWithoutSandbox-8 10000 169062 ns/op
BenchmarkParallelExecuteComplexWithoutSandbox 50000 57152 ns/op
BenchmarkParallelExecuteComplexWithoutSandbox-2 50000 30276 ns/op
BenchmarkParallelExecuteComplexWithoutSandbox-4 100000 22065 ns/op
BenchmarkParallelExecuteComplexWithoutSandbox-8 100000 18034 ns/op
Benchmarked on October 2nd 2014.
## Contributors
This project exists thanks to all the people who contribute.
<a href="https://github.com/flosch/pongo2/graphs/contributors"><img src="https://opencollective.com/pongo2/contributors.svg?width=890&button=false" /></a>
## Backers
Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/pongo2#backer)]
<a href="https://opencollective.com/pongo2#backers" target="_blank"><img src="https://opencollective.com/pongo2/backers.svg?width=890"></a>
## Sponsors
Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/pongo2#sponsor)]
<a href="https://opencollective.com/pongo2/sponsor/0/website" target="_blank"><img src="https://opencollective.com/pongo2/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/pongo2/sponsor/1/website" target="_blank"><img src="https://opencollective.com/pongo2/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/pongo2/sponsor/2/website" target="_blank"><img src="https://opencollective.com/pongo2/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/pongo2/sponsor/3/website" target="_blank"><img src="https://opencollective.com/pongo2/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/pongo2/sponsor/4/website" target="_blank"><img src="https://opencollective.com/pongo2/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/pongo2/sponsor/5/website" target="_blank"><img src="https://opencollective.com/pongo2/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/pongo2/sponsor/6/website" target="_blank"><img src="https://opencollective.com/pongo2/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/pongo2/sponsor/7/website" target="_blank"><img src="https://opencollective.com/pongo2/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/pongo2/sponsor/8/website" target="_blank"><img src="https://opencollective.com/pongo2/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/pongo2/sponsor/9/website" target="_blank"><img src="https://opencollective.com/pongo2/sponsor/9/avatar.svg"></a>

View file

@ -1,136 +0,0 @@
package pongo2
import (
"regexp"
"github.com/juju/errors"
)
var reIdentifiers = regexp.MustCompile("^[a-zA-Z0-9_]+$")
var autoescape = true
func SetAutoescape(newValue bool) {
autoescape = newValue
}
// A Context type provides constants, variables, instances or functions to a template.
//
// pongo2 automatically provides meta-information or functions through the "pongo2"-key.
// Currently, context["pongo2"] contains the following keys:
// 1. version: returns the version string
//
// Template examples for accessing items from your context:
// {{ myconstant }}
// {{ myfunc("test", 42) }}
// {{ user.name }}
// {{ pongo2.version }}
type Context map[string]interface{}
func (c Context) checkForValidIdentifiers() *Error {
for k, v := range c {
if !reIdentifiers.MatchString(k) {
return &Error{
Sender: "checkForValidIdentifiers",
OrigError: errors.Errorf("context-key '%s' (value: '%+v') is not a valid identifier", k, v),
}
}
}
return nil
}
// Update updates this context with the key/value-pairs from another context.
func (c Context) Update(other Context) Context {
for k, v := range other {
c[k] = v
}
return c
}
// ExecutionContext contains all data important for the current rendering state.
//
// If you're writing a custom tag, your tag's Execute()-function will
// have access to the ExecutionContext. This struct stores anything
// about the current rendering process's Context including
// the Context provided by the user (field Public).
// You can safely use the Private context to provide data to the user's
// template (like 'forloop' information). The Shared context is used
// to share data between tags. All ExecutionContexts share this context.
//
// Please be careful when accessing the Public data.
// PLEASE DO NOT MODIFY THE PUBLIC CONTEXT (read-only).
//
// To create your own execution context within tags, use the
// NewChildExecutionContext(parent) function.
type ExecutionContext struct {
template *Template
Autoescape bool
Public Context
Private Context
Shared Context
}
var pongo2MetaContext = Context{
"version": Version,
}
func newExecutionContext(tpl *Template, ctx Context) *ExecutionContext {
privateCtx := make(Context)
// Make the pongo2-related funcs/vars available to the context
privateCtx["pongo2"] = pongo2MetaContext
return &ExecutionContext{
template: tpl,
Public: ctx,
Private: privateCtx,
Autoescape: autoescape,
}
}
func NewChildExecutionContext(parent *ExecutionContext) *ExecutionContext {
newctx := &ExecutionContext{
template: parent.template,
Public: parent.Public,
Private: make(Context),
Autoescape: parent.Autoescape,
}
newctx.Shared = parent.Shared
// Copy all existing private items
newctx.Private.Update(parent.Private)
return newctx
}
func (ctx *ExecutionContext) Error(msg string, token *Token) *Error {
return ctx.OrigError(errors.New(msg), token)
}
func (ctx *ExecutionContext) OrigError(err error, token *Token) *Error {
filename := ctx.template.name
var line, col int
if token != nil {
// A token is available: use its position information
// (without a token there is no location to report)
filename = token.Filename
line = token.Line
col = token.Col
}
return &Error{
Template: ctx.template,
Filename: filename,
Line: line,
Column: col,
Token: token,
Sender: "execution",
OrigError: err,
}
}
func (ctx *ExecutionContext) Logf(format string, args ...interface{}) {
ctx.template.set.logf(format, args...)
}
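// Added usage sketch (not part of the original vendored file): building a
// Context and rendering a template with it. FromString and the "capfirst"
// filter are defined elsewhere in this package; the template string and the
// key names below are purely illustrative. Note that checkForValidIdentifiers
// means keys such as "user_name" are accepted, while "user-name" or
// "user.name" would be rejected with an *Error.
func exampleContextUsage() (string, error) {
    tpl, err := FromString("Hello {{ user_name|capfirst }} (pongo2 {{ pongo2.version }})")
    if err != nil {
        return "", err
    }
    return tpl.Execute(Context{"user_name": "fred"})
}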

@ -1,31 +0,0 @@
// A Django-syntax like template-engine
//
// Blog posts about pongo2 (including introduction and migration):
// https://www.florian-schlachter.de/?tag=pongo2
//
// Complete documentation on the template language:
// https://docs.djangoproject.com/en/dev/topics/templates/
//
// Try out pongo2 live in the pongo2 playground:
// https://www.florian-schlachter.de/pongo2/
//
// Make sure to read README.md in the repository as well.
//
// A tiny example with template strings:
//
// (Snippet on playground: https://www.florian-schlachter.de/pongo2/?id=1206546277)
//
// // Compile the template first (i. e. creating the AST)
// tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
// if err != nil {
// panic(err)
// }
// // Now you can render the template with the given
// // pongo2.Context as often as you want.
// out, err := tpl.Execute(pongo2.Context{"name": "fred"})
// if err != nil {
// panic(err)
// }
// fmt.Println(out) // Output: Hello Fred!
//
package pongo2

@ -1,91 +0,0 @@
package pongo2
import (
"bufio"
"fmt"
"os"
)
// The Error type is being used to address an error during lexing, parsing or
// execution. If you want to return an error object (for example in your own
// tag or filter) fill this object with as much information as you have.
// Make sure "Sender" is always given (if you're returning an error within
// a filter, set Sender to 'filter:yourfilter'; the same goes for tags: 'tag:mytag').
// It's okay if you only fill in OrigError if you don't have any other details at hand.
type Error struct {
Template *Template
Filename string
Line int
Column int
Token *Token
Sender string
OrigError error
}
func (e *Error) updateFromTokenIfNeeded(template *Template, t *Token) *Error {
if e.Template == nil {
e.Template = template
}
if e.Token == nil {
e.Token = t
if e.Line <= 0 {
e.Line = t.Line
e.Column = t.Col
}
}
return e
}
// Returns a nicely formatted error string.
func (e *Error) Error() string {
s := "[Error"
if e.Sender != "" {
s += " (where: " + e.Sender + ")"
}
if e.Filename != "" {
s += " in " + e.Filename
}
if e.Line > 0 {
s += fmt.Sprintf(" | Line %d Col %d", e.Line, e.Column)
if e.Token != nil {
s += fmt.Sprintf(" near '%s'", e.Token.Val)
}
}
s += "] "
s += e.OrigError.Error()
return s
}
// RawLine returns the affected line from the original template, if available.
func (e *Error) RawLine() (line string, available bool, outErr error) {
if e.Line <= 0 || e.Filename == "<string>" {
return "", false, nil
}
filename := e.Filename
if e.Template != nil {
filename = e.Template.set.resolveFilename(e.Template, e.Filename)
}
file, err := os.Open(filename)
if err != nil {
return "", false, err
}
defer func() {
err := file.Close()
if err != nil && outErr == nil {
outErr = err
}
}()
scanner := bufio.NewScanner(file)
l := 0
for scanner.Scan() {
l++
if l == e.Line {
return scanner.Text(), true, nil
}
}
return "", false, nil
}
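// Added sketch (not part of the original vendored file): the conventional way
// a custom filter reports a failure through this Error type, following the
// Sender naming convention described above. The filter name "reverse" and its
// rule are illustrative only; fmt.Errorf stands in for whatever error you have.
// A caller usually just logs err.Error(); RawLine() can additionally recover
// the offending template line when the template came from a file.
func exampleErrorFromFilter(in *Value, param *Value) (*Value, *Error) {
    if !in.IsString() {
        return nil, &Error{
            Sender:    "filter:reverse",
            OrigError: fmt.Errorf("reverse only works on strings, got %T", in.Interface()),
        }
    }
    return in, nil
}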

@ -1,143 +0,0 @@
package pongo2
import (
"fmt"
"github.com/juju/errors"
)
// FilterFunction is the type filter functions must fulfil
type FilterFunction func(in *Value, param *Value) (out *Value, err *Error)
var filters map[string]FilterFunction
func init() {
filters = make(map[string]FilterFunction)
}
// FilterExists returns true if the given filter is already registered
func FilterExists(name string) bool {
_, existing := filters[name]
return existing
}
// RegisterFilter registers a new filter. If there's already a filter with the same
// name, RegisterFilter returns an error. You usually want to call this
// function in the filter's init() function:
// http://golang.org/doc/effective_go.html#init
//
// See http://www.florian-schlachter.de/post/pongo2/ for more about
// writing filters and tags.
func RegisterFilter(name string, fn FilterFunction) error {
if FilterExists(name) {
return errors.Errorf("filter with name '%s' is already registered", name)
}
filters[name] = fn
return nil
}
// ReplaceFilter replaces an already registered filter with a new implementation. Use this
// function with caution since it allows you to change existing filter behaviour.
func ReplaceFilter(name string, fn FilterFunction) error {
if !FilterExists(name) {
return errors.Errorf("filter with name '%s' does not exist (therefore cannot be overridden)", name)
}
filters[name] = fn
return nil
}
// MustApplyFilter behaves like ApplyFilter, but panics on an error.
func MustApplyFilter(name string, value *Value, param *Value) *Value {
val, err := ApplyFilter(name, value, param)
if err != nil {
panic(err)
}
return val
}
// ApplyFilter applies a filter to a given value using the given parameters.
// Returns a *pongo2.Value or an error.
func ApplyFilter(name string, value *Value, param *Value) (*Value, *Error) {
fn, existing := filters[name]
if !existing {
return nil, &Error{
Sender: "applyfilter",
OrigError: errors.Errorf("Filter with name '%s' not found.", name),
}
}
// Make sure param is a *Value
if param == nil {
param = AsValue(nil)
}
return fn(value, param)
}
type filterCall struct {
token *Token
name string
parameter IEvaluator
filterFunc FilterFunction
}
func (fc *filterCall) Execute(v *Value, ctx *ExecutionContext) (*Value, *Error) {
var param *Value
var err *Error
if fc.parameter != nil {
param, err = fc.parameter.Evaluate(ctx)
if err != nil {
return nil, err
}
} else {
param = AsValue(nil)
}
filteredValue, err := fc.filterFunc(v, param)
if err != nil {
return nil, err.updateFromTokenIfNeeded(ctx.template, fc.token)
}
return filteredValue, nil
}
// Filter = IDENT | IDENT ":" FilterArg | IDENT "|" Filter
func (p *Parser) parseFilter() (*filterCall, *Error) {
identToken := p.MatchType(TokenIdentifier)
// Check filter ident
if identToken == nil {
return nil, p.Error("Filter name must be an identifier.", nil)
}
filter := &filterCall{
token: identToken,
name: identToken.Val,
}
// Get the appropriate filter function and bind it
filterFn, exists := filters[identToken.Val]
if !exists {
return nil, p.Error(fmt.Sprintf("Filter '%s' does not exist.", identToken.Val), identToken)
}
filter.filterFunc = filterFn
// Check for filter-argument (2 tokens needed: ':' ARG)
if p.Match(TokenSymbol, ":") != nil {
if p.Peek(TokenSymbol, "}}") != nil {
return nil, p.Error("Filter parameter required after ':'.", nil)
}
// Get filter argument expression
v, err := p.parseVariableOrLiteral()
if err != nil {
return nil, err
}
filter.parameter = v
}
return filter, nil
}
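// Added sketch (not part of the original vendored file): registering a custom
// filter from an init() function, as the RegisterFilter documentation above
// suggests, then applying it both from a template and programmatically. The
// filter name "exclaim" is illustrative.
func exampleRegisterExclaimFilter() {
    RegisterFilter("exclaim", func(in *Value, param *Value) (*Value, *Error) {
        return AsValue(in.String() + "!"), nil
    })
    // In a template: {{ greeting|exclaim }}
    // Programmatically:
    v := MustApplyFilter("exclaim", AsValue("hello"), nil)
    _ = v // "hello!"
}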

@ -1,927 +0,0 @@
package pongo2
/* Filters that are provided through github.com/flosch/pongo2-addons:
------------------------------------------------------------------
filesizeformat
slugify
timesince
timeuntil
Filters that won't be added:
----------------------------
get_static_prefix (reason: web-framework specific)
pprint (reason: python-specific)
static (reason: web-framework specific)
Reconsideration (not implemented yet):
--------------------------------------
force_escape (reason: not yet needed since this is the behaviour of pongo2's escape filter)
safeseq (reason: same reason as `force_escape`)
unordered_list (python-specific; not sure whether needed or not)
dictsort (python-specific; maybe one could add a filter to sort a list of structs by a specific field name)
dictsortreversed (see dictsort)
*/
import (
"bytes"
"fmt"
"math/rand"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"unicode/utf8"
"github.com/juju/errors"
)
func init() {
rand.Seed(time.Now().Unix())
RegisterFilter("escape", filterEscape)
RegisterFilter("safe", filterSafe)
RegisterFilter("escapejs", filterEscapejs)
RegisterFilter("add", filterAdd)
RegisterFilter("addslashes", filterAddslashes)
RegisterFilter("capfirst", filterCapfirst)
RegisterFilter("center", filterCenter)
RegisterFilter("cut", filterCut)
RegisterFilter("date", filterDate)
RegisterFilter("default", filterDefault)
RegisterFilter("default_if_none", filterDefaultIfNone)
RegisterFilter("divisibleby", filterDivisibleby)
RegisterFilter("first", filterFirst)
RegisterFilter("floatformat", filterFloatformat)
RegisterFilter("get_digit", filterGetdigit)
RegisterFilter("iriencode", filterIriencode)
RegisterFilter("join", filterJoin)
RegisterFilter("last", filterLast)
RegisterFilter("length", filterLength)
RegisterFilter("length_is", filterLengthis)
RegisterFilter("linebreaks", filterLinebreaks)
RegisterFilter("linebreaksbr", filterLinebreaksbr)
RegisterFilter("linenumbers", filterLinenumbers)
RegisterFilter("ljust", filterLjust)
RegisterFilter("lower", filterLower)
RegisterFilter("make_list", filterMakelist)
RegisterFilter("phone2numeric", filterPhone2numeric)
RegisterFilter("pluralize", filterPluralize)
RegisterFilter("random", filterRandom)
RegisterFilter("removetags", filterRemovetags)
RegisterFilter("rjust", filterRjust)
RegisterFilter("slice", filterSlice)
RegisterFilter("split", filterSplit)
RegisterFilter("stringformat", filterStringformat)
RegisterFilter("striptags", filterStriptags)
RegisterFilter("time", filterDate) // time uses filterDate (same golang-format)
RegisterFilter("title", filterTitle)
RegisterFilter("truncatechars", filterTruncatechars)
RegisterFilter("truncatechars_html", filterTruncatecharsHTML)
RegisterFilter("truncatewords", filterTruncatewords)
RegisterFilter("truncatewords_html", filterTruncatewordsHTML)
RegisterFilter("upper", filterUpper)
RegisterFilter("urlencode", filterUrlencode)
RegisterFilter("urlize", filterUrlize)
RegisterFilter("urlizetrunc", filterUrlizetrunc)
RegisterFilter("wordcount", filterWordcount)
RegisterFilter("wordwrap", filterWordwrap)
RegisterFilter("yesno", filterYesno)
RegisterFilter("float", filterFloat) // pongo-specific
RegisterFilter("integer", filterInteger) // pongo-specific
}
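// Added sketch (not part of the original vendored file): a few of the filters
// registered above as they would appear inside a template. Variable names are
// illustrative; note that "date" uses Go's reference-time layout, not Django's
// format codes.
const exampleBuiltinFilterUsage = `
{{ user.name|capfirst }}
{{ article.body|truncatewords:20 }}
{{ comment|default:"(no comment)" }}
{{ created_at|date:"2006-01-02" }}
{{ items|join:", " }}
`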
func filterTruncatecharsHelper(s string, newLen int) string {
runes := []rune(s)
if newLen < len(runes) {
if newLen >= 3 {
return fmt.Sprintf("%s...", string(runes[:newLen-3]))
}
// Not enough space for the ellipsis
return string(runes[:newLen])
}
return string(runes)
}
func filterTruncateHTMLHelper(value string, newOutput *bytes.Buffer, cond func() bool, fn func(c rune, s int, idx int) int, finalize func()) {
vLen := len(value)
var tagStack []string
idx := 0
for idx < vLen && !cond() {
c, s := utf8.DecodeRuneInString(value[idx:])
if c == utf8.RuneError {
idx += s
continue
}
if c == '<' {
newOutput.WriteRune(c)
idx += s // consume "<"
if idx+1 < vLen {
if value[idx] == '/' {
// Close tag
newOutput.WriteString("/")
tag := ""
idx++ // consume "/"
for idx < vLen {
c2, size2 := utf8.DecodeRuneInString(value[idx:])
if c2 == utf8.RuneError {
idx += size2
continue
}
// End of tag found
if c2 == '>' {
idx++ // consume ">"
break
}
tag += string(c2)
idx += size2
}
if len(tagStack) > 0 {
// Ideally, the close tag is TOP of tag stack
// In malformed HTML, it must not be, so iterate through the stack and remove the tag
for i := len(tagStack) - 1; i >= 0; i-- {
if tagStack[i] == tag {
// Found the tag
tagStack[i] = tagStack[len(tagStack)-1]
tagStack = tagStack[:len(tagStack)-1]
break
}
}
}
newOutput.WriteString(tag)
newOutput.WriteString(">")
} else {
// Open tag
tag := ""
params := false
for idx < vLen {
c2, size2 := utf8.DecodeRuneInString(value[idx:])
if c2 == utf8.RuneError {
idx += size2
continue
}
newOutput.WriteRune(c2)
// End of tag found
if c2 == '>' {
idx++ // consume ">"
break
}
if !params {
if c2 == ' ' {
params = true
} else {
tag += string(c2)
}
}
idx += size2
}
// Add tag to stack
tagStack = append(tagStack, tag)
}
}
} else {
idx = fn(c, s, idx)
}
}
finalize()
for i := len(tagStack) - 1; i >= 0; i-- {
tag := tagStack[i]
// Close everything from the regular tag stack
newOutput.WriteString(fmt.Sprintf("</%s>", tag))
}
}
func filterTruncatechars(in *Value, param *Value) (*Value, *Error) {
s := in.String()
newLen := param.Integer()
return AsValue(filterTruncatecharsHelper(s, newLen)), nil
}
func filterTruncatecharsHTML(in *Value, param *Value) (*Value, *Error) {
value := in.String()
newLen := max(param.Integer()-3, 0)
newOutput := bytes.NewBuffer(nil)
textcounter := 0
filterTruncateHTMLHelper(value, newOutput, func() bool {
return textcounter >= newLen
}, func(c rune, s int, idx int) int {
textcounter++
newOutput.WriteRune(c)
return idx + s
}, func() {
if textcounter >= newLen && textcounter < len(value) {
newOutput.WriteString("...")
}
})
return AsSafeValue(newOutput.String()), nil
}
func filterTruncatewords(in *Value, param *Value) (*Value, *Error) {
words := strings.Fields(in.String())
n := param.Integer()
if n <= 0 {
return AsValue(""), nil
}
nlen := min(len(words), n)
out := make([]string, 0, nlen)
for i := 0; i < nlen; i++ {
out = append(out, words[i])
}
if n < len(words) {
out = append(out, "...")
}
return AsValue(strings.Join(out, " ")), nil
}
func filterTruncatewordsHTML(in *Value, param *Value) (*Value, *Error) {
value := in.String()
newLen := max(param.Integer(), 0)
newOutput := bytes.NewBuffer(nil)
wordcounter := 0
filterTruncateHTMLHelper(value, newOutput, func() bool {
return wordcounter >= newLen
}, func(_ rune, _ int, idx int) int {
// Get next word
wordFound := false
for idx < len(value) {
c2, size2 := utf8.DecodeRuneInString(value[idx:])
if c2 == utf8.RuneError {
idx += size2
continue
}
if c2 == '<' {
// HTML tag start, don't consume it
return idx
}
newOutput.WriteRune(c2)
idx += size2
if c2 == ' ' || c2 == '.' || c2 == ',' || c2 == ';' {
// Word ends here, stop capturing it now
break
} else {
wordFound = true
}
}
if wordFound {
wordcounter++
}
return idx
}, func() {
if wordcounter >= newLen {
newOutput.WriteString("...")
}
})
return AsSafeValue(newOutput.String()), nil
}
func filterEscape(in *Value, param *Value) (*Value, *Error) {
output := strings.Replace(in.String(), "&", "&amp;", -1)
output = strings.Replace(output, ">", "&gt;", -1)
output = strings.Replace(output, "<", "&lt;", -1)
output = strings.Replace(output, "\"", "&quot;", -1)
output = strings.Replace(output, "'", "&#39;", -1)
return AsValue(output), nil
}
func filterSafe(in *Value, param *Value) (*Value, *Error) {
return in, nil // nothing to do here, just to keep track of the safe application
}
func filterEscapejs(in *Value, param *Value) (*Value, *Error) {
sin := in.String()
var b bytes.Buffer
idx := 0
for idx < len(sin) {
c, size := utf8.DecodeRuneInString(sin[idx:])
if c == utf8.RuneError {
idx += size
continue
}
if c == '\\' {
// Escape seq?
if idx+1 < len(sin) {
switch sin[idx+1] {
case 'r':
b.WriteString(fmt.Sprintf(`\u%04X`, '\r'))
idx += 2
continue
case 'n':
b.WriteString(fmt.Sprintf(`\u%04X`, '\n'))
idx += 2
continue
/*case '\'':
b.WriteString(fmt.Sprintf(`\u%04X`, '\''))
idx += 2
continue
case '"':
b.WriteString(fmt.Sprintf(`\u%04X`, '"'))
idx += 2
continue*/
}
}
}
if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == ' ' || c == '/' {
b.WriteRune(c)
} else {
b.WriteString(fmt.Sprintf(`\u%04X`, c))
}
idx += size
}
return AsValue(b.String()), nil
}
func filterAdd(in *Value, param *Value) (*Value, *Error) {
if in.IsNumber() && param.IsNumber() {
if in.IsFloat() || param.IsFloat() {
return AsValue(in.Float() + param.Float()), nil
}
return AsValue(in.Integer() + param.Integer()), nil
}
// If in/param is not a number, we're relying on the
// Value's String() conversion and just add them both together
return AsValue(in.String() + param.String()), nil
}
func filterAddslashes(in *Value, param *Value) (*Value, *Error) {
output := strings.Replace(in.String(), "\\", "\\\\", -1)
output = strings.Replace(output, "\"", "\\\"", -1)
output = strings.Replace(output, "'", "\\'", -1)
return AsValue(output), nil
}
func filterCut(in *Value, param *Value) (*Value, *Error) {
return AsValue(strings.Replace(in.String(), param.String(), "", -1)), nil
}
func filterLength(in *Value, param *Value) (*Value, *Error) {
return AsValue(in.Len()), nil
}
func filterLengthis(in *Value, param *Value) (*Value, *Error) {
return AsValue(in.Len() == param.Integer()), nil
}
func filterDefault(in *Value, param *Value) (*Value, *Error) {
if !in.IsTrue() {
return param, nil
}
return in, nil
}
func filterDefaultIfNone(in *Value, param *Value) (*Value, *Error) {
if in.IsNil() {
return param, nil
}
return in, nil
}
func filterDivisibleby(in *Value, param *Value) (*Value, *Error) {
if param.Integer() == 0 {
return AsValue(false), nil
}
return AsValue(in.Integer()%param.Integer() == 0), nil
}
func filterFirst(in *Value, param *Value) (*Value, *Error) {
if in.CanSlice() && in.Len() > 0 {
return in.Index(0), nil
}
return AsValue(""), nil
}
func filterFloatformat(in *Value, param *Value) (*Value, *Error) {
val := in.Float()
decimals := -1
if !param.IsNil() {
// Any argument provided?
decimals = param.Integer()
}
// If the argument is not a number (e.g. empty), the default
// behaviour is to trim the result
trim := !param.IsNumber()
if decimals <= 0 {
// argument is negative or zero, so we
// want the output to be trimmed
decimals = -decimals
trim = true
}
if trim {
// Remove zeroes
if float64(int(val)) == val {
return AsValue(in.Integer()), nil
}
}
return AsValue(strconv.FormatFloat(val, 'f', decimals, 64)), nil
}
func filterGetdigit(in *Value, param *Value) (*Value, *Error) {
i := param.Integer()
l := len(in.String()) // do NOT use in.Len() here!
if i <= 0 || i > l {
return in, nil
}
return AsValue(in.String()[l-i] - 48), nil
}
const filterIRIChars = "/#%[]=:;$&()+,!?*@'~"
func filterIriencode(in *Value, param *Value) (*Value, *Error) {
var b bytes.Buffer
sin := in.String()
for _, r := range sin {
if strings.IndexRune(filterIRIChars, r) >= 0 {
b.WriteRune(r)
} else {
b.WriteString(url.QueryEscape(string(r)))
}
}
return AsValue(b.String()), nil
}
func filterJoin(in *Value, param *Value) (*Value, *Error) {
if !in.CanSlice() {
return in, nil
}
sep := param.String()
sl := make([]string, 0, in.Len())
for i := 0; i < in.Len(); i++ {
sl = append(sl, in.Index(i).String())
}
return AsValue(strings.Join(sl, sep)), nil
}
func filterLast(in *Value, param *Value) (*Value, *Error) {
if in.CanSlice() && in.Len() > 0 {
return in.Index(in.Len() - 1), nil
}
return AsValue(""), nil
}
func filterUpper(in *Value, param *Value) (*Value, *Error) {
return AsValue(strings.ToUpper(in.String())), nil
}
func filterLower(in *Value, param *Value) (*Value, *Error) {
return AsValue(strings.ToLower(in.String())), nil
}
func filterMakelist(in *Value, param *Value) (*Value, *Error) {
s := in.String()
result := make([]string, 0, len(s))
for _, c := range s {
result = append(result, string(c))
}
return AsValue(result), nil
}
func filterCapfirst(in *Value, param *Value) (*Value, *Error) {
if in.Len() <= 0 {
return AsValue(""), nil
}
t := in.String()
r, size := utf8.DecodeRuneInString(t)
return AsValue(strings.ToUpper(string(r)) + t[size:]), nil
}
func filterCenter(in *Value, param *Value) (*Value, *Error) {
width := param.Integer()
slen := in.Len()
if width <= slen {
return in, nil
}
spaces := width - slen
left := spaces/2 + spaces%2
right := spaces / 2
return AsValue(fmt.Sprintf("%s%s%s", strings.Repeat(" ", left),
in.String(), strings.Repeat(" ", right))), nil
}
func filterDate(in *Value, param *Value) (*Value, *Error) {
t, isTime := in.Interface().(time.Time)
if !isTime {
return nil, &Error{
Sender: "filter:date",
OrigError: errors.New("filter input argument must be of type 'time.Time'"),
}
}
return AsValue(t.Format(param.String())), nil
}
func filterFloat(in *Value, param *Value) (*Value, *Error) {
return AsValue(in.Float()), nil
}
func filterInteger(in *Value, param *Value) (*Value, *Error) {
return AsValue(in.Integer()), nil
}
func filterLinebreaks(in *Value, param *Value) (*Value, *Error) {
if in.Len() == 0 {
return in, nil
}
var b bytes.Buffer
// Newline = <br />
// Double newline = <p>...</p>
lines := strings.Split(in.String(), "\n")
lenlines := len(lines)
opened := false
for idx, line := range lines {
if !opened {
b.WriteString("<p>")
opened = true
}
b.WriteString(line)
if idx < lenlines-1 && strings.TrimSpace(lines[idx]) != "" {
// We've not reached the end
if strings.TrimSpace(lines[idx+1]) == "" {
// Next line is empty
if opened {
b.WriteString("</p>")
opened = false
}
} else {
b.WriteString("<br />")
}
}
}
if opened {
b.WriteString("</p>")
}
return AsValue(b.String()), nil
}
func filterSplit(in *Value, param *Value) (*Value, *Error) {
chunks := strings.Split(in.String(), param.String())
return AsValue(chunks), nil
}
func filterLinebreaksbr(in *Value, param *Value) (*Value, *Error) {
return AsValue(strings.Replace(in.String(), "\n", "<br />", -1)), nil
}
func filterLinenumbers(in *Value, param *Value) (*Value, *Error) {
lines := strings.Split(in.String(), "\n")
output := make([]string, 0, len(lines))
for idx, line := range lines {
output = append(output, fmt.Sprintf("%d. %s", idx+1, line))
}
return AsValue(strings.Join(output, "\n")), nil
}
func filterLjust(in *Value, param *Value) (*Value, *Error) {
times := param.Integer() - in.Len()
if times < 0 {
times = 0
}
return AsValue(fmt.Sprintf("%s%s", in.String(), strings.Repeat(" ", times))), nil
}
func filterUrlencode(in *Value, param *Value) (*Value, *Error) {
return AsValue(url.QueryEscape(in.String())), nil
}
// TODO: This regexp could do some work
var filterUrlizeURLRegexp = regexp.MustCompile(`((((http|https)://)|www\.|((^|[ ])[0-9A-Za-z_\-]+(\.com|\.net|\.org|\.info|\.biz|\.de))))(?U:.*)([ ]+|$)`)
var filterUrlizeEmailRegexp = regexp.MustCompile(`(\w+@\w+\.\w{2,4})`)
func filterUrlizeHelper(input string, autoescape bool, trunc int) (string, error) {
var soutErr error
sout := filterUrlizeURLRegexp.ReplaceAllStringFunc(input, func(raw_url string) string {
var prefix string
var suffix string
if strings.HasPrefix(raw_url, " ") {
prefix = " "
}
if strings.HasSuffix(raw_url, " ") {
suffix = " "
}
raw_url = strings.TrimSpace(raw_url)
t, err := ApplyFilter("iriencode", AsValue(raw_url), nil)
if err != nil {
soutErr = err
return ""
}
url := t.String()
if !strings.HasPrefix(url, "http") {
url = fmt.Sprintf("http://%s", url)
}
title := raw_url
if trunc > 3 && len(title) > trunc {
title = fmt.Sprintf("%s...", title[:trunc-3])
}
if autoescape {
t, err := ApplyFilter("escape", AsValue(title), nil)
if err != nil {
soutErr = err
return ""
}
title = t.String()
}
return fmt.Sprintf(`%s<a href="%s" rel="nofollow">%s</a>%s`, prefix, url, title, suffix)
})
if soutErr != nil {
return "", soutErr
}
sout = filterUrlizeEmailRegexp.ReplaceAllStringFunc(sout, func(mail string) string {
title := mail
if trunc > 3 && len(title) > trunc {
title = fmt.Sprintf("%s...", title[:trunc-3])
}
return fmt.Sprintf(`<a href="mailto:%s">%s</a>`, mail, title)
})
return sout, nil
}
func filterUrlize(in *Value, param *Value) (*Value, *Error) {
autoescape := true
if param.IsBool() {
autoescape = param.Bool()
}
s, err := filterUrlizeHelper(in.String(), autoescape, -1)
if err != nil {
return nil, &Error{
Sender: "filter:urlize",
OrigError: err,
}
}
return AsValue(s), nil
}
func filterUrlizetrunc(in *Value, param *Value) (*Value, *Error) {
s, err := filterUrlizeHelper(in.String(), true, param.Integer())
if err != nil {
return nil, &Error{
Sender: "filter:urlizetrunc",
OrigError: err,
}
}
return AsValue(s), nil
}
func filterStringformat(in *Value, param *Value) (*Value, *Error) {
return AsValue(fmt.Sprintf(param.String(), in.Interface())), nil
}
var reStriptags = regexp.MustCompile("<[^>]*?>")
func filterStriptags(in *Value, param *Value) (*Value, *Error) {
s := in.String()
// Strip all tags
s = reStriptags.ReplaceAllString(s, "")
return AsValue(strings.TrimSpace(s)), nil
}
// https://en.wikipedia.org/wiki/Phoneword
var filterPhone2numericMap = map[string]string{
"a": "2", "b": "2", "c": "2", "d": "3", "e": "3", "f": "3", "g": "4", "h": "4", "i": "4", "j": "5", "k": "5",
"l": "5", "m": "6", "n": "6", "o": "6", "p": "7", "q": "7", "r": "7", "s": "7", "t": "8", "u": "8", "v": "8",
"w": "9", "x": "9", "y": "9", "z": "9",
}
func filterPhone2numeric(in *Value, param *Value) (*Value, *Error) {
sin := in.String()
for k, v := range filterPhone2numericMap {
sin = strings.Replace(sin, k, v, -1)
sin = strings.Replace(sin, strings.ToUpper(k), v, -1)
}
return AsValue(sin), nil
}
func filterPluralize(in *Value, param *Value) (*Value, *Error) {
if in.IsNumber() {
// Works only on numbers
if param.Len() > 0 {
endings := strings.Split(param.String(), ",")
if len(endings) > 2 {
return nil, &Error{
Sender: "filter:pluralize",
OrigError: errors.New("you cannot pass more than 2 arguments to filter 'pluralize'"),
}
}
if len(endings) == 1 {
// 1 argument
if in.Integer() != 1 {
return AsValue(endings[0]), nil
}
} else {
if in.Integer() != 1 {
// 2 arguments
return AsValue(endings[1]), nil
}
return AsValue(endings[0]), nil
}
} else {
if in.Integer() != 1 {
// return default 's'
return AsValue("s"), nil
}
}
return AsValue(""), nil
}
return nil, &Error{
Sender: "filter:pluralize",
OrigError: errors.New("filter 'pluralize' only works on numbers"),
}
}
func filterRandom(in *Value, param *Value) (*Value, *Error) {
if !in.CanSlice() || in.Len() <= 0 {
return in, nil
}
i := rand.Intn(in.Len())
return in.Index(i), nil
}
func filterRemovetags(in *Value, param *Value) (*Value, *Error) {
s := in.String()
tags := strings.Split(param.String(), ",")
// Strip only specific tags
for _, tag := range tags {
re := regexp.MustCompile(fmt.Sprintf("</?%s/?>", tag))
s = re.ReplaceAllString(s, "")
}
return AsValue(strings.TrimSpace(s)), nil
}
func filterRjust(in *Value, param *Value) (*Value, *Error) {
return AsValue(fmt.Sprintf(fmt.Sprintf("%%%ds", param.Integer()), in.String())), nil
}
func filterSlice(in *Value, param *Value) (*Value, *Error) {
comp := strings.Split(param.String(), ":")
if len(comp) != 2 {
return nil, &Error{
Sender: "filter:slice",
OrigError: errors.New("Slice string must have the format 'from:to' [from/to can be omitted, but the ':' is required]"),
}
}
if !in.CanSlice() {
return in, nil
}
from := AsValue(comp[0]).Integer()
to := in.Len()
if from > to {
from = to
}
vto := AsValue(comp[1]).Integer()
if vto >= from && vto <= in.Len() {
to = vto
}
return in.Slice(from, to), nil
}
func filterTitle(in *Value, param *Value) (*Value, *Error) {
if !in.IsString() {
return AsValue(""), nil
}
return AsValue(strings.Title(strings.ToLower(in.String()))), nil
}
func filterWordcount(in *Value, param *Value) (*Value, *Error) {
return AsValue(len(strings.Fields(in.String()))), nil
}
func filterWordwrap(in *Value, param *Value) (*Value, *Error) {
words := strings.Fields(in.String())
wordsLen := len(words)
wrapAt := param.Integer()
if wrapAt <= 0 {
return in, nil
}
linecount := (wordsLen + wrapAt - 1) / wrapAt // ceil(wordsLen / wrapAt), avoids out-of-range slices below
lines := make([]string, 0, linecount)
for i := 0; i < linecount; i++ {
lines = append(lines, strings.Join(words[wrapAt*i:min(wrapAt*(i+1), wordsLen)], " "))
}
return AsValue(strings.Join(lines, "\n")), nil
}
func filterYesno(in *Value, param *Value) (*Value, *Error) {
choices := map[int]string{
0: "yes",
1: "no",
2: "maybe",
}
paramString := param.String()
customChoices := strings.Split(paramString, ",")
if len(paramString) > 0 {
if len(customChoices) > 3 {
return nil, &Error{
Sender: "filter:yesno",
OrigError: errors.Errorf("You cannot pass more than 3 options to the 'yesno'-filter (got: '%s').", paramString),
}
}
if len(customChoices) < 2 {
return nil, &Error{
Sender: "filter:yesno",
OrigError: errors.Errorf("You must pass either no or at least 2 arguments to the 'yesno'-filter (got: '%s').", paramString),
}
}
// Map to the options now
choices[0] = customChoices[0]
choices[1] = customChoices[1]
if len(customChoices) == 3 {
choices[2] = customChoices[2]
}
}
// maybe
if in.IsNil() {
return AsValue(choices[2]), nil
}
// yes
if in.IsTrue() {
return AsValue(choices[0]), nil
}
// no
return AsValue(choices[1]), nil
}

@ -1,13 +0,0 @@
module github.com/flosch/pongo2
require (
github.com/go-check/check v0.0.0-20180628173108-788fd7840127
github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5
github.com/juju/loggo v0.0.0-20180524022052-584905176618 // indirect
github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073 // indirect
github.com/kr/pretty v0.1.0 // indirect
github.com/mattn/goveralls v0.0.2 // indirect
golang.org/x/tools v0.0.0-20181221001348-537d06c36207 // indirect
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce // indirect
gopkg.in/yaml.v2 v2.2.2 // indirect
)

@ -1,15 +0,0 @@
package pongo2
func max(a, b int) int {
if a > b {
return a
}
return b
}
func min(a, b int) int {
if a < b {
return a
}
return b
}

@ -1,432 +0,0 @@
package pongo2
import (
"fmt"
"strings"
"unicode/utf8"
"github.com/juju/errors"
)
const (
TokenError = iota
EOF
TokenHTML
TokenKeyword
TokenIdentifier
TokenString
TokenNumber
TokenSymbol
)
var (
tokenSpaceChars = " \n\r\t"
tokenIdentifierChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"
tokenIdentifierCharsWithDigits = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789"
tokenDigits = "0123456789"
// Available symbols in pongo2 (within filters/tag)
TokenSymbols = []string{
// 3-Char symbols
"{{-", "-}}", "{%-", "-%}",
// 2-Char symbols
"==", ">=", "<=", "&&", "||", "{{", "}}", "{%", "%}", "!=", "<>",
// 1-Char symbol
"(", ")", "+", "-", "*", "<", ">", "/", "^", ",", ".", "!", "|", ":", "=", "%",
}
// Available keywords in pongo2
TokenKeywords = []string{"in", "and", "or", "not", "true", "false", "as", "export"}
)
type TokenType int
type Token struct {
Filename string
Typ TokenType
Val string
Line int
Col int
TrimWhitespaces bool
}
type lexerStateFn func() lexerStateFn
type lexer struct {
name string
input string
start int // start pos of the item
pos int // current pos
width int // width of last rune
tokens []*Token
errored bool
startline int
startcol int
line int
col int
inVerbatim bool
verbatimName string
}
func (t *Token) String() string {
val := t.Val
if len(val) > 1000 {
val = fmt.Sprintf("%s...%s", val[:10], val[len(val)-5:len(val)])
}
typ := ""
switch t.Typ {
case TokenHTML:
typ = "HTML"
case TokenError:
typ = "Error"
case TokenIdentifier:
typ = "Identifier"
case TokenKeyword:
typ = "Keyword"
case TokenNumber:
typ = "Number"
case TokenString:
typ = "String"
case TokenSymbol:
typ = "Symbol"
default:
typ = "Unknown"
}
return fmt.Sprintf("<Token Typ=%s (%d) Val='%s' Line=%d Col=%d, WT=%t>",
typ, t.Typ, val, t.Line, t.Col, t.TrimWhitespaces)
}
func lex(name string, input string) ([]*Token, *Error) {
l := &lexer{
name: name,
input: input,
tokens: make([]*Token, 0, 100),
line: 1,
col: 1,
startline: 1,
startcol: 1,
}
l.run()
if l.errored {
errtoken := l.tokens[len(l.tokens)-1]
return nil, &Error{
Filename: name,
Line: errtoken.Line,
Column: errtoken.Col,
Sender: "lexer",
OrigError: errors.New(errtoken.Val),
}
}
return l.tokens, nil
}
func (l *lexer) value() string {
return l.input[l.start:l.pos]
}
func (l *lexer) length() int {
return l.pos - l.start
}
func (l *lexer) emit(t TokenType) {
tok := &Token{
Filename: l.name,
Typ: t,
Val: l.value(),
Line: l.startline,
Col: l.startcol,
}
if t == TokenString {
// Escape sequence \" in strings
tok.Val = strings.Replace(tok.Val, `\"`, `"`, -1)
tok.Val = strings.Replace(tok.Val, `\\`, `\`, -1)
}
if t == TokenSymbol && len(tok.Val) == 3 && (strings.HasSuffix(tok.Val, "-") || strings.HasPrefix(tok.Val, "-")) {
tok.TrimWhitespaces = true
tok.Val = strings.Replace(tok.Val, "-", "", -1)
}
l.tokens = append(l.tokens, tok)
l.start = l.pos
l.startline = l.line
l.startcol = l.col
}
func (l *lexer) next() rune {
if l.pos >= len(l.input) {
l.width = 0
return EOF
}
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
l.width = w
l.pos += l.width
l.col += l.width
return r
}
func (l *lexer) backup() {
l.pos -= l.width
l.col -= l.width
}
func (l *lexer) peek() rune {
r := l.next()
l.backup()
return r
}
func (l *lexer) ignore() {
l.start = l.pos
l.startline = l.line
l.startcol = l.col
}
func (l *lexer) accept(what string) bool {
if strings.IndexRune(what, l.next()) >= 0 {
return true
}
l.backup()
return false
}
func (l *lexer) acceptRun(what string) {
for strings.IndexRune(what, l.next()) >= 0 {
}
l.backup()
}
func (l *lexer) errorf(format string, args ...interface{}) lexerStateFn {
t := &Token{
Filename: l.name,
Typ: TokenError,
Val: fmt.Sprintf(format, args...),
Line: l.startline,
Col: l.startcol,
}
l.tokens = append(l.tokens, t)
l.errored = true
l.startline = l.line
l.startcol = l.col
return nil
}
func (l *lexer) eof() bool {
return l.start >= len(l.input)-1
}
func (l *lexer) run() {
for {
// TODO: Support verbatim tag names
// https://docs.djangoproject.com/en/dev/ref/templates/builtins/#verbatim
if l.inVerbatim {
name := l.verbatimName
if name != "" {
name += " "
}
if strings.HasPrefix(l.input[l.pos:], fmt.Sprintf("{%% endverbatim %s%%}", name)) { // end verbatim
if l.pos > l.start {
l.emit(TokenHTML)
}
w := len("{% endverbatim %}")
l.pos += w
l.col += w
l.ignore()
l.inVerbatim = false
}
} else if strings.HasPrefix(l.input[l.pos:], "{% verbatim %}") { // tag
if l.pos > l.start {
l.emit(TokenHTML)
}
l.inVerbatim = true
w := len("{% verbatim %}")
l.pos += w
l.col += w
l.ignore()
}
if !l.inVerbatim {
// Ignore single-line comments {# ... #}
if strings.HasPrefix(l.input[l.pos:], "{#") {
if l.pos > l.start {
l.emit(TokenHTML)
}
l.pos += 2 // pass '{#'
l.col += 2
for {
switch l.peek() {
case EOF:
l.errorf("Single-line comment not closed.")
return
case '\n':
l.errorf("Newline not permitted in a single-line comment.")
return
}
if strings.HasPrefix(l.input[l.pos:], "#}") {
l.pos += 2 // pass '#}'
l.col += 2
break
}
l.next()
}
l.ignore() // ignore whole comment
// Comment skipped
continue // next token
}
if strings.HasPrefix(l.input[l.pos:], "{{") || // variable
strings.HasPrefix(l.input[l.pos:], "{%") { // tag
if l.pos > l.start {
l.emit(TokenHTML)
}
l.tokenize()
if l.errored {
return
}
continue
}
}
switch l.peek() {
case '\n':
l.line++
l.col = 0
}
if l.next() == EOF {
break
}
}
if l.pos > l.start {
l.emit(TokenHTML)
}
if l.inVerbatim {
l.errorf("verbatim-tag not closed, got EOF.")
}
}
func (l *lexer) tokenize() {
for state := l.stateCode; state != nil; {
state = state()
}
}
func (l *lexer) stateCode() lexerStateFn {
outer_loop:
for {
switch {
case l.accept(tokenSpaceChars):
if l.value() == "\n" {
return l.errorf("Newline not allowed within tag/variable.")
}
l.ignore()
continue
case l.accept(tokenIdentifierChars):
return l.stateIdentifier
case l.accept(tokenDigits):
return l.stateNumber
case l.accept(`"'`):
return l.stateString
}
// Check for symbol
for _, sym := range TokenSymbols {
if strings.HasPrefix(l.input[l.start:], sym) {
l.pos += len(sym)
l.col += l.length()
l.emit(TokenSymbol)
if sym == "%}" || sym == "-%}" || sym == "}}" || sym == "-}}" {
// Tag/variable end, return after emit
return nil
}
continue outer_loop
}
}
break
}
// Normal shut down
return nil
}
func (l *lexer) stateIdentifier() lexerStateFn {
l.acceptRun(tokenIdentifierChars)
l.acceptRun(tokenIdentifierCharsWithDigits)
for _, kw := range TokenKeywords {
if kw == l.value() {
l.emit(TokenKeyword)
return l.stateCode
}
}
l.emit(TokenIdentifier)
return l.stateCode
}
func (l *lexer) stateNumber() lexerStateFn {
l.acceptRun(tokenDigits)
if l.accept(tokenIdentifierCharsWithDigits) {
// This seems to be an identifier starting with a number.
// See https://github.com/flosch/pongo2/issues/151
return l.stateIdentifier()
}
/*
Maybe context-sensitive number lexing?
* comments.0.Text // first comment
* usercomments.1.0 // second user, first comment
* if (score >= 8.5) // 8.5 as a number
if l.peek() == '.' {
l.accept(".")
if !l.accept(tokenDigits) {
return l.errorf("Malformed number.")
}
l.acceptRun(tokenDigits)
}
*/
l.emit(TokenNumber)
return l.stateCode
}
func (l *lexer) stateString() lexerStateFn {
quotationMark := l.value()
l.ignore()
l.startcol-- // we're starting the position at the first "
for !l.accept(quotationMark) {
switch l.next() {
case '\\':
// escape sequence
switch l.peek() {
case '"', '\\':
l.next()
default:
return l.errorf("Unknown escape sequence: \\%c", l.peek())
}
case EOF:
return l.errorf("Unexpected EOF, string not closed.")
case '\n':
return l.errorf("Newline in string is not allowed.")
}
}
l.backup()
l.emit(TokenString)
l.next()
l.ignore()
return l.stateCode
}
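// Added sketch (not part of the original vendored file): template input that
// exercises the lexer features above -- single-line comments, the whitespace
// trimming "-" symbol variants (which set TrimWhitespaces on the token) and
// the verbatim block. Purely illustrative.
const exampleLexerInput = `
{# this comment is dropped entirely by the lexer #}
<ul>
{%- for item in items -%}
<li>{{ item }}</li>
{%- endfor -%}
</ul>
{% verbatim %}{{ left untouched and emitted as plain HTML }}{% endverbatim %}
`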

@ -1,16 +0,0 @@
package pongo2
// The root document
type nodeDocument struct {
Nodes []INode
}
func (doc *nodeDocument) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
for _, n := range doc.Nodes {
err := n.Execute(ctx, writer)
if err != nil {
return err
}
}
return nil
}

@ -1,23 +0,0 @@
package pongo2
import (
"strings"
)
type nodeHTML struct {
token *Token
trimLeft bool
trimRight bool
}
func (n *nodeHTML) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
res := n.token.Val
if n.trimLeft {
res = strings.TrimLeft(res, tokenSpaceChars)
}
if n.trimRight {
res = strings.TrimRight(res, tokenSpaceChars)
}
writer.WriteString(res)
return nil
}

@ -1,16 +0,0 @@
package pongo2
type NodeWrapper struct {
Endtag string
nodes []INode
}
func (wrapper *NodeWrapper) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
for _, n := range wrapper.nodes {
err := n.Execute(ctx, writer)
if err != nil {
return err
}
}
return nil
}

@ -1,26 +0,0 @@
package pongo2
// Options allow you to change the behavior of the template engine.
// You can change the options before calling the Execute method.
type Options struct {
// If this is set to true, the first newline after a block is removed (block, not variable tag!). Defaults to false.
TrimBlocks bool
// If this is set to true, leading spaces and tabs are stripped from the start of a line to a block. Defaults to false.
LStripBlocks bool
}
func newOptions() *Options {
return &Options{
TrimBlocks: false,
LStripBlocks: false,
}
}
// Update updates this options from another options.
func (opt *Options) Update(other *Options) *Options {
opt.TrimBlocks = other.TrimBlocks
opt.LStripBlocks = other.LStripBlocks
return opt
}

@ -1,309 +0,0 @@
package pongo2
import (
"fmt"
"strings"
"github.com/juju/errors"
)
type INode interface {
Execute(*ExecutionContext, TemplateWriter) *Error
}
type IEvaluator interface {
INode
GetPositionToken() *Token
Evaluate(*ExecutionContext) (*Value, *Error)
FilterApplied(name string) bool
}
// The parser provides you with a comprehensive and easy tool to
// work with the template document and arguments provided by
// the user for your custom tag.
//
// The parser works on a token list which will be provided by pongo2.
// A token is a unit you can work with. Tokens are either of type identifier,
// string, number, keyword, HTML or symbol.
//
// (See Token's documentation for more about tokens)
type Parser struct {
name string
idx int
tokens []*Token
lastToken *Token
// if the parser parses a template document, here will be
// a reference to it (needed to access the template through Tags)
template *Template
}
// Creates a new parser to parse tokens.
// Used inside pongo2 to parse documents and to provide an easy-to-use
// parser for tag authors
func newParser(name string, tokens []*Token, template *Template) *Parser {
p := &Parser{
name: name,
tokens: tokens,
template: template,
}
if len(tokens) > 0 {
p.lastToken = tokens[len(tokens)-1]
}
return p
}
// Consume one token. It will be gone forever.
func (p *Parser) Consume() {
p.ConsumeN(1)
}
// Consume N tokens. They will be gone forever.
func (p *Parser) ConsumeN(count int) {
p.idx += count
}
// Returns the current token.
func (p *Parser) Current() *Token {
return p.Get(p.idx)
}
// Returns the CURRENT token if the given type matches.
// Consumes this token on success.
func (p *Parser) MatchType(typ TokenType) *Token {
if t := p.PeekType(typ); t != nil {
p.Consume()
return t
}
return nil
}
// Returns the CURRENT token if the given type AND value matches.
// Consumes this token on success.
func (p *Parser) Match(typ TokenType, val string) *Token {
if t := p.Peek(typ, val); t != nil {
p.Consume()
return t
}
return nil
}
// Returns the CURRENT token if the given type AND *one* of
// the given values matches.
// Consumes this token on success.
func (p *Parser) MatchOne(typ TokenType, vals ...string) *Token {
for _, val := range vals {
if t := p.Peek(typ, val); t != nil {
p.Consume()
return t
}
}
return nil
}
// Returns the CURRENT token if the given type matches.
// It DOES NOT consume the token.
func (p *Parser) PeekType(typ TokenType) *Token {
return p.PeekTypeN(0, typ)
}
// Returns the CURRENT token if the given type AND value matches.
// It DOES NOT consume the token.
func (p *Parser) Peek(typ TokenType, val string) *Token {
return p.PeekN(0, typ, val)
}
// Returns the CURRENT token if the given type AND *one* of
// the given values matches.
// It DOES NOT consume the token.
func (p *Parser) PeekOne(typ TokenType, vals ...string) *Token {
for _, v := range vals {
t := p.PeekN(0, typ, v)
if t != nil {
return t
}
}
return nil
}
// Returns the tokens[current position + shift] token if the
// given type AND value matches for that token.
// DOES NOT consume the token.
func (p *Parser) PeekN(shift int, typ TokenType, val string) *Token {
t := p.Get(p.idx + shift)
if t != nil {
if t.Typ == typ && t.Val == val {
return t
}
}
return nil
}
// Returns the tokens[current position + shift] token if the given type matches.
// DOES NOT consume the token.
func (p *Parser) PeekTypeN(shift int, typ TokenType) *Token {
t := p.Get(p.idx + shift)
if t != nil {
if t.Typ == typ {
return t
}
}
return nil
}
// Returns the UNCONSUMED token count.
func (p *Parser) Remaining() int {
return len(p.tokens) - p.idx
}
// Returns the total token count.
func (p *Parser) Count() int {
return len(p.tokens)
}
// Returns tokens[i] or NIL (if i >= len(tokens))
func (p *Parser) Get(i int) *Token {
if i < len(p.tokens) && i >= 0 {
return p.tokens[i]
}
return nil
}
// Returns tokens[current-position + shift] or NIL
// (if (current-position + i) >= len(tokens))
func (p *Parser) GetR(shift int) *Token {
i := p.idx + shift
return p.Get(i)
}
// Error produces a nice error message and returns an error-object.
// The 'token'-argument is optional. If provided, it will take
// the token's position information. If not provided, it will
// automatically use the CURRENT token's position information.
func (p *Parser) Error(msg string, token *Token) *Error {
if token == nil {
// Set current token
token = p.Current()
if token == nil {
// Set to last token
if len(p.tokens) > 0 {
token = p.tokens[len(p.tokens)-1]
}
}
}
var line, col int
if token != nil {
line = token.Line
col = token.Col
}
return &Error{
Template: p.template,
Filename: p.name,
Sender: "parser",
Line: line,
Column: col,
Token: token,
OrigError: errors.New(msg),
}
}
// Wraps all nodes between starting tag and "{% endtag %}" and provides
// one simple interface to execute the wrapped nodes.
// It returns a parser to process provided arguments to the tag.
func (p *Parser) WrapUntilTag(names ...string) (*NodeWrapper, *Parser, *Error) {
wrapper := &NodeWrapper{}
var tagArgs []*Token
for p.Remaining() > 0 {
// New tag, check whether we have to stop wrapping here
if p.Peek(TokenSymbol, "{%") != nil {
tagIdent := p.PeekTypeN(1, TokenIdentifier)
if tagIdent != nil {
// We've found a (!) end-tag
found := false
for _, n := range names {
if tagIdent.Val == n {
found = true
break
}
}
// We only process the tag if we've found an end tag
if found {
// Okay, endtag found.
p.ConsumeN(2) // '{%' tagname
for {
if p.Match(TokenSymbol, "%}") != nil {
// Okay, end the wrapping here
wrapper.Endtag = tagIdent.Val
return wrapper, newParser(p.template.name, tagArgs, p.template), nil
}
t := p.Current()
p.Consume()
if t == nil {
return nil, nil, p.Error("Unexpected EOF.", p.lastToken)
}
tagArgs = append(tagArgs, t)
}
}
}
}
// Otherwise process next element to be wrapped
node, err := p.parseDocElement()
if err != nil {
return nil, nil, err
}
wrapper.nodes = append(wrapper.nodes, node)
}
return nil, nil, p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")),
p.lastToken)
}
// Skips all nodes between starting tag and "{% endtag %}"
func (p *Parser) SkipUntilTag(names ...string) *Error {
for p.Remaining() > 0 {
// New tag, check whether we have to stop wrapping here
if p.Peek(TokenSymbol, "{%") != nil {
tagIdent := p.PeekTypeN(1, TokenIdentifier)
if tagIdent != nil {
// We've found a (!) end-tag
found := false
for _, n := range names {
if tagIdent.Val == n {
found = true
break
}
}
// We only process the tag if we've found an end tag
if found {
// Okay, endtag found.
p.ConsumeN(2) // '{%' tagname
for {
if p.Match(TokenSymbol, "%}") != nil {
// Done skipping, exit.
return nil
}
}
}
}
}
t := p.Current()
p.Consume()
if t == nil {
return p.Error("Unexpected EOF.", p.lastToken)
}
}
return p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")), p.lastToken)
}

View file

@ -1,59 +0,0 @@
package pongo2
// Doc = { ( Filter | Tag | HTML ) }
func (p *Parser) parseDocElement() (INode, *Error) {
t := p.Current()
switch t.Typ {
case TokenHTML:
n := &nodeHTML{token: t}
left := p.PeekTypeN(-1, TokenSymbol)
right := p.PeekTypeN(1, TokenSymbol)
n.trimLeft = left != nil && left.TrimWhitespaces
n.trimRight = right != nil && right.TrimWhitespaces
p.Consume() // consume HTML element
return n, nil
case TokenSymbol:
switch t.Val {
case "{{":
// parse variable
variable, err := p.parseVariableElement()
if err != nil {
return nil, err
}
return variable, nil
case "{%":
// parse tag
tag, err := p.parseTagElement()
if err != nil {
return nil, err
}
return tag, nil
}
}
return nil, p.Error("Unexpected token (only HTML/tags/filters in templates allowed)", t)
}
func (tpl *Template) parse() *Error {
tpl.parser = newParser(tpl.name, tpl.tokens, tpl)
doc, err := tpl.parser.parseDocument()
if err != nil {
return err
}
tpl.root = doc
return nil
}
func (p *Parser) parseDocument() (*nodeDocument, *Error) {
doc := &nodeDocument{}
for p.Remaining() > 0 {
node, err := p.parseDocElement()
if err != nil {
return nil, err
}
doc.Nodes = append(doc.Nodes, node)
}
return doc, nil
}

@ -1,503 +0,0 @@
package pongo2
import (
"fmt"
"math"
)
type Expression struct {
// TODO: Add location token?
expr1 IEvaluator
expr2 IEvaluator
opToken *Token
}
type relationalExpression struct {
// TODO: Add location token?
expr1 IEvaluator
expr2 IEvaluator
opToken *Token
}
type simpleExpression struct {
negate bool
negativeSign bool
term1 IEvaluator
term2 IEvaluator
opToken *Token
}
type term struct {
// TODO: Add location token?
factor1 IEvaluator
factor2 IEvaluator
opToken *Token
}
type power struct {
// TODO: Add location token?
power1 IEvaluator
power2 IEvaluator
}
func (expr *Expression) FilterApplied(name string) bool {
return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
(expr.expr2 != nil && expr.expr2.FilterApplied(name)))
}
func (expr *relationalExpression) FilterApplied(name string) bool {
return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
(expr.expr2 != nil && expr.expr2.FilterApplied(name)))
}
func (expr *simpleExpression) FilterApplied(name string) bool {
return expr.term1.FilterApplied(name) && (expr.term2 == nil ||
(expr.term2 != nil && expr.term2.FilterApplied(name)))
}
func (expr *term) FilterApplied(name string) bool {
return expr.factor1.FilterApplied(name) && (expr.factor2 == nil ||
(expr.factor2 != nil && expr.factor2.FilterApplied(name)))
}
func (expr *power) FilterApplied(name string) bool {
return expr.power1.FilterApplied(name) && (expr.power2 == nil ||
(expr.power2 != nil && expr.power2.FilterApplied(name)))
}
func (expr *Expression) GetPositionToken() *Token {
return expr.expr1.GetPositionToken()
}
func (expr *relationalExpression) GetPositionToken() *Token {
return expr.expr1.GetPositionToken()
}
func (expr *simpleExpression) GetPositionToken() *Token {
return expr.term1.GetPositionToken()
}
func (expr *term) GetPositionToken() *Token {
return expr.factor1.GetPositionToken()
}
func (expr *power) GetPositionToken() *Token {
return expr.power1.GetPositionToken()
}
func (expr *Expression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
value, err := expr.Evaluate(ctx)
if err != nil {
return err
}
writer.WriteString(value.String())
return nil
}
func (expr *relationalExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
value, err := expr.Evaluate(ctx)
if err != nil {
return err
}
writer.WriteString(value.String())
return nil
}
func (expr *simpleExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
value, err := expr.Evaluate(ctx)
if err != nil {
return err
}
writer.WriteString(value.String())
return nil
}
func (expr *term) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
value, err := expr.Evaluate(ctx)
if err != nil {
return err
}
writer.WriteString(value.String())
return nil
}
func (expr *power) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
value, err := expr.Evaluate(ctx)
if err != nil {
return err
}
writer.WriteString(value.String())
return nil
}
func (expr *Expression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
v1, err := expr.expr1.Evaluate(ctx)
if err != nil {
return nil, err
}
if expr.expr2 != nil {
switch expr.opToken.Val {
case "and", "&&":
if !v1.IsTrue() {
return AsValue(false), nil
} else {
v2, err := expr.expr2.Evaluate(ctx)
if err != nil {
return nil, err
}
return AsValue(v2.IsTrue()), nil
}
case "or", "||":
if v1.IsTrue() {
return AsValue(true), nil
} else {
v2, err := expr.expr2.Evaluate(ctx)
if err != nil {
return nil, err
}
return AsValue(v2.IsTrue()), nil
}
default:
return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken)
}
} else {
return v1, nil
}
}
func (expr *relationalExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
v1, err := expr.expr1.Evaluate(ctx)
if err != nil {
return nil, err
}
if expr.expr2 != nil {
v2, err := expr.expr2.Evaluate(ctx)
if err != nil {
return nil, err
}
switch expr.opToken.Val {
case "<=":
if v1.IsFloat() || v2.IsFloat() {
return AsValue(v1.Float() <= v2.Float()), nil
}
return AsValue(v1.Integer() <= v2.Integer()), nil
case ">=":
if v1.IsFloat() || v2.IsFloat() {
return AsValue(v1.Float() >= v2.Float()), nil
}
return AsValue(v1.Integer() >= v2.Integer()), nil
case "==":
return AsValue(v1.EqualValueTo(v2)), nil
case ">":
if v1.IsFloat() || v2.IsFloat() {
return AsValue(v1.Float() > v2.Float()), nil
}
return AsValue(v1.Integer() > v2.Integer()), nil
case "<":
if v1.IsFloat() || v2.IsFloat() {
return AsValue(v1.Float() < v2.Float()), nil
}
return AsValue(v1.Integer() < v2.Integer()), nil
case "!=", "<>":
return AsValue(!v1.EqualValueTo(v2)), nil
case "in":
return AsValue(v2.Contains(v1)), nil
default:
return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken)
}
} else {
return v1, nil
}
}
func (expr *simpleExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
t1, err := expr.term1.Evaluate(ctx)
if err != nil {
return nil, err
}
result := t1
if expr.negate {
result = result.Negate()
}
if expr.negativeSign {
if result.IsNumber() {
switch {
case result.IsFloat():
result = AsValue(-1 * result.Float())
case result.IsInteger():
result = AsValue(-1 * result.Integer())
default:
return nil, ctx.Error("Operation between a number and a non-(float/integer) is not possible", nil)
}
} else {
return nil, ctx.Error("Negative sign on a non-number expression", expr.GetPositionToken())
}
}
if expr.term2 != nil {
t2, err := expr.term2.Evaluate(ctx)
if err != nil {
return nil, err
}
switch expr.opToken.Val {
case "+":
if result.IsFloat() || t2.IsFloat() {
// Result will be a float
return AsValue(result.Float() + t2.Float()), nil
}
// Result will be an integer
return AsValue(result.Integer() + t2.Integer()), nil
case "-":
if result.IsFloat() || t2.IsFloat() {
// Result will be a float
return AsValue(result.Float() - t2.Float()), nil
}
// Result will be an integer
return AsValue(result.Integer() - t2.Integer()), nil
default:
return nil, ctx.Error("Unimplemented", expr.GetPositionToken())
}
}
return result, nil
}
func (expr *term) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
f1, err := expr.factor1.Evaluate(ctx)
if err != nil {
return nil, err
}
if expr.factor2 != nil {
f2, err := expr.factor2.Evaluate(ctx)
if err != nil {
return nil, err
}
switch expr.opToken.Val {
case "*":
if f1.IsFloat() || f2.IsFloat() {
// Result will be float
return AsValue(f1.Float() * f2.Float()), nil
}
// Result will be int
return AsValue(f1.Integer() * f2.Integer()), nil
case "/":
if f1.IsFloat() || f2.IsFloat() {
// Result will be float
return AsValue(f1.Float() / f2.Float()), nil
}
// Result will be int
return AsValue(f1.Integer() / f2.Integer()), nil
case "%":
// Result will be int
return AsValue(f1.Integer() % f2.Integer()), nil
default:
return nil, ctx.Error("unimplemented", expr.opToken)
}
} else {
return f1, nil
}
}
func (expr *power) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
p1, err := expr.power1.Evaluate(ctx)
if err != nil {
return nil, err
}
if expr.power2 != nil {
p2, err := expr.power2.Evaluate(ctx)
if err != nil {
return nil, err
}
return AsValue(math.Pow(p1.Float(), p2.Float())), nil
}
return p1, nil
}
func (p *Parser) parseFactor() (IEvaluator, *Error) {
if p.Match(TokenSymbol, "(") != nil {
expr, err := p.ParseExpression()
if err != nil {
return nil, err
}
if p.Match(TokenSymbol, ")") == nil {
return nil, p.Error("Closing bracket expected after expression", nil)
}
return expr, nil
}
return p.parseVariableOrLiteralWithFilter()
}
func (p *Parser) parsePower() (IEvaluator, *Error) {
pw := new(power)
power1, err := p.parseFactor()
if err != nil {
return nil, err
}
pw.power1 = power1
if p.Match(TokenSymbol, "^") != nil {
power2, err := p.parsePower()
if err != nil {
return nil, err
}
pw.power2 = power2
}
if pw.power2 == nil {
// Shortcut for faster evaluation
return pw.power1, nil
}
return pw, nil
}
func (p *Parser) parseTerm() (IEvaluator, *Error) {
returnTerm := new(term)
factor1, err := p.parsePower()
if err != nil {
return nil, err
}
returnTerm.factor1 = factor1
for p.PeekOne(TokenSymbol, "*", "/", "%") != nil {
if returnTerm.opToken != nil {
// Create new sub-term
returnTerm = &term{
factor1: returnTerm,
}
}
op := p.Current()
p.Consume()
factor2, err := p.parsePower()
if err != nil {
return nil, err
}
returnTerm.opToken = op
returnTerm.factor2 = factor2
}
if returnTerm.opToken == nil {
// Shortcut for faster evaluation
return returnTerm.factor1, nil
}
return returnTerm, nil
}
func (p *Parser) parseSimpleExpression() (IEvaluator, *Error) {
expr := new(simpleExpression)
if sign := p.MatchOne(TokenSymbol, "+", "-"); sign != nil {
if sign.Val == "-" {
expr.negativeSign = true
}
}
if p.Match(TokenSymbol, "!") != nil || p.Match(TokenKeyword, "not") != nil {
expr.negate = true
}
term1, err := p.parseTerm()
if err != nil {
return nil, err
}
expr.term1 = term1
for p.PeekOne(TokenSymbol, "+", "-") != nil {
if expr.opToken != nil {
// New sub expr
expr = &simpleExpression{
term1: expr,
}
}
op := p.Current()
p.Consume()
term2, err := p.parseTerm()
if err != nil {
return nil, err
}
expr.term2 = term2
expr.opToken = op
}
if expr.negate == false && expr.negativeSign == false && expr.term2 == nil {
// Shortcut for faster evaluation
return expr.term1, nil
}
return expr, nil
}
func (p *Parser) parseRelationalExpression() (IEvaluator, *Error) {
expr1, err := p.parseSimpleExpression()
if err != nil {
return nil, err
}
expr := &relationalExpression{
expr1: expr1,
}
if t := p.MatchOne(TokenSymbol, "==", "<=", ">=", "!=", "<>", ">", "<"); t != nil {
expr2, err := p.parseRelationalExpression()
if err != nil {
return nil, err
}
expr.opToken = t
expr.expr2 = expr2
} else if t := p.MatchOne(TokenKeyword, "in"); t != nil {
expr2, err := p.parseSimpleExpression()
if err != nil {
return nil, err
}
expr.opToken = t
expr.expr2 = expr2
}
if expr.expr2 == nil {
// Shortcut for faster evaluation
return expr.expr1, nil
}
return expr, nil
}
func (p *Parser) ParseExpression() (IEvaluator, *Error) {
rexpr1, err := p.parseRelationalExpression()
if err != nil {
return nil, err
}
exp := &Expression{
expr1: rexpr1,
}
if p.PeekOne(TokenSymbol, "&&", "||") != nil || p.PeekOne(TokenKeyword, "and", "or") != nil {
op := p.Current()
p.Consume()
expr2, err := p.ParseExpression()
if err != nil {
return nil, err
}
exp.expr2 = expr2
exp.opToken = op
}
if exp.expr2 == nil {
// Shortcut for faster evaluation
return exp.expr1, nil
}
return exp, nil
}
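Read bottom-up, parseFactor → parsePower → parseTerm → parseSimpleExpression → parseRelationalExpression → ParseExpression gives the usual precedence: `^` binds tightest, then `*`, `/` and `%`, then `+` and `-`, then comparisons and `in`, and finally `&&`/`||` (`and`/`or`). A minimal sketch of how that surfaces through pongo2's public API; the import path `github.com/flosch/pongo2` is assumed here, and the exact float/boolean formatting depends on `Value.String()`:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// "^" is evaluated before "*", which is evaluated before "+"; "/" and "%"
	// on two integers stay integer operations, as in the evaluators above.
	tpl := pongo2.Must(pongo2.FromString(
		`{{ 1 + 2 * 3 ^ 2 }} | {{ 10 / 4 }} | {{ 7 % 3 }} | {{ 2 > 1 && !false }}`))
	out, err := tpl.Execute(pongo2.Context{})
	if err != nil {
		panic(err)
	}
	// Prints something along the lines of: 19.000000 | 2 | 1 | True
	fmt.Println(out)
}
```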

View file

@ -1,14 +0,0 @@
package pongo2
// Version string
const Version = "dev"
// Must panics if a Template couldn't be parsed successfully. This is how you
// would use it:
// var baseTemplate = pongo2.Must(pongo2.FromFile("templates/base.html"))
func Must(tpl *Template, err error) *Template {
if err != nil {
panic(err)
}
return tpl
}

View file

@ -1,135 +0,0 @@
package pongo2
/* Incomplete:
-----------
verbatim (only the "name" argument is missing for verbatim)
Reconsideration:
----------------
debug (reason: not sure what to output yet)
regroup / Grouping on other properties (reason: maybe too python-specific; not sure how useful this would be in Go)
The following built-in tags won't be added:
--------------------------------------
csrf_token (reason: web-framework specific)
load (reason: python-specific)
url (reason: web-framework specific)
*/
import (
"fmt"
"github.com/juju/errors"
)
type INodeTag interface {
INode
}
// This is the function signature of the tag's parser you will have
// to implement in order to create a new tag.
//
// 'doc' is providing access to the whole document while 'arguments'
// is providing access to the user's arguments to the tag:
//
// {% your_tag_name some "arguments" 123 %}
//
// start_token will be the *Token with the tag's name in it (here: your_tag_name).
//
// Please see the Parser documentation on how to use the parser.
// See RegisterTag()'s documentation for more information about
// writing a tag as well.
type TagParser func(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error)
type tag struct {
name string
parser TagParser
}
var tags map[string]*tag
func init() {
tags = make(map[string]*tag)
}
// Registers a new tag. You usually want to call this
// function in the tag's init() function:
// http://golang.org/doc/effective_go.html#init
//
// See http://www.florian-schlachter.de/post/pongo2/ for more about
// writing filters and tags.
func RegisterTag(name string, parserFn TagParser) error {
_, existing := tags[name]
if existing {
return errors.Errorf("tag with name '%s' is already registered", name)
}
tags[name] = &tag{
name: name,
parser: parserFn,
}
return nil
}
// Replaces an already registered tag with a new implementation. Use this
// function with caution since it allows you to change existing tag behaviour.
func ReplaceTag(name string, parserFn TagParser) error {
_, existing := tags[name]
if !existing {
return errors.Errorf("tag with name '%s' does not exist (therefore cannot be overridden)", name)
}
tags[name] = &tag{
name: name,
parser: parserFn,
}
return nil
}
// Tag = "{%" IDENT ARGS "%}"
func (p *Parser) parseTagElement() (INodeTag, *Error) {
p.Consume() // consume "{%"
tokenName := p.MatchType(TokenIdentifier)
// Check for identifier
if tokenName == nil {
return nil, p.Error("Tag name must be an identifier.", nil)
}
// Check for the existing tag
tag, exists := tags[tokenName.Val]
if !exists {
// Does not exist
return nil, p.Error(fmt.Sprintf("Tag '%s' not found (or beginning tag not provided)", tokenName.Val), tokenName)
}
// Check sandbox tag restriction
if _, isBanned := p.template.set.bannedTags[tokenName.Val]; isBanned {
return nil, p.Error(fmt.Sprintf("Usage of tag '%s' is not allowed (sandbox restriction active).", tokenName.Val), tokenName)
}
var argsToken []*Token
for p.Peek(TokenSymbol, "%}") == nil && p.Remaining() > 0 {
// Add token to args
argsToken = append(argsToken, p.Current())
p.Consume() // next token
}
// EOF?
if p.Remaining() == 0 {
return nil, p.Error("Unexpectedly reached EOF, no tag end found.", p.lastToken)
}
p.Match(TokenSymbol, "%}")
argParser := newParser(p.name, argsToken, p.template)
if len(argsToken) == 0 {
// This is done to have nice EOF error messages
argParser.lastToken = tokenName
}
p.template.level++
defer func() { p.template.level-- }()
return tag.parser(p, tokenName, argParser)
}
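The TagParser signature and RegisterTag above are the whole extension surface for user-defined tags. As a hedged sketch (the tag name `shout` and the `github.com/flosch/pongo2` import path are illustrative assumptions, not part of this repository):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/flosch/pongo2"
)

// shoutNode writes its single argument, upper-cased.
type shoutNode struct {
	arg pongo2.IEvaluator
}

func (n *shoutNode) Execute(ctx *pongo2.ExecutionContext, w pongo2.TemplateWriter) *pongo2.Error {
	val, err := n.arg.Evaluate(ctx)
	if err != nil {
		return err
	}
	w.WriteString(strings.ToUpper(val.String()))
	return nil
}

// shoutParser has the TagParser signature shown above: it consumes one
// argument expression and rejects anything beyond it.
func shoutParser(doc *pongo2.Parser, start *pongo2.Token, arguments *pongo2.Parser) (pongo2.INodeTag, *pongo2.Error) {
	expr, err := arguments.ParseExpression()
	if err != nil {
		return nil, err
	}
	if arguments.Remaining() > 0 {
		return nil, arguments.Error("shout takes exactly one argument.", nil)
	}
	return &shoutNode{arg: expr}, nil
}

func main() {
	if err := pongo2.RegisterTag("shout", shoutParser); err != nil {
		panic(err)
	}
	tpl := pongo2.Must(pongo2.FromString(`{% shout name %}`))
	out, err := tpl.Execute(pongo2.Context{"name": "pongo2"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // PONGO2
}
```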

View file

@ -1,52 +0,0 @@
package pongo2
type tagAutoescapeNode struct {
wrapper *NodeWrapper
autoescape bool
}
func (node *tagAutoescapeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
old := ctx.Autoescape
ctx.Autoescape = node.autoescape
err := node.wrapper.Execute(ctx, writer)
if err != nil {
return err
}
ctx.Autoescape = old
return nil
}
func tagAutoescapeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
autoescapeNode := &tagAutoescapeNode{}
wrapper, _, err := doc.WrapUntilTag("endautoescape")
if err != nil {
return nil, err
}
autoescapeNode.wrapper = wrapper
modeToken := arguments.MatchType(TokenIdentifier)
if modeToken == nil {
return nil, arguments.Error("A mode is required for autoescape-tag.", nil)
}
if modeToken.Val == "on" {
autoescapeNode.autoescape = true
} else if modeToken.Val == "off" {
autoescapeNode.autoescape = false
} else {
return nil, arguments.Error("Only 'on' or 'off' is valid as an autoescape-mode.", nil)
}
if arguments.Remaining() > 0 {
return nil, arguments.Error("Malformed autoescape-tag arguments.", nil)
}
return autoescapeNode, nil
}
func init() {
RegisterTag("autoescape", tagAutoescapeParser)
}
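pongo2 escapes template output by default, so in practice the tag above is mostly used to switch escaping off for a block. A small usage sketch, assuming the standard import path; the escaped form in the comment follows the built-in `escape` filter:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(
		`{{ html }} vs {% autoescape off %}{{ html }}{% endautoescape %}`))
	out, err := tpl.Execute(pongo2.Context{"html": "<b>hi</b>"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // &lt;b&gt;hi&lt;/b&gt; vs <b>hi</b>
}
```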

View file

@ -1,129 +0,0 @@
package pongo2
import (
"bytes"
"fmt"
)
type tagBlockNode struct {
name string
}
func (node *tagBlockNode) getBlockWrappers(tpl *Template) []*NodeWrapper {
nodeWrappers := make([]*NodeWrapper, 0)
var t *NodeWrapper
for tpl != nil {
t = tpl.blocks[node.name]
if t != nil {
nodeWrappers = append(nodeWrappers, t)
}
tpl = tpl.child
}
return nodeWrappers
}
func (node *tagBlockNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
tpl := ctx.template
if tpl == nil {
panic("internal error: tpl == nil")
}
// Determine the block to execute
blockWrappers := node.getBlockWrappers(tpl)
lenBlockWrappers := len(blockWrappers)
if lenBlockWrappers == 0 {
return ctx.Error("internal error: len(block_wrappers) == 0 in tagBlockNode.Execute()", nil)
}
blockWrapper := blockWrappers[lenBlockWrappers-1]
ctx.Private["block"] = tagBlockInformation{
ctx: ctx,
wrappers: blockWrappers[0 : lenBlockWrappers-1],
}
err := blockWrapper.Execute(ctx, writer)
if err != nil {
return err
}
return nil
}
type tagBlockInformation struct {
ctx *ExecutionContext
wrappers []*NodeWrapper
}
func (t tagBlockInformation) Super() string {
lenWrappers := len(t.wrappers)
if lenWrappers == 0 {
return ""
}
superCtx := NewChildExecutionContext(t.ctx)
superCtx.Private["block"] = tagBlockInformation{
ctx: t.ctx,
wrappers: t.wrappers[0 : lenWrappers-1],
}
blockWrapper := t.wrappers[lenWrappers-1]
buf := bytes.NewBufferString("")
err := blockWrapper.Execute(superCtx, &templateWriter{buf})
if err != nil {
return ""
}
return buf.String()
}
func tagBlockParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
if arguments.Count() == 0 {
return nil, arguments.Error("Tag 'block' requires an identifier.", nil)
}
nameToken := arguments.MatchType(TokenIdentifier)
if nameToken == nil {
return nil, arguments.Error("First argument for tag 'block' must be an identifier.", nil)
}
if arguments.Remaining() != 0 {
return nil, arguments.Error("Tag 'block' takes exactly 1 argument (an identifier).", nil)
}
wrapper, endtagargs, err := doc.WrapUntilTag("endblock")
if err != nil {
return nil, err
}
if endtagargs.Remaining() > 0 {
endtagnameToken := endtagargs.MatchType(TokenIdentifier)
if endtagnameToken != nil {
if endtagnameToken.Val != nameToken.Val {
return nil, endtagargs.Error(fmt.Sprintf("Name for 'endblock' must equal to 'block'-tag's name ('%s' != '%s').",
nameToken.Val, endtagnameToken.Val), nil)
}
}
if endtagnameToken == nil || endtagargs.Remaining() > 0 {
return nil, endtagargs.Error("Either no or only one argument (identifier) allowed for 'endblock'.", nil)
}
}
tpl := doc.template
if tpl == nil {
panic("internal error: tpl == nil")
}
_, hasBlock := tpl.blocks[nameToken.Val]
if !hasBlock {
tpl.blocks[nameToken.Val] = wrapper
} else {
return nil, arguments.Error(fmt.Sprintf("Block named '%s' already defined", nameToken.Val), nil)
}
return &tagBlockNode{name: nameToken.Val}, nil
}
func init() {
RegisterTag("block", tagBlockParser)
}

View file

@ -1,27 +0,0 @@
package pongo2
type tagCommentNode struct{}
func (node *tagCommentNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
return nil
}
func tagCommentParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
commentNode := &tagCommentNode{}
// TODO: Process the endtag's arguments (see django 'comment'-tag documentation)
err := doc.SkipUntilTag("endcomment")
if err != nil {
return nil, err
}
if arguments.Count() != 0 {
return nil, arguments.Error("Tag 'comment' does not take any argument.", nil)
}
return commentNode, nil
}
func init() {
RegisterTag("comment", tagCommentParser)
}

View file

@ -1,106 +0,0 @@
package pongo2
type tagCycleValue struct {
node *tagCycleNode
value *Value
}
type tagCycleNode struct {
position *Token
args []IEvaluator
idx int
asName string
silent bool
}
func (cv *tagCycleValue) String() string {
return cv.value.String()
}
func (node *tagCycleNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
item := node.args[node.idx%len(node.args)]
node.idx++
val, err := item.Evaluate(ctx)
if err != nil {
return err
}
if t, ok := val.Interface().(*tagCycleValue); ok {
// {% cycle "test1" "test2"
// {% cycle cycleitem %}
// Update the cycle value with next value
item := t.node.args[t.node.idx%len(t.node.args)]
t.node.idx++
val, err := item.Evaluate(ctx)
if err != nil {
return err
}
t.value = val
if !t.node.silent {
writer.WriteString(val.String())
}
} else {
// Regular call
cycleValue := &tagCycleValue{
node: node,
value: val,
}
if node.asName != "" {
ctx.Private[node.asName] = cycleValue
}
if !node.silent {
writer.WriteString(val.String())
}
}
return nil
}
// HINT: We're not supporting the old comma-separated list of expressions argument-style
func tagCycleParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
cycleNode := &tagCycleNode{
position: start,
}
for arguments.Remaining() > 0 {
node, err := arguments.ParseExpression()
if err != nil {
return nil, err
}
cycleNode.args = append(cycleNode.args, node)
if arguments.MatchOne(TokenKeyword, "as") != nil {
// as
nameToken := arguments.MatchType(TokenIdentifier)
if nameToken == nil {
return nil, arguments.Error("Name (identifier) expected after 'as'.", nil)
}
cycleNode.asName = nameToken.Val
if arguments.MatchOne(TokenIdentifier, "silent") != nil {
cycleNode.silent = true
}
// Now we're finished
break
}
}
if arguments.Remaining() > 0 {
return nil, arguments.Error("Malformed cycle-tag.", nil)
}
return cycleNode, nil
}
func init() {
RegisterTag("cycle", tagCycleParser)
}

View file

@ -1,52 +0,0 @@
package pongo2
type tagExtendsNode struct {
filename string
}
func (node *tagExtendsNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
return nil
}
func tagExtendsParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
extendsNode := &tagExtendsNode{}
if doc.template.level > 1 {
return nil, arguments.Error("The 'extends' tag can only defined on root level.", start)
}
if doc.template.parent != nil {
// Already one parent
return nil, arguments.Error("This template has already one parent.", start)
}
if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
// prepared, static template
// Get parent's filename
parentFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)
// Parse the parent
parentTemplate, err := doc.template.set.FromFile(parentFilename)
if err != nil {
return nil, err.(*Error)
}
// Keep track of things
parentTemplate.child = doc.template
doc.template.parent = parentTemplate
extendsNode.filename = parentFilename
} else {
return nil, arguments.Error("Tag 'extends' requires a template filename as string.", nil)
}
if arguments.Remaining() > 0 {
return nil, arguments.Error("Tag 'extends' does only take 1 argument.", nil)
}
return extendsNode, nil
}
func init() {
RegisterTag("extends", tagExtendsParser)
}
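Together, the block and extends tags above implement Django-style template inheritance: extends links child and parent at parse time, and block execution picks the deepest child's wrapper for each named block while the root parent provides the page skeleton. A sketch using two hypothetical files written to disk only to keep the example self-contained:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/flosch/pongo2"
)

func main() {
	// Hypothetical templates, written just for this sketch.
	base := []byte(`<title>{% block title %}Site{% endblock %}</title>`)
	child := []byte(`{% extends "base.html" %}{% block title %}Home{% endblock %}`)
	if err := ioutil.WriteFile("base.html", base, 0644); err != nil {
		panic(err)
	}
	if err := ioutil.WriteFile("child.html", child, 0644); err != nil {
		panic(err)
	}
	defer os.Remove("base.html")
	defer os.Remove("child.html")

	tpl := pongo2.Must(pongo2.FromFile("child.html"))
	out, err := tpl.Execute(pongo2.Context{})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // <title>Home</title>
}
```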

View file

@ -1,95 +0,0 @@
package pongo2
import (
"bytes"
)
type nodeFilterCall struct {
name string
paramExpr IEvaluator
}
type tagFilterNode struct {
position *Token
bodyWrapper *NodeWrapper
filterChain []*nodeFilterCall
}
func (node *tagFilterNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
temp := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB size
err := node.bodyWrapper.Execute(ctx, temp)
if err != nil {
return err
}
value := AsValue(temp.String())
for _, call := range node.filterChain {
var param *Value
if call.paramExpr != nil {
param, err = call.paramExpr.Evaluate(ctx)
if err != nil {
return err
}
} else {
param = AsValue(nil)
}
value, err = ApplyFilter(call.name, value, param)
if err != nil {
return ctx.Error(err.Error(), node.position)
}
}
writer.WriteString(value.String())
return nil
}
func tagFilterParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
filterNode := &tagFilterNode{
position: start,
}
wrapper, _, err := doc.WrapUntilTag("endfilter")
if err != nil {
return nil, err
}
filterNode.bodyWrapper = wrapper
for arguments.Remaining() > 0 {
filterCall := &nodeFilterCall{}
nameToken := arguments.MatchType(TokenIdentifier)
if nameToken == nil {
return nil, arguments.Error("Expected a filter name (identifier).", nil)
}
filterCall.name = nameToken.Val
if arguments.MatchOne(TokenSymbol, ":") != nil {
// Filter parameter
// NOTICE: we can't use ParseExpression() here, because it would parse the next filter "|..." as well in the argument list
expr, err := arguments.parseVariableOrLiteral()
if err != nil {
return nil, err
}
filterCall.paramExpr = expr
}
filterNode.filterChain = append(filterNode.filterChain, filterCall)
if arguments.MatchOne(TokenSymbol, "|") == nil {
break
}
}
if arguments.Remaining() > 0 {
return nil, arguments.Error("Malformed filter-tag arguments.", nil)
}
return filterNode, nil
}
func init() {
RegisterTag("filter", tagFilterParser)
}
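The filter tag renders its body into a buffer and then pipes the resulting string through the listed filter chain. A short sketch using the built-in `lower` filter (parameters, when needed, would follow a `:` as parsed above):

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// The body is rendered first, then pushed through the chain
	// (a single built-in filter here).
	tpl := pongo2.Must(pongo2.FromString(
		`{% filter lower %}HELLO {{ name }}{% endfilter %}`))
	out, err := tpl.Execute(pongo2.Context{"name": "World"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // hello world
}
```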

View file

@ -1,49 +0,0 @@
package pongo2
type tagFirstofNode struct {
position *Token
args []IEvaluator
}
func (node *tagFirstofNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
for _, arg := range node.args {
val, err := arg.Evaluate(ctx)
if err != nil {
return err
}
if val.IsTrue() {
if ctx.Autoescape && !arg.FilterApplied("safe") {
val, err = ApplyFilter("escape", val, nil)
if err != nil {
return err
}
}
writer.WriteString(val.String())
return nil
}
}
return nil
}
func tagFirstofParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
firstofNode := &tagFirstofNode{
position: start,
}
for arguments.Remaining() > 0 {
node, err := arguments.ParseExpression()
if err != nil {
return nil, err
}
firstofNode.args = append(firstofNode.args, node)
}
return firstofNode, nil
}
func init() {
RegisterTag("firstof", tagFirstofParser)
}

View file

@ -1,159 +0,0 @@
package pongo2
type tagForNode struct {
key string
value string // only for maps: for key, value in map
objectEvaluator IEvaluator
reversed bool
sorted bool
bodyWrapper *NodeWrapper
emptyWrapper *NodeWrapper
}
type tagForLoopInformation struct {
Counter int
Counter0 int
Revcounter int
Revcounter0 int
First bool
Last bool
Parentloop *tagForLoopInformation
}
func (node *tagForNode) Execute(ctx *ExecutionContext, writer TemplateWriter) (forError *Error) {
// Backup forloop (as parentloop in public context), key-name and value-name
forCtx := NewChildExecutionContext(ctx)
parentloop := forCtx.Private["forloop"]
// Create loop struct
loopInfo := &tagForLoopInformation{
First: true,
}
// Is it a loop in a loop?
if parentloop != nil {
loopInfo.Parentloop = parentloop.(*tagForLoopInformation)
}
// Register loopInfo in public context
forCtx.Private["forloop"] = loopInfo
obj, err := node.objectEvaluator.Evaluate(forCtx)
if err != nil {
return err
}
obj.IterateOrder(func(idx, count int, key, value *Value) bool {
// There's something to iterate over (correct type and at least 1 item)
// Update loop infos and public context
forCtx.Private[node.key] = key
if value != nil {
forCtx.Private[node.value] = value
}
loopInfo.Counter = idx + 1
loopInfo.Counter0 = idx
if idx == 1 {
loopInfo.First = false
}
if idx+1 == count {
loopInfo.Last = true
}
loopInfo.Revcounter = count - idx // TODO: Not sure about this, have to look it up
loopInfo.Revcounter0 = count - (idx + 1) // TODO: Not sure about this, have to look it up
// Render elements with updated context
err := node.bodyWrapper.Execute(forCtx, writer)
if err != nil {
forError = err
return false
}
return true
}, func() {
// Nothing to iterate over (maybe wrong type or no items)
if node.emptyWrapper != nil {
err := node.emptyWrapper.Execute(forCtx, writer)
if err != nil {
forError = err
}
}
}, node.reversed, node.sorted)
return forError
}
func tagForParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
forNode := &tagForNode{}
// Arguments parsing
var valueToken *Token
keyToken := arguments.MatchType(TokenIdentifier)
if keyToken == nil {
return nil, arguments.Error("Expected an key identifier as first argument for 'for'-tag", nil)
}
if arguments.Match(TokenSymbol, ",") != nil {
// Value name is provided
valueToken = arguments.MatchType(TokenIdentifier)
if valueToken == nil {
return nil, arguments.Error("Value name must be an identifier.", nil)
}
}
if arguments.Match(TokenKeyword, "in") == nil {
return nil, arguments.Error("Expected keyword 'in'.", nil)
}
objectEvaluator, err := arguments.ParseExpression()
if err != nil {
return nil, err
}
forNode.objectEvaluator = objectEvaluator
forNode.key = keyToken.Val
if valueToken != nil {
forNode.value = valueToken.Val
}
if arguments.MatchOne(TokenIdentifier, "reversed") != nil {
forNode.reversed = true
}
if arguments.MatchOne(TokenIdentifier, "sorted") != nil {
forNode.sorted = true
}
if arguments.Remaining() > 0 {
return nil, arguments.Error("Malformed for-loop arguments.", nil)
}
// Body wrapping
wrapper, endargs, err := doc.WrapUntilTag("empty", "endfor")
if err != nil {
return nil, err
}
forNode.bodyWrapper = wrapper
if endargs.Count() > 0 {
return nil, endargs.Error("Arguments not allowed here.", nil)
}
if wrapper.Endtag == "empty" {
// if there's an 'empty' section in the for-loop, we need that block as well
wrapper, endargs, err = doc.WrapUntilTag("endfor")
if err != nil {
return nil, err
}
forNode.emptyWrapper = wrapper
if endargs.Count() > 0 {
return nil, endargs.Error("Arguments not allowed here.", nil)
}
}
return forNode, nil
}
func init() {
RegisterTag("for", tagForParser)
}
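The for tag exposes the loop bookkeeping above as `forloop` (Counter, Counter0, Revcounter, First, Last, Parentloop) and supports an `{% empty %}` branch plus the `reversed` and `sorted` flags. A minimal sketch of the common case, with the usual import path assumed:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// forloop.Counter / forloop.Last come from tagForLoopInformation above;
	// the {% empty %} branch renders when there is nothing to iterate over.
	tpl := pongo2.Must(pongo2.FromString(
		`{% for name in names %}{{ forloop.Counter }}:{{ name }}{% if not forloop.Last %}, {% endif %}{% empty %}no names{% endfor %}`))
	out, err := tpl.Execute(pongo2.Context{"names": []string{"ada", "grace"}})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 1:ada, 2:grace
}
```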

View file

@ -1,76 +0,0 @@
package pongo2
type tagIfNode struct {
conditions []IEvaluator
wrappers []*NodeWrapper
}
func (node *tagIfNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
for i, condition := range node.conditions {
result, err := condition.Evaluate(ctx)
if err != nil {
return err
}
if result.IsTrue() {
return node.wrappers[i].Execute(ctx, writer)
}
// Last condition?
if len(node.conditions) == i+1 && len(node.wrappers) > i+1 {
return node.wrappers[i+1].Execute(ctx, writer)
}
}
return nil
}
func tagIfParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
ifNode := &tagIfNode{}
// Parse first and main IF condition
condition, err := arguments.ParseExpression()
if err != nil {
return nil, err
}
ifNode.conditions = append(ifNode.conditions, condition)
if arguments.Remaining() > 0 {
return nil, arguments.Error("If-condition is malformed.", nil)
}
// Check the rest
for {
wrapper, tagArgs, err := doc.WrapUntilTag("elif", "else", "endif")
if err != nil {
return nil, err
}
ifNode.wrappers = append(ifNode.wrappers, wrapper)
if wrapper.Endtag == "elif" {
// elif can take a condition
condition, err = tagArgs.ParseExpression()
if err != nil {
return nil, err
}
ifNode.conditions = append(ifNode.conditions, condition)
if tagArgs.Remaining() > 0 {
return nil, tagArgs.Error("Elif-condition is malformed.", nil)
}
} else {
if tagArgs.Count() > 0 {
// else/endif can't take any conditions
return nil, tagArgs.Error("Arguments not allowed here.", nil)
}
}
if wrapper.Endtag == "endif" {
break
}
}
return ifNode, nil
}
func init() {
RegisterTag("if", tagIfParser)
}

View file

@ -1,116 +0,0 @@
package pongo2
import (
"bytes"
)
type tagIfchangedNode struct {
watchedExpr []IEvaluator
lastValues []*Value
lastContent []byte
thenWrapper *NodeWrapper
elseWrapper *NodeWrapper
}
func (node *tagIfchangedNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
if len(node.watchedExpr) == 0 {
// Check against own rendered body
buf := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
err := node.thenWrapper.Execute(ctx, buf)
if err != nil {
return err
}
bufBytes := buf.Bytes()
if !bytes.Equal(node.lastContent, bufBytes) {
// Rendered content changed, output it
writer.Write(bufBytes)
node.lastContent = bufBytes
}
} else {
nowValues := make([]*Value, 0, len(node.watchedExpr))
for _, expr := range node.watchedExpr {
val, err := expr.Evaluate(ctx)
if err != nil {
return err
}
nowValues = append(nowValues, val)
}
// Compare old to new values now
changed := len(node.lastValues) == 0
for idx, oldVal := range node.lastValues {
if !oldVal.EqualValueTo(nowValues[idx]) {
changed = true
break // we can stop here because ONE value changed
}
}
node.lastValues = nowValues
if changed {
// Render thenWrapper
err := node.thenWrapper.Execute(ctx, writer)
if err != nil {
return err
}
} else {
// Render elseWrapper
err := node.elseWrapper.Execute(ctx, writer)
if err != nil {
return err
}
}
}
return nil
}
func tagIfchangedParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
ifchangedNode := &tagIfchangedNode{}
for arguments.Remaining() > 0 {
// Parse condition
expr, err := arguments.ParseExpression()
if err != nil {
return nil, err
}
ifchangedNode.watchedExpr = append(ifchangedNode.watchedExpr, expr)
}
if arguments.Remaining() > 0 {
return nil, arguments.Error("Ifchanged-arguments are malformed.", nil)
}
// Wrap then/else-blocks
wrapper, endargs, err := doc.WrapUntilTag("else", "endifchanged")
if err != nil {
return nil, err
}
ifchangedNode.thenWrapper = wrapper
if endargs.Count() > 0 {
return nil, endargs.Error("Arguments not allowed here.", nil)
}
if wrapper.Endtag == "else" {
// if there's an else in the if-statement, we need the else-Block as well
wrapper, endargs, err = doc.WrapUntilTag("endifchanged")
if err != nil {
return nil, err
}
ifchangedNode.elseWrapper = wrapper
if endargs.Count() > 0 {
return nil, endargs.Error("Arguments not allowed here.", nil)
}
}
return ifchangedNode, nil
}
func init() {
RegisterTag("ifchanged", tagIfchangedParser)
}

View file

@ -1,78 +0,0 @@
package pongo2
type tagIfEqualNode struct {
var1, var2 IEvaluator
thenWrapper *NodeWrapper
elseWrapper *NodeWrapper
}
func (node *tagIfEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
r1, err := node.var1.Evaluate(ctx)
if err != nil {
return err
}
r2, err := node.var2.Evaluate(ctx)
if err != nil {
return err
}
result := r1.EqualValueTo(r2)
if result {
return node.thenWrapper.Execute(ctx, writer)
}
if node.elseWrapper != nil {
return node.elseWrapper.Execute(ctx, writer)
}
return nil
}
func tagIfEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
ifequalNode := &tagIfEqualNode{}
// Parse two expressions
var1, err := arguments.ParseExpression()
if err != nil {
return nil, err
}
var2, err := arguments.ParseExpression()
if err != nil {
return nil, err
}
ifequalNode.var1 = var1
ifequalNode.var2 = var2
if arguments.Remaining() > 0 {
return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
}
// Wrap then/else-blocks
wrapper, endargs, err := doc.WrapUntilTag("else", "endifequal")
if err != nil {
return nil, err
}
ifequalNode.thenWrapper = wrapper
if endargs.Count() > 0 {
return nil, endargs.Error("Arguments not allowed here.", nil)
}
if wrapper.Endtag == "else" {
// if there's an else in the if-statement, we need the else-Block as well
wrapper, endargs, err = doc.WrapUntilTag("endifequal")
if err != nil {
return nil, err
}
ifequalNode.elseWrapper = wrapper
if endargs.Count() > 0 {
return nil, endargs.Error("Arguments not allowed here.", nil)
}
}
return ifequalNode, nil
}
func init() {
RegisterTag("ifequal", tagIfEqualParser)
}

View file

@ -1,78 +0,0 @@
package pongo2
type tagIfNotEqualNode struct {
var1, var2 IEvaluator
thenWrapper *NodeWrapper
elseWrapper *NodeWrapper
}
func (node *tagIfNotEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
r1, err := node.var1.Evaluate(ctx)
if err != nil {
return err
}
r2, err := node.var2.Evaluate(ctx)
if err != nil {
return err
}
result := !r1.EqualValueTo(r2)
if result {
return node.thenWrapper.Execute(ctx, writer)
}
if node.elseWrapper != nil {
return node.elseWrapper.Execute(ctx, writer)
}
return nil
}
func tagIfNotEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
ifnotequalNode := &tagIfNotEqualNode{}
// Parse two expressions
var1, err := arguments.ParseExpression()
if err != nil {
return nil, err
}
var2, err := arguments.ParseExpression()
if err != nil {
return nil, err
}
ifnotequalNode.var1 = var1
ifnotequalNode.var2 = var2
if arguments.Remaining() > 0 {
return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
}
// Wrap then/else-blocks
wrapper, endargs, err := doc.WrapUntilTag("else", "endifnotequal")
if err != nil {
return nil, err
}
ifnotequalNode.thenWrapper = wrapper
if endargs.Count() > 0 {
return nil, endargs.Error("Arguments not allowed here.", nil)
}
if wrapper.Endtag == "else" {
// if there's an else in the if-statement, we need the else-Block as well
wrapper, endargs, err = doc.WrapUntilTag("endifnotequal")
if err != nil {
return nil, err
}
ifnotequalNode.elseWrapper = wrapper
if endargs.Count() > 0 {
return nil, endargs.Error("Arguments not allowed here.", nil)
}
}
return ifnotequalNode, nil
}
func init() {
RegisterTag("ifnotequal", tagIfNotEqualParser)
}

View file

@ -1,84 +0,0 @@
package pongo2
import (
"fmt"
)
type tagImportNode struct {
position *Token
filename string
macros map[string]*tagMacroNode // alias/name -> macro instance
}
func (node *tagImportNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
for name, macro := range node.macros {
func(name string, macro *tagMacroNode) {
ctx.Private[name] = func(args ...*Value) *Value {
return macro.call(ctx, args...)
}
}(name, macro)
}
return nil
}
func tagImportParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
importNode := &tagImportNode{
position: start,
macros: make(map[string]*tagMacroNode),
}
filenameToken := arguments.MatchType(TokenString)
if filenameToken == nil {
return nil, arguments.Error("Import-tag needs a filename as string.", nil)
}
importNode.filename = doc.template.set.resolveFilename(doc.template, filenameToken.Val)
if arguments.Remaining() == 0 {
return nil, arguments.Error("You must at least specify one macro to import.", nil)
}
// Compile the given template
tpl, err := doc.template.set.FromFile(importNode.filename)
if err != nil {
return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, start)
}
for arguments.Remaining() > 0 {
macroNameToken := arguments.MatchType(TokenIdentifier)
if macroNameToken == nil {
return nil, arguments.Error("Expected macro name (identifier).", nil)
}
asName := macroNameToken.Val
if arguments.Match(TokenKeyword, "as") != nil {
aliasToken := arguments.MatchType(TokenIdentifier)
if aliasToken == nil {
return nil, arguments.Error("Expected macro alias name (identifier).", nil)
}
asName = aliasToken.Val
}
macroInstance, has := tpl.exportedMacros[macroNameToken.Val]
if !has {
return nil, arguments.Error(fmt.Sprintf("Macro '%s' not found (or not exported) in '%s'.", macroNameToken.Val,
importNode.filename), macroNameToken)
}
importNode.macros[asName] = macroInstance
if arguments.Remaining() == 0 {
break
}
if arguments.Match(TokenSymbol, ",") == nil {
return nil, arguments.Error("Expected ','.", nil)
}
}
return importNode, nil
}
func init() {
RegisterTag("import", tagImportParser)
}

View file

@ -1,146 +0,0 @@
package pongo2
type tagIncludeNode struct {
tpl *Template
filenameEvaluator IEvaluator
lazy bool
only bool
filename string
withPairs map[string]IEvaluator
ifExists bool
}
func (node *tagIncludeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
// Building the context for the template
includeCtx := make(Context)
// Fill the context with all data from the parent
if !node.only {
includeCtx.Update(ctx.Public)
includeCtx.Update(ctx.Private)
}
// Put all custom with-pairs into the context
for key, value := range node.withPairs {
val, err := value.Evaluate(ctx)
if err != nil {
return err
}
includeCtx[key] = val
}
// Execute the template
if node.lazy {
// Evaluate the filename
filename, err := node.filenameEvaluator.Evaluate(ctx)
if err != nil {
return err
}
if filename.String() == "" {
return ctx.Error("Filename for 'include'-tag evaluated to an empty string.", nil)
}
// Get include-filename
includedFilename := ctx.template.set.resolveFilename(ctx.template, filename.String())
includedTpl, err2 := ctx.template.set.FromFile(includedFilename)
if err2 != nil {
// if this is ReadFile error, and "if_exists" flag is enabled
if node.ifExists && err2.(*Error).Sender == "fromfile" {
return nil
}
return err2.(*Error)
}
err2 = includedTpl.ExecuteWriter(includeCtx, writer)
if err2 != nil {
return err2.(*Error)
}
return nil
}
// Template is already parsed with static filename
err := node.tpl.ExecuteWriter(includeCtx, writer)
if err != nil {
return err.(*Error)
}
return nil
}
type tagIncludeEmptyNode struct{}
func (node *tagIncludeEmptyNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
return nil
}
func tagIncludeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
includeNode := &tagIncludeNode{
withPairs: make(map[string]IEvaluator),
}
if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
// prepared, static template
// "if_exists" flag
ifExists := arguments.Match(TokenIdentifier, "if_exists") != nil
// Get include-filename
includedFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)
// Parse the parent
includeNode.filename = includedFilename
includedTpl, err := doc.template.set.FromFile(includedFilename)
if err != nil {
// if this is ReadFile error, and "if_exists" token presents we should create and empty node
if err.(*Error).Sender == "fromfile" && ifExists {
return &tagIncludeEmptyNode{}, nil
}
return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, filenameToken)
}
includeNode.tpl = includedTpl
} else {
// Not a string, so the user wants to use lazy evaluation (slower, but possible)
filenameEvaluator, err := arguments.ParseExpression()
if err != nil {
return nil, err.updateFromTokenIfNeeded(doc.template, filenameToken)
}
includeNode.filenameEvaluator = filenameEvaluator
includeNode.lazy = true
includeNode.ifExists = arguments.Match(TokenIdentifier, "if_exists") != nil // "if_exists" flag
}
// After having parsed the filename we're gonna parse the with+only options
if arguments.Match(TokenIdentifier, "with") != nil {
for arguments.Remaining() > 0 {
// We have at least one key=expr pair (because of starting "with")
keyToken := arguments.MatchType(TokenIdentifier)
if keyToken == nil {
return nil, arguments.Error("Expected an identifier", nil)
}
if arguments.Match(TokenSymbol, "=") == nil {
return nil, arguments.Error("Expected '='.", nil)
}
valueExpr, err := arguments.ParseExpression()
if err != nil {
return nil, err.updateFromTokenIfNeeded(doc.template, keyToken)
}
includeNode.withPairs[keyToken.Val] = valueExpr
// Only?
if arguments.Match(TokenIdentifier, "only") != nil {
includeNode.only = true
break // stop parsing arguments because it's the last option
}
}
}
if arguments.Remaining() > 0 {
return nil, arguments.Error("Malformed 'include'-tag arguments.", nil)
}
return includeNode, nil
}
func init() {
RegisterTag("include", tagIncludeParser)
}
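The include tag supports a static filename (resolved at parse time) or a lazy expression, an `if_exists` flag that silently skips missing files, and `with key=expr ... only` to build the child context. A sketch that writes a hypothetical partial to disk first; the relative filename is assumed to resolve against the working directory here:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/flosch/pongo2"
)

func main() {
	// Hypothetical partial, written only to keep the sketch self-contained.
	if err := ioutil.WriteFile("greeting.html", []byte(`Hello {{ who }}!`), 0644); err != nil {
		panic(err)
	}
	defer os.Remove("greeting.html")

	// "with who=user only" builds the child context from scratch;
	// "if_exists" turns the missing second include into a no-op.
	tpl := pongo2.Must(pongo2.FromString(
		`{% include "greeting.html" with who=user only %}{% include "missing.html" if_exists %}`))
	out, err := tpl.Execute(pongo2.Context{"user": "world"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // Hello world!
}
```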

View file

@ -1,133 +0,0 @@
package pongo2
import (
"math/rand"
"strings"
"time"
"github.com/juju/errors"
)
var (
tagLoremParagraphs = strings.Split(tagLoremText, "\n")
tagLoremWords = strings.Fields(tagLoremText)
)
type tagLoremNode struct {
position *Token
count int // number of paragraphs
method string // w = words, p = HTML paragraphs, b = plain-text (default is b)
random bool // does not use the default paragraph "Lorem ipsum dolor sit amet, ..."
}
func (node *tagLoremNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
switch node.method {
case "b":
if node.random {
for i := 0; i < node.count; i++ {
if i > 0 {
writer.WriteString("\n")
}
par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
writer.WriteString(par)
}
} else {
for i := 0; i < node.count; i++ {
if i > 0 {
writer.WriteString("\n")
}
par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
writer.WriteString(par)
}
}
case "w":
if node.random {
for i := 0; i < node.count; i++ {
if i > 0 {
writer.WriteString(" ")
}
word := tagLoremWords[rand.Intn(len(tagLoremWords))]
writer.WriteString(word)
}
} else {
for i := 0; i < node.count; i++ {
if i > 0 {
writer.WriteString(" ")
}
word := tagLoremWords[i%len(tagLoremWords)]
writer.WriteString(word)
}
}
case "p":
if node.random {
for i := 0; i < node.count; i++ {
if i > 0 {
writer.WriteString("\n")
}
writer.WriteString("<p>")
par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
writer.WriteString(par)
writer.WriteString("</p>")
}
} else {
for i := 0; i < node.count; i++ {
if i > 0 {
writer.WriteString("\n")
}
writer.WriteString("<p>")
par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
writer.WriteString(par)
writer.WriteString("</p>")
}
}
default:
return ctx.OrigError(errors.Errorf("unsupported method: %s", node.method), nil)
}
return nil
}
func tagLoremParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
loremNode := &tagLoremNode{
position: start,
count: 1,
method: "b",
}
if countToken := arguments.MatchType(TokenNumber); countToken != nil {
loremNode.count = AsValue(countToken.Val).Integer()
}
if methodToken := arguments.MatchType(TokenIdentifier); methodToken != nil {
if methodToken.Val != "w" && methodToken.Val != "p" && methodToken.Val != "b" {
return nil, arguments.Error("lorem-method must be either 'w', 'p' or 'b'.", nil)
}
loremNode.method = methodToken.Val
}
if arguments.MatchOne(TokenIdentifier, "random") != nil {
loremNode.random = true
}
if arguments.Remaining() > 0 {
return nil, arguments.Error("Malformed lorem-tag arguments.", nil)
}
return loremNode, nil
}
func init() {
rand.Seed(time.Now().Unix())
RegisterTag("lorem", tagLoremParser)
}
const tagLoremText = `Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat.
Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi.
Nam liber tempor cum soluta nobis eleifend option congue nihil imperdiet doming id quod mazim placerat facer possim assum. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.
Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis.
At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, At accusam aliquyam diam diam dolore dolores duo eirmod eos erat, et nonumy sed tempor et et invidunt justo labore Stet clita ea et gubergren, kasd magna no rebum. sanctus sea sed takimata ut vero voluptua. est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat.
Consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.`

View file

@ -1,149 +0,0 @@
package pongo2
import (
"bytes"
"fmt"
)
type tagMacroNode struct {
position *Token
name string
argsOrder []string
args map[string]IEvaluator
exported bool
wrapper *NodeWrapper
}
func (node *tagMacroNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
ctx.Private[node.name] = func(args ...*Value) *Value {
return node.call(ctx, args...)
}
return nil
}
func (node *tagMacroNode) call(ctx *ExecutionContext, args ...*Value) *Value {
argsCtx := make(Context)
for k, v := range node.args {
if v == nil {
// User did not provide a default value
argsCtx[k] = nil
} else {
// Evaluate the default value
valueExpr, err := v.Evaluate(ctx)
if err != nil {
ctx.Logf(err.Error())
return AsSafeValue(err.Error())
}
argsCtx[k] = valueExpr
}
}
if len(args) > len(node.argsOrder) {
// Too many arguments; we're ignoring them and just logging the error in debug mode.
err := ctx.Error(fmt.Sprintf("Macro '%s' called with too many arguments (%d instead of %d).",
node.name, len(args), len(node.argsOrder)), nil).updateFromTokenIfNeeded(ctx.template, node.position)
ctx.Logf(err.Error()) // TODO: This is a workaround, because the error is not returned yet to the Execution()-methods
return AsSafeValue(err.Error())
}
// Make a context for the macro execution
macroCtx := NewChildExecutionContext(ctx)
// Register all arguments in the private context
macroCtx.Private.Update(argsCtx)
for idx, argValue := range args {
macroCtx.Private[node.argsOrder[idx]] = argValue.Interface()
}
var b bytes.Buffer
err := node.wrapper.Execute(macroCtx, &b)
if err != nil {
return AsSafeValue(err.updateFromTokenIfNeeded(ctx.template, node.position).Error())
}
return AsSafeValue(b.String())
}
func tagMacroParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
macroNode := &tagMacroNode{
position: start,
args: make(map[string]IEvaluator),
}
nameToken := arguments.MatchType(TokenIdentifier)
if nameToken == nil {
return nil, arguments.Error("Macro-tag needs at least an identifier as name.", nil)
}
macroNode.name = nameToken.Val
if arguments.MatchOne(TokenSymbol, "(") == nil {
return nil, arguments.Error("Expected '('.", nil)
}
for arguments.Match(TokenSymbol, ")") == nil {
argNameToken := arguments.MatchType(TokenIdentifier)
if argNameToken == nil {
return nil, arguments.Error("Expected argument name as identifier.", nil)
}
macroNode.argsOrder = append(macroNode.argsOrder, argNameToken.Val)
if arguments.Match(TokenSymbol, "=") != nil {
// Default expression follows
argDefaultExpr, err := arguments.ParseExpression()
if err != nil {
return nil, err
}
macroNode.args[argNameToken.Val] = argDefaultExpr
} else {
// No default expression
macroNode.args[argNameToken.Val] = nil
}
if arguments.Match(TokenSymbol, ")") != nil {
break
}
if arguments.Match(TokenSymbol, ",") == nil {
return nil, arguments.Error("Expected ',' or ')'.", nil)
}
}
if arguments.Match(TokenKeyword, "export") != nil {
macroNode.exported = true
}
if arguments.Remaining() > 0 {
return nil, arguments.Error("Malformed macro-tag.", nil)
}
// Body wrapping
wrapper, endargs, err := doc.WrapUntilTag("endmacro")
if err != nil {
return nil, err
}
macroNode.wrapper = wrapper
if endargs.Count() > 0 {
return nil, endargs.Error("Arguments not allowed here.", nil)
}
if macroNode.exported {
// Now register the macro if it wants to be exported
_, has := doc.template.exportedMacros[macroNode.name]
if has {
return nil, doc.Error(fmt.Sprintf("another macro with name '%s' already exported", macroNode.name), start)
}
doc.template.exportedMacros[macroNode.name] = macroNode
}
return macroNode, nil
}
func init() {
RegisterTag("macro", tagMacroParser)
}
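A macro behaves like a function stored in the template context: defaults declared in the signature are evaluated when the macro is called, and `export` plus the import tag make it available to other templates. A sketch of an in-template definition and call, import path assumed:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// greet has one required and one defaulted argument; it is callable because
	// Execute stores it as a func(...*Value) *Value in the context.
	tpl := pongo2.Must(pongo2.FromString(
		`{% macro greet(name, greeting="Hello") %}{{ greeting }}, {{ name }}!{% endmacro %}` +
			`{{ greet("pongo2") }} {{ greet("world", "Hi") }}`))
	out, err := tpl.Execute(pongo2.Context{})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // Hello, pongo2! Hi, world!
}
```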

View file

@ -1,50 +0,0 @@
package pongo2
import (
"time"
)
type tagNowNode struct {
position *Token
format string
fake bool
}
func (node *tagNowNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
var t time.Time
if node.fake {
t = time.Date(2014, time.February, 05, 18, 31, 45, 00, time.UTC)
} else {
t = time.Now()
}
writer.WriteString(t.Format(node.format))
return nil
}
func tagNowParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
nowNode := &tagNowNode{
position: start,
}
formatToken := arguments.MatchType(TokenString)
if formatToken == nil {
return nil, arguments.Error("Expected a format string.", nil)
}
nowNode.format = formatToken.Val
if arguments.MatchOne(TokenIdentifier, "fake") != nil {
nowNode.fake = true
}
if arguments.Remaining() > 0 {
return nil, arguments.Error("Malformed now-tag arguments.", nil)
}
return nowNode, nil
}
func init() {
RegisterTag("now", tagNowParser)
}

View file

@ -1,50 +0,0 @@
package pongo2
type tagSetNode struct {
name string
expression IEvaluator
}
func (node *tagSetNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
// Evaluate expression
value, err := node.expression.Evaluate(ctx)
if err != nil {
return err
}
ctx.Private[node.name] = value
return nil
}
func tagSetParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
node := &tagSetNode{}
// Parse variable name
typeToken := arguments.MatchType(TokenIdentifier)
if typeToken == nil {
return nil, arguments.Error("Expected an identifier.", nil)
}
node.name = typeToken.Val
if arguments.Match(TokenSymbol, "=") == nil {
return nil, arguments.Error("Expected '='.", nil)
}
// Variable expression
keyExpression, err := arguments.ParseExpression()
if err != nil {
return nil, err
}
node.expression = keyExpression
// Remaining arguments
if arguments.Remaining() > 0 {
return nil, arguments.Error("Malformed 'set'-tag arguments.", nil)
}
return node, nil
}
func init() {
RegisterTag("set", tagSetParser)
}
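The set tag simply evaluates its right-hand expression once and stores the result in the private context for the rest of the rendering. For example:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(
		`{% set answer = 2 * 21 %}The answer is {{ answer }}.`))
	out, err := tpl.Execute(pongo2.Context{})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // The answer is 42.
}
```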

Some files were not shown because too many files have changed in this diff.