1
0
Fork 0
mirror of https://github.com/Luzifer/staticmap.git synced 2024-10-18 07:34:23 +00:00

Update dependencies

Signed-off-by: Knut Ahlers <knut@ahlers.me>
This commit is contained in:
Knut Ahlers 2018-04-03 21:14:16 +02:00
parent 3c73d0a08c
commit 1abd72dfdc
Signed by: luzifer
GPG key ID: DC2729FDD34BE99E
411 changed files with 85657 additions and 37863 deletions

155
Godeps/Godeps.json generated
View file

@ -1,155 +0,0 @@
{
"ImportPath": "github.com/Luzifer/staticmap",
"GoVersion": "go1.8",
"GodepVersion": "v79",
"Deps": [
{
"ImportPath": "github.com/Luzifer/go_helpers/accessLogger",
"Comment": "v2.2.0",
"Rev": "e31c3a2659d3f4901f696692cfe98bd0eb5168f9"
},
{
"ImportPath": "github.com/Luzifer/go_helpers/http",
"Comment": "v2.2.0",
"Rev": "e31c3a2659d3f4901f696692cfe98bd0eb5168f9"
},
{
"ImportPath": "github.com/Luzifer/rconfig",
"Comment": "v1.2.0",
"Rev": "7aef1d393c1e2d0758901853b59981c7adc67c7e"
},
{
"ImportPath": "github.com/Sirupsen/logrus",
"Comment": "v0.10.0-38-g3ec0642",
"Rev": "3ec0642a7fb6488f65b06f9040adc67e3990296a"
},
{
"ImportPath": "github.com/Wessie/appdirs",
"Rev": "6573e894f8e294cbae0c4e45c25ff9f2e2918a4e"
},
{
"ImportPath": "github.com/didip/tollbooth",
"Rev": "e11ced12e0e0bbfedbc1783fcf0a5ec7f9dc4856"
},
{
"ImportPath": "github.com/didip/tollbooth/config",
"Rev": "e11ced12e0e0bbfedbc1783fcf0a5ec7f9dc4856"
},
{
"ImportPath": "github.com/didip/tollbooth/errors",
"Rev": "e11ced12e0e0bbfedbc1783fcf0a5ec7f9dc4856"
},
{
"ImportPath": "github.com/didip/tollbooth/libstring",
"Rev": "e11ced12e0e0bbfedbc1783fcf0a5ec7f9dc4856"
},
{
"ImportPath": "github.com/flopp/go-coordsparser",
"Rev": "845bca739e263e1cd38de25024a47b4d6acbfc1f"
},
{
"ImportPath": "github.com/flopp/go-staticmaps",
"Rev": "e8779c98399f6efad291d6504990daceeb9940a9"
},
{
"ImportPath": "github.com/fogleman/gg",
"Comment": "v1.0.0-10-gee8994f",
"Rev": "ee8994ff90057955c428a5a949da5d064bf3ce6b"
},
{
"ImportPath": "github.com/golang/freetype/raster",
"Comment": "release-131-g38b4c39",
"Rev": "38b4c392adc5eed94207994c4848fff99f4ac234"
},
{
"ImportPath": "github.com/golang/freetype/truetype",
"Comment": "release-131-g38b4c39",
"Rev": "38b4c392adc5eed94207994c4848fff99f4ac234"
},
{
"ImportPath": "github.com/golang/geo/r1",
"Rev": "f65fe014169924880aa2c95d7707c2da435534b9"
},
{
"ImportPath": "github.com/golang/geo/r2",
"Rev": "f65fe014169924880aa2c95d7707c2da435534b9"
},
{
"ImportPath": "github.com/golang/geo/r3",
"Rev": "f65fe014169924880aa2c95d7707c2da435534b9"
},
{
"ImportPath": "github.com/golang/geo/s1",
"Rev": "f65fe014169924880aa2c95d7707c2da435534b9"
},
{
"ImportPath": "github.com/golang/geo/s2",
"Rev": "f65fe014169924880aa2c95d7707c2da435534b9"
},
{
"ImportPath": "github.com/gorilla/context",
"Rev": "1c83b3eabd45b6d76072b66b746c20815fb2872d"
},
{
"ImportPath": "github.com/gorilla/mux",
"Rev": "49c024275504f0341e5a9971eb7ba7fa3dc7af40"
},
{
"ImportPath": "github.com/lucasb-eyer/go-colorful",
"Rev": "c900de9dbbc73129068f5af6a823068fc5f2308c"
},
{
"ImportPath": "github.com/patrickmn/go-cache",
"Comment": "v2.0.0-9-g7ac1518",
"Rev": "7ac151875ffb48b9f3ccce9ea20f020b0c1596c8"
},
{
"ImportPath": "github.com/spf13/pflag",
"Rev": "c7e63cf4530bcd3ba943729cee0efeff2ebea63f"
},
{
"ImportPath": "github.com/tkrajina/gpxgo/gpx",
"Rev": "7848cf26f5a58b4a4e23b89a4b67cfc3d52dd042"
},
{
"ImportPath": "golang.org/x/image/draw",
"Rev": "97680175a5267bb8b31f1923e7a66df98013b11a"
},
{
"ImportPath": "golang.org/x/image/font",
"Rev": "97680175a5267bb8b31f1923e7a66df98013b11a"
},
{
"ImportPath": "golang.org/x/image/font/basicfont",
"Rev": "97680175a5267bb8b31f1923e7a66df98013b11a"
},
{
"ImportPath": "golang.org/x/image/math/f64",
"Rev": "97680175a5267bb8b31f1923e7a66df98013b11a"
},
{
"ImportPath": "golang.org/x/image/math/fixed",
"Rev": "97680175a5267bb8b31f1923e7a66df98013b11a"
},
{
"ImportPath": "golang.org/x/net/context",
"Rev": "dfe83d419c9403b40b19d08cdba2afec27b002f7"
},
{
"ImportPath": "golang.org/x/sys/unix",
"Rev": "8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9"
},
{
"ImportPath": "golang.org/x/time/rate",
"Rev": "8be79e1e0910c292df4e79c241bb7e8f7e725959"
},
{
"ImportPath": "gopkg.in/validator.v2",
"Rev": "07ffaad256c8e957050ad83d6472eb97d785013d"
},
{
"ImportPath": "gopkg.in/yaml.v2",
"Rev": "31c299268d302dd0aa9a0dcf765a3d58971ac83f"
}
]
}

5
Godeps/Readme generated
View file

@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

81
Gopkg.lock generated
View file

@ -1,14 +1,21 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "master"
name = "github.com/Luzifer/go-staticmaps"
packages = ["."]
revision = "5f69ec7945b506b1d33edd717e84e479818e2365"
[[projects]]
name = "github.com/Luzifer/go_helpers"
packages = [
"accessLogger",
"http"
"http",
"str"
]
revision = "e31c3a2659d3f4901f696692cfe98bd0eb5168f9"
version = "v2.2.0"
revision = "8fdddb7041fe962e750caa553a0714f94e261c4a"
version = "v2.3.1"
[[projects]]
name = "github.com/Luzifer/rconfig"
@ -19,7 +26,8 @@
[[projects]]
name = "github.com/Sirupsen/logrus"
packages = ["."]
revision = "3ec0642a7fb6488f65b06f9040adc67e3990296a"
revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
version = "v1.0.5"
[[projects]]
branch = "master"
@ -31,11 +39,12 @@
name = "github.com/didip/tollbooth"
packages = [
".",
"config",
"errors",
"libstring"
"libstring",
"limiter"
]
revision = "e11ced12e0e0bbfedbc1783fcf0a5ec7f9dc4856"
revision = "c95eaa3ddc98f635a91e218b48727fb2e06613ea"
version = "v4.0.0"
[[projects]]
branch = "master"
@ -43,25 +52,23 @@
packages = ["."]
revision = "845bca739e263e1cd38de25024a47b4d6acbfc1f"
[[projects]]
name = "github.com/flopp/go-staticmaps"
packages = ["."]
revision = "e8779c98399f6efad291d6504990daceeb9940a9"
[[projects]]
name = "github.com/fogleman/gg"
packages = ["."]
revision = "ee8994ff90057955c428a5a949da5d064bf3ce6b"
revision = "6166aa3c1afaee416f384645a81636267aee6d25"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/golang/freetype"
packages = [
"raster",
"truetype"
]
revision = "38b4c392adc5eed94207994c4848fff99f4ac234"
revision = "e2365dfdc4a05e4b8299a783240d4a7d5a65d4e4"
[[projects]]
branch = "master"
name = "github.com/golang/geo"
packages = [
"r1",
@ -70,22 +77,25 @@
"s1",
"s2"
]
revision = "f65fe014169924880aa2c95d7707c2da435534b9"
revision = "fb250ae94fbe10f86b4f1a9b70a19925da3410b9"
[[projects]]
name = "github.com/gorilla/context"
packages = ["."]
revision = "1c83b3eabd45b6d76072b66b746c20815fb2872d"
revision = "1ea25387ff6f684839d82767c1733ff4d4d15d0a"
version = "v1.1"
[[projects]]
name = "github.com/gorilla/mux"
packages = ["."]
revision = "49c024275504f0341e5a9971eb7ba7fa3dc7af40"
revision = "53c1911da2b537f792e7cafcb446b05ffe33b996"
version = "v1.6.1"
[[projects]]
branch = "master"
name = "github.com/lucasb-eyer/go-colorful"
packages = ["."]
revision = "c900de9dbbc73129068f5af6a823068fc5f2308c"
revision = "231272389856c976b7500c4fffcc52ddf06ff4eb"
[[projects]]
name = "github.com/patrickmn/go-cache"
@ -95,7 +105,8 @@
[[projects]]
name = "github.com/spf13/pflag"
packages = ["."]
revision = "c7e63cf4530bcd3ba943729cee0efeff2ebea63f"
revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
version = "v1.0.0"
[[projects]]
branch = "master"
@ -104,45 +115,57 @@
revision = "7848cf26f5a58b4a4e23b89a4b67cfc3d52dd042"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
revision = "b2aa35443fbc700ab74c586ae79b81c171851023"
[[projects]]
branch = "master"
name = "golang.org/x/image"
packages = [
"draw",
"font",
"font/basicfont",
"font/plan9font",
"math/f64",
"math/fixed"
]
revision = "97680175a5267bb8b31f1923e7a66df98013b11a"
revision = "f315e440302883054d0c2bd85486878cb4f8572c"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context"]
revision = "dfe83d419c9403b40b19d08cdba2afec27b002f7"
revision = "b68f30494add4df6bd8ef5e82803f308e7f7c59c"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix"]
revision = "8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9"
packages = [
"unix",
"windows"
]
revision = "378d26f46672a356c46195c28f61bdb4c0a781dd"
[[projects]]
name = "golang.org/x/time"
packages = ["rate"]
revision = "8be79e1e0910c292df4e79c241bb7e8f7e725959"
revision = "711ca1cb87636abec28122ef3bc6a77269d433f3"
[[projects]]
branch = "v2"
name = "gopkg.in/validator.v2"
packages = ["."]
revision = "07ffaad256c8e957050ad83d6472eb97d785013d"
revision = "59c90c7046f643cbe0d4e7c8776c42a84ce75910"
[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "31c299268d302dd0aa9a0dcf765a3d58971ac83f"
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "3d56f50ffce992752c5b104c169174827eb4cb480ed07f718d3ce4f84bd1244e"
inputs-digest = "7c8497a8311887c02aced01ebdf91a73dc82b48920f8f9ebd5ad643204043ce2"
solver-name = "gps-cdcl"
solver-version = 1

View file

@ -25,14 +25,42 @@
# unused-packages = true
[[constraint]]
branch = "master"
name = "github.com/Luzifer/go-staticmaps"
[[constraint]]
name = "github.com/Luzifer/go_helpers"
version = "2.2.0"
version = "2.3.1"
[[constraint]]
name = "github.com/Luzifer/rconfig"
version = "1.2.0"
[[constraint]]
name = "github.com/Sirupsen/logrus"
version = "1.0.5"
[[constraint]]
name = "github.com/didip/tollbooth"
version = "4.0.0"
[[constraint]]
name = "github.com/fogleman/gg"
version = "1.0.0"
[[constraint]]
branch = "master"
name = "github.com/golang/geo"
[[constraint]]
name = "github.com/gorilla/mux"
version = "1.6.1"
[[constraint]]
branch = "master"
name = "github.com/lucasb-eyer/go-colorful"
[prune]
go-tests = true
unused-packages = true

View file

@ -13,6 +13,7 @@ import (
"github.com/Luzifer/rconfig"
log "github.com/Sirupsen/logrus"
"github.com/didip/tollbooth"
"github.com/didip/tollbooth/limiter"
"github.com/golang/geo/s2"
"github.com/gorilla/mux"
colorful "github.com/lucasb-eyer/go-colorful"
@ -24,7 +25,7 @@ var (
ForceCache time.Duration `flag:"force-cache" default:"24h" env:"FORCE_CACHE" description:"Force map to be cached for this duration"`
Listen string `flag:"listen" default:":3000" description:"IP/Port to listen on"`
MaxSize string `flag:"max-size" default:"1024x1024" env:"MAX_SIZE" description:"Maximum map size requestable"`
RateLimit int64 `flag:"rate-limit" default:"1" env:"RATE_LIMIT" description:"How many requests to allow per time"`
RateLimit float64 `flag:"rate-limit" default:"1" env:"RATE_LIMIT" description:"How many requests to allow per time"`
RateLimitTime time.Duration `flag:"rate-limit-time" default:"1s" env:"RATE_LIMIT_TIME" description:"Time interval to allow N requests in"`
VersionAndExit bool `flag:"version" default:"false" description:"Print version information and exit"`
}
@ -51,8 +52,10 @@ func init() {
}
func main() {
rateLimit := tollbooth.NewLimiter(cfg.RateLimit, cfg.RateLimitTime)
rateLimit.IPLookups = []string{"X-Forwarded-For", "RemoteAddr", "X-Real-IP"}
rateLimit := tollbooth.NewLimiter(cfg.RateLimit, &limiter.ExpirableOptions{
DefaultExpirationTTL: cfg.RateLimitTime,
})
rateLimit.SetIPLookups([]string{"X-Forwarded-For", "RemoteAddr", "X-Real-IP"})
r := mux.NewRouter()
r.HandleFunc("/status", func(res http.ResponseWriter, r *http.Request) { http.Error(res, "I'm fine", http.StatusOK) })

6
map.go
View file

@ -6,7 +6,7 @@ import (
"image/color"
"io"
staticMap "github.com/flopp/go-staticmaps"
staticMap "github.com/Luzifer/go-staticmaps"
"github.com/fogleman/gg"
"github.com/golang/geo/s2"
)
@ -45,6 +45,8 @@ func (m marker) String() string {
func generateMap(center s2.LatLng, zoom int, marker []marker, x, y int) (io.Reader, error) {
ctx := staticMap.NewContext()
ctx.SetUserAgent(fmt.Sprintf("Mozilla/5.0+(compatible; staticmap/%s; https://github.com/Luzifer/staticmap)", version))
ctx.SetSize(x, y)
ctx.SetCenter(center)
ctx.SetZoom(zoom)
@ -55,8 +57,6 @@ func generateMap(center s2.LatLng, zoom int, marker []marker, x, y int) (io.Read
}
}
staticMap.TileFetcherUserAgent = fmt.Sprintf("Mozilla/5.0+(compatible; staticmap/%s; https://github.com/Luzifer/staticmap)", version)
img, err := ctx.Render()
if err != nil {
return nil, err

36
vendor/github.com/Luzifer/go-staticmaps/.travis.yml generated vendored Normal file
View file

@ -0,0 +1,36 @@
language: go
go:
- 1.9
- master
install:
- go get -t ./...
matrix:
allow_failures:
- go: master
# Don't wait for tip tests to finish. Mark the test run green if the
# tests pass on the stable versions of Go.
fast_finish: true
notifications:
email: false
# Anything in before_script that returns a nonzero exit code will
# flunk the build and immediately stop. It's sorta like having
# set -e enabled in bash.
before_script:
- GO_FILES=$(find . -iname '*.go' -type f) # All the .go files
- go get github.com/golang/lint/golint # Linter
- go get honnef.co/go/tools/cmd/megacheck # Badass static analyzer/linter
- go get github.com/fzipp/gocyclo
# script always run to completion (set +e). All of these code checks are must haves
# in a modern Go project.
script:
- test -z $(gofmt -s -l $GO_FILES) # Fail if a .go file hasn't been formatted with gofmt
- go vet ./... # go vet is the official Go static analyzer
- megacheck ./... # "go vet on steroids" + linter
- gocyclo -over 19 $GO_FILES # forbid code with huge functions
- golint -set_exit_status $(go list ./...) # one last linter

View file

@ -1,5 +1,6 @@
[![GoDoc](https://godoc.org/github.com/flopp/go-staticmaps?status.svg)](https://godoc.org/github.com/flopp/go-staticmaps)
[![Go Report Card](https://goreportcard.com/badge/github.com/flopp/go-staticmaps)](https://goreportcard.com/report/flopp/go-staticmaps)
[![Build Status](https://travis-ci.org/flopp/go-staticmaps.svg?branch=master)](https://travis-ci.org/flopp/go-staticmaps)
[![License MIT](https://img.shields.io/badge/license-MIT-lightgrey.svg?style=flat)](https://github.com/flopp/go-staticmaps/)
# go-staticmaps
@ -80,9 +81,13 @@ See [GoDoc](https://godoc.org/github.com/flopp/go-staticmaps) for a complete doc
Set the bounding box (NW_LATLNG = north-western point of the
bounding box, SW_LATLNG = south-western point of the bounding
box)
--background=COLOR Background color (default: transparent)
-u, --useragent=USERAGENT
Overwrite the default HTTP user agent string
-m, --marker=MARKER Add a marker to the static map
-p, --path=PATH Add a path to the static map
-a, --area=AREA Add an area to the static map
-C, --circle=CIRCLE Add a circle to the static map
Help Options:
-h, --help Show this help message
@ -91,6 +96,8 @@ See [GoDoc](https://godoc.org/github.com/flopp/go-staticmaps) for a complete doc
The command line interface tries to resemble [Google's Static Maps API](https://developers.google.com/maps/documentation/static-maps/intro).
If neither `--bbox`, `--center`, nor `--zoom` are given, the map extent is determined from the specified markers, paths and areas.
`--background` lets you specify a color used for map areas that are not covered by map tiles (areas north of 85°/south of -85°).
### Markers
The `--marker` option defines one or more map markers of the same style. Use multiple `--marker` options to add markers of different styles.
@ -132,6 +139,21 @@ The `--area` option defines a closed area on the map. Use multiple `--area` opti
- `fill:COLOR` - where `COLOR` is either of the form `0xRRGGBB`, `0xRRGGBBAA`, or one of `black`, `blue`, `brown`, `green`, `orange`, `purple`, `red`, `yellow`, `white` (default: none)
### Circles
The `--circle` option defines one or more circles of the same style. Use multiple `--circle` options to add circles of different styles.
--circle CIRCLE_STYLES|LATLNG|LATLNG|...
`LATLNG` is a comma separated pair of latitude and longitude, e.g. `52.5153,13.3564`.
`CIRCLE_STYLES` consists of a set of style descriptors separated by the pipe character `|`:
- `color:COLOR` - where `COLOR` is either of the form `0xRRGGBB`, `0xRRGGBBAA`, or one of `black`, `blue`, `brown`, `green`, `orange`, `purple`, `red`, `yellow`, `white` (default: `red`)
- `fill:COLOR` - where `COLOR` is either of the form `0xRRGGBB`, `0xRRGGBBAA`, or one of `black`, `blue`, `brown`, `green`, `orange`, `purple`, `red`, `yellow`, `white` (default: no fill color)
- `radius:RADIUS` - where `RADIUS` is the circle radius in meters (default: `100.0`)
- `weight:WEIGHT` - where `WEIGHT` is the line width in pixels (default: `5`)
## Examples
### Basic Maps
@ -217,7 +239,9 @@ Besides the go standard library, go-staticmaps uses
- [bcicen](https://github.com/bcicen): reported and fixed *syntax error in examples*
- [pshevtsov](https://github.com/pshevtsov): fixed *drawing of empty attribution strings*
- [Luzifer](https://github.com/Luzifer): added *overwritable user agent strings* to comply with the OSM tile usage policy
- [Jason Fox](https://github.com/jasonpfox): added `RenderWithBounds` function
- [Alexander A. Kapralov](https://github.com/alnkapa): initial *circles* implementation
- [tsukumaru](https://github.com/tsukumaru): added `NewArea` and `NewPath` functions
## License
Copyright 2016, 2017 Florian Pigorsch & Contributors. All rights reserved.

View file

@ -24,6 +24,17 @@ type Area struct {
Weight float64
}
// NewArea creates a new Area from the given positions, outline color,
// fill color and line weight.
func NewArea(positions []s2.LatLng, col color.Color, fill color.Color, weight float64) *Area {
	return &Area{
		Positions: positions,
		Color:     col,
		Fill:      fill,
		Weight:    weight,
	}
}
// ParseAreaString parses a string and returns an area
func ParseAreaString(s string) (*Area, error) {
area := new(Area)
@ -57,7 +68,6 @@ func ParseAreaString(s string) (*Area, error) {
}
area.Positions = append(area.Positions, s2.LatLngFromDegrees(lat, lng))
}
}
return area, nil
}

128
vendor/github.com/Luzifer/go-staticmaps/circle.go generated vendored Normal file
View file

@ -0,0 +1,128 @@
package sm
import (
"image/color"
"log"
"math"
"strings"
"strconv"
"github.com/flopp/go-coordsparser"
"github.com/fogleman/gg"
"github.com/golang/geo/s1"
"github.com/golang/geo/s2"
)
// Circle represents a circle on the map
type Circle struct {
MapObject
Position s2.LatLng // center of the circle
Color color.Color // outline color
Fill color.Color // fill color
Weight float64 // outline width in pixels
Radius float64 // in m.
}
// NewCircle creates a new circle at pos with the given outline color,
// fill color, radius (meters) and line weight (pixels).
func NewCircle(pos s2.LatLng, col, fill color.Color, radius, weight float64) *Circle {
	c := new(Circle)
	c.Position = pos
	c.Color = col
	c.Fill = fill
	c.Weight = weight
	c.Radius = radius
	return c
}
// ParseCircleString parses a string and returns an array of circles
//
// The string is a "|"-separated list of style descriptors
// (color:..., fill:..., radius:..., weight:...) and LAT,LNG pairs;
// each coordinate pair produces one circle using the styles seen so far.
func ParseCircleString(s string) (circles []*Circle, err error) {
circles = make([]*Circle, 0, 0)
// Style defaults: red outline, transparent fill, 100 m radius, 5 px line.
var col color.Color = color.RGBA{0xff, 0, 0, 0xff}
var fill color.Color = color.Transparent
radius := 100.0
weight := 5.0
for _, ss := range strings.Split(s, "|") {
if ok, suffix := hasPrefix(ss, "color:"); ok {
col, err = ParseColorString(suffix)
if err != nil {
return nil, err
}
} else if ok, suffix := hasPrefix(ss, "fill:"); ok {
fill, err = ParseColorString(suffix)
if err != nil {
return nil, err
}
} else if ok, suffix := hasPrefix(ss, "radius:"); ok {
if radius, err = strconv.ParseFloat(suffix, 64); err != nil {
return nil, err
}
} else if ok, suffix := hasPrefix(ss, "weight:"); ok {
if weight, err = strconv.ParseFloat(suffix, 64); err != nil {
return nil, err
}
} else {
// Not a style descriptor: treat the segment as a coordinate pair.
lat, lng, err := coordsparser.Parse(ss)
if err != nil {
return nil, err
}
c := NewCircle(s2.LatLngFromDegrees(lat, lng), col, fill, radius, weight)
circles = append(circles, c)
}
}
return circles, nil
}
// getLatLng returns the point on the circle's perimeter due north of
// the center when plus is true, due south when plus is false, using the
// spherical destination-point formula on a mean Earth radius.
func (m *Circle) getLatLng(plus bool) s2.LatLng {
const (
// R is the mean Earth radius in meters.
R = 6371000.0
)
// th: angular distance for the radius; br: bearing (always 0, i.e. north).
th := m.Radius / R
br := 0 / float64(s1.Degree)
if !plus {
th *= -1
}
lat := m.Position.Lat.Radians()
lat1 := math.Asin(math.Sin(lat)*math.Cos(th) + math.Cos(lat)*math.Sin(th)*math.Cos(br))
lng1 := m.Position.Lng.Radians() +
math.Atan2(math.Sin(br)*math.Sin(th)*math.Cos(lat),
math.Cos(th)-math.Sin(lat)*math.Sin(lat1))
return s2.LatLng{
Lat: s1.Angle(lat1),
Lng: s1.Angle(lng1),
}
}
// extraMarginPixels reports the extra margin (in pixels) needed around
// the circle's bounds to accommodate its stroke: half the line width.
func (m *Circle) extraMarginPixels() float64 {
	return m.Weight / 2.0
}
// bounds returns the smallest s2.Rect containing the circle, built from
// the southern- and northern-most points of its perimeter.
func (m *Circle) bounds() s2.Rect {
	south := m.getLatLng(false)
	north := m.getLatLng(true)
	return s2.EmptyRect().AddPoint(south).AddPoint(north)
}
// draw renders the circle onto gc; the geographic radius is converted
// to pixels via the distance between the projected center and a
// projected perimeter point.
func (m *Circle) draw(gc *gg.Context, trans *transformer) {
if !CanDisplay(m.Position) {
log.Printf("Circle coordinates not displayable: %f/%f", m.Position.Lat.Degrees(), m.Position.Lng.Degrees())
return
}
ll := m.getLatLng(true)
// Pixel radius = distance from projected center to projected perimeter point.
x, y := trans.ll2p(m.Position)
x1, y1 := trans.ll2p(ll)
radius := math.Sqrt(math.Pow(x1-x, 2) + math.Pow(y1-y, 2))
gc.ClearPath()
gc.SetLineWidth(m.Weight)
gc.SetLineCap(gg.LineCapRound)
gc.SetLineJoin(gg.LineJoinRound)
gc.DrawCircle(x, y, radius)
// Fill first, then stroke the outline over the same path.
gc.SetColor(m.Fill)
gc.FillPreserve()
gc.SetColor(m.Color)
gc.Stroke()
}

View file

@ -54,6 +54,8 @@ func ParseColorString(s string) (color.Color, error) {
return color.RGBA{0xff, 0xff, 0x00, 0xff}, nil
case "white":
return color.RGBA{0xff, 0xff, 0xff, 0xff}, nil
case "transparent":
return color.RGBA{0x00, 0x00, 0x00, 0x00}, nil
}
return color.Transparent, fmt.Errorf("Cannot parse color string: %s", s)
}

View file

@ -9,6 +9,7 @@ package sm
import (
"errors"
"image"
"image/color"
"image/draw"
"log"
"math"
@ -31,10 +32,15 @@ type Context struct {
hasBoundingBox bool
boundingBox s2.Rect
markers []*Marker
paths []*Path
areas []*Area
background color.Color
markers []*Marker
paths []*Path
areas []*Area
circles []*Circle
overlays []*TileProvider
userAgent string
tileProvider *TileProvider
}
@ -46,6 +52,8 @@ func NewContext() *Context {
t.hasZoom = false
t.hasCenter = false
t.hasBoundingBox = false
t.background = nil
t.userAgent = ""
t.tileProvider = NewTileProviderOpenStreetMaps()
return t
}
@ -55,6 +63,11 @@ func (m *Context) SetTileProvider(t *TileProvider) {
m.tileProvider = t
}
// SetUserAgent sets the HTTP user agent string used when downloading map tiles.
func (m *Context) SetUserAgent(a string) {
m.userAgent = a
}
// SetSize sets the size of the generated image
func (m *Context) SetSize(width, height int) {
m.width = width
@ -79,6 +92,11 @@ func (m *Context) SetBoundingBox(bbox s2.Rect) {
m.hasBoundingBox = true
}
// SetBackground sets the background color (used as a fallback for areas without map tiles).
func (m *Context) SetBackground(col color.Color) {
m.background = col
}
// AddMarker adds a marker to the Context
func (m *Context) AddMarker(marker *Marker) {
m.markers = append(m.markers, marker)
@ -109,6 +127,26 @@ func (m *Context) ClearAreas() {
m.areas = nil
}
// AddCircle adds a circle to the Context
func (m *Context) AddCircle(circle *Circle) {
m.circles = append(m.circles, circle)
}
// ClearCircles removes all circles from the Context
func (m *Context) ClearCircles() {
m.circles = nil
}
// AddOverlay adds an overlay to the Context
func (m *Context) AddOverlay(overlay *TileProvider) {
m.overlays = append(m.overlays, overlay)
}
// ClearOverlays removes all overlays from the Context
func (m *Context) ClearOverlays() {
m.overlays = nil
}
func (m *Context) determineBounds() s2.Rect {
r := s2.EmptyRect()
for _, marker := range m.markers {
@ -120,6 +158,9 @@ func (m *Context) determineBounds() s2.Rect {
for _, area := range m.areas {
r = r.Union(area.bounds())
}
for _, circle := range m.circles {
r = r.Union(circle.bounds())
}
return r
}
@ -140,6 +181,11 @@ func (m *Context) determineExtraMarginPixels() float64 {
p = pp
}
}
for _, circle := range m.circles {
if pp := circle.extraMarginPixels(); pp > p {
p = pp
}
}
return p
}
@ -202,55 +248,91 @@ func (m *Context) determineZoomCenter() (int, s2.LatLng, error) {
type transformer struct {
zoom int
tileSize int
pWidth, pHeight int
pCenterX, pCenterY int
tCountX, tCountY int
tCenterX, tCenterY float64
tOriginX, tOriginY int
numTiles float64 // number of tiles per dimension at this zoom level
tileSize int // tile size in pixels from this provider
pWidth, pHeight int // pixel size of returned set of tiles
pCenterX, pCenterY int // pixel location of requested center in set of tiles
tCountX, tCountY int // download area in tile units
tCenterX, tCenterY float64 // tile index to requested center
tOriginX, tOriginY int // bottom left tile to download
pMinX, pMaxX int
}
func newTransformer(width int, height int, zoom int, llCenter s2.LatLng, tileSize int) *transformer {
t := new(transformer)
t.zoom = zoom
t.numTiles = math.Exp2(float64(t.zoom))
t.tileSize = tileSize
// fractional tile index to center of requested area
t.tCenterX, t.tCenterY = t.ll2t(llCenter)
ww := float64(width) / float64(tileSize)
hh := float64(height) / float64(tileSize)
// origin tile to fulfill request
t.tOriginX = int(math.Floor(t.tCenterX - 0.5*ww))
t.tOriginY = int(math.Floor(t.tCenterY - 0.5*hh))
// tiles in each axis to fulfill request
t.tCountX = 1 + int(math.Floor(t.tCenterX+0.5*ww)) - t.tOriginX
t.tCountY = 1 + int(math.Floor(t.tCenterY+0.5*hh)) - t.tOriginY
// final pixel dimensions of area returned
t.pWidth = t.tCountX * tileSize
t.pHeight = t.tCountY * tileSize
// Pixel location in returned image for center of requested area
t.pCenterX = int((t.tCenterX - float64(t.tOriginX)) * float64(tileSize))
t.pCenterY = int((t.tCenterY - float64(t.tOriginY)) * float64(tileSize))
t.pMinX = t.pCenterX - width/2
t.pMaxX = t.pMinX + width
return t
}
// ll2t returns fractional tile index for a lat/lng points
func (t *transformer) ll2t(ll s2.LatLng) (float64, float64) {
tiles := math.Exp2(float64(t.zoom))
x := tiles * (ll.Lng.Degrees() + 180.0) / 360.0
y := tiles * (1 - math.Log(math.Tan(ll.Lat.Radians())+(1.0/math.Cos(ll.Lat.Radians())))/math.Pi) / 2.0
x := t.numTiles * (ll.Lng.Degrees() + 180.0) / 360.0
y := t.numTiles * (1 - math.Log(math.Tan(ll.Lat.Radians())+(1.0/math.Cos(ll.Lat.Radians())))/math.Pi) / 2.0
return x, y
}
func (t *transformer) ll2p(ll s2.LatLng) (float64, float64) {
x, y := t.ll2t(ll)
if x < float64(t.tOriginX) {
x = x + math.Exp2(float64(t.zoom))
}
x = float64(t.pCenterX) + (x-t.tCenterX)*float64(t.tileSize)
y = float64(t.pCenterY) + (y-t.tCenterY)*float64(t.tileSize)
offset := t.numTiles * float64(t.tileSize)
if x < float64(t.pMinX) {
for x < float64(t.pMinX) {
x = x + offset
}
} else if x >= float64(t.pMaxX) {
for x >= float64(t.pMaxX) {
x = x - offset
}
}
return x, y
}
// Rect returns an s2.Rect bounding box around the set of tiles described by transformer
func (t *transformer) Rect() (bbox s2.Rect) {
// transform from https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Go
invNumTiles := 1.0 / t.numTiles
// Get latitude bounds via the inverse web-mercator transform
// (atan(sinh(n)), written out with exponentials here).
n := math.Pi - 2.0*math.Pi*float64(t.tOriginY)*invNumTiles
bbox.Lat.Hi = math.Atan(0.5 * (math.Exp(n) - math.Exp(-n)))
n = math.Pi - 2.0*math.Pi*float64(t.tOriginY+t.tCountY)*invNumTiles
bbox.Lat.Lo = math.Atan(0.5 * (math.Exp(n) - math.Exp(-n)))
// Get longitude bounds, much easier: linear in the tile index
bbox.Lng.Lo = float64(t.tOriginX)*invNumTiles*2.0*math.Pi - math.Pi
bbox.Lng.Hi = float64(t.tOriginX+t.tCountX)*invNumTiles*2.0*math.Pi - math.Pi
return bbox
}
// Render actually renders the map image including all map objects (markers, paths, areas)
func (m *Context) Render() (image.Image, error) {
zoom, center, err := m.determineZoomCenter()
@ -262,28 +344,19 @@ func (m *Context) Render() (image.Image, error) {
trans := newTransformer(m.width, m.height, zoom, center, tileSize)
img := image.NewRGBA(image.Rect(0, 0, trans.pWidth, trans.pHeight))
gc := gg.NewContextForRGBA(img)
if m.background != nil {
draw.Draw(img, img.Bounds(), &image.Uniform{m.background}, image.ZP, draw.Src)
}
// fetch and draw tiles to img
t := NewTileFetcher(m.tileProvider)
tiles := (1 << uint(zoom))
for xx := 0; xx < trans.tCountX; xx++ {
x := trans.tOriginX + xx
if x < 0 {
x = x + tiles
} else if x >= tiles {
x = x - tiles
}
for yy := 0; yy < trans.tCountY; yy++ {
y := trans.tOriginY + yy
if y < 0 || y >= tiles {
log.Printf("Skipping out of bounds tile %d/%d", x, y)
} else {
if tileImg, err := t.Fetch(zoom, x, y); err == nil {
gc.DrawImage(tileImg, xx*tileSize, yy*tileSize)
} else {
log.Printf("Error downloading tile file: %s", err)
}
}
layers := []*TileProvider{m.tileProvider}
if m.overlays != nil {
layers = append(layers, m.overlays...)
}
for _, layer := range layers {
if err := m.renderLayer(gc, zoom, trans, tileSize, layer); err != nil {
return nil, err
}
}
@ -297,6 +370,9 @@ func (m *Context) Render() (image.Image, error) {
for _, marker := range m.markers {
marker.draw(gc, trans)
}
for _, circle := range m.circles {
circle.draw(gc, trans)
}
// crop image
croppedImg := image.NewRGBA(image.Rect(0, 0, int(m.width), int(m.height)))
@ -319,3 +395,94 @@ func (m *Context) Render() (image.Image, error) {
return croppedImg, nil
}
// RenderWithBounds actually renders the map image including all map objects (markers, paths, areas).
// The returned image covers requested area as well as any tiles necessary to cover that area, which may
// be larger than the request.
//
// Specific bounding box of returned image is provided to support image registration with other data
func (m *Context) RenderWithBounds() (image.Image, s2.Rect, error) {
zoom, center, err := m.determineZoomCenter()
if err != nil {
return nil, s2.Rect{}, err
}
tileSize := m.tileProvider.TileSize
trans := newTransformer(m.width, m.height, zoom, center, tileSize)
img := image.NewRGBA(image.Rect(0, 0, trans.pWidth, trans.pHeight))
gc := gg.NewContextForRGBA(img)
// Optional background fill for areas not covered by map tiles.
if m.background != nil {
draw.Draw(img, img.Bounds(), &image.Uniform{m.background}, image.ZP, draw.Src)
}
// fetch and draw tiles to img
// The base tile provider is always the first layer; overlays are drawn
// on top in the order they were added.
layers := []*TileProvider{m.tileProvider}
if m.overlays != nil {
layers = append(layers, m.overlays...)
}
for _, layer := range layers {
if err := m.renderLayer(gc, zoom, trans, tileSize, layer); err != nil {
return nil, s2.Rect{}, err
}
}
// draw map objects
for _, area := range m.areas {
area.draw(gc, trans)
}
for _, path := range m.paths {
path.draw(gc, trans)
}
for _, circle := range m.circles {
circle.draw(gc, trans)
}
for _, marker := range m.markers {
marker.draw(gc, trans)
}
// draw attribution
if m.tileProvider.Attribution == "" {
return img, trans.Rect(), nil
}
// Attribution text goes into a semi-transparent bar along the bottom edge.
_, textHeight := gc.MeasureString(m.tileProvider.Attribution)
boxHeight := textHeight + 4.0
gc.SetRGBA(0.0, 0.0, 0.0, 0.5)
gc.DrawRectangle(0.0, float64(trans.pHeight)-boxHeight, float64(trans.pWidth), boxHeight)
gc.Fill()
gc.SetRGBA(1.0, 1.0, 1.0, 0.75)
gc.DrawString(m.tileProvider.Attribution, 4.0, float64(m.height)-4.0)
return img, trans.Rect(), nil
}
// renderLayer fetches every tile of provider needed to cover the
// transformer's download area and draws them onto gc. Tile x indices
// wrap around the antimeridian; out-of-range y tiles are skipped.
func (m *Context) renderLayer(gc *gg.Context, zoom int, trans *transformer, tileSize int, provider *TileProvider) error {
t := NewTileFetcher(provider)
if m.userAgent != "" {
t.SetUserAgent(m.userAgent)
}
tiles := (1 << uint(zoom))
for xx := 0; xx < trans.tCountX; xx++ {
x := trans.tOriginX + xx
// Wrap horizontally so the map is continuous across +/-180 degrees.
if x < 0 {
x = x + tiles
} else if x >= tiles {
x = x - tiles
}
for yy := 0; yy < trans.tCountY; yy++ {
y := trans.tOriginY + yy
if y < 0 || y >= tiles {
log.Printf("Skipping out of bounds tile %d/%d", x, y)
} else {
// A failed tile download is logged and skipped, not fatal.
if tileImg, err := t.Fetch(zoom, x, y); err == nil {
gc.DrawImage(tileImg, xx*tileSize, yy*tileSize)
} else {
log.Printf("Error downloading tile file: %s", err)
}
}
}
}
return nil
}

View file

@ -16,3 +16,10 @@ type MapObject interface {
extraMarginPixels() float64
draw(dc *gg.Context, trans *transformer)
}
// CanDisplay reports whether pos is generally displayable, i.e. its
// latitude lies within the [-85, 85] degree band covered by map tiles.
func CanDisplay(pos s2.LatLng) bool {
	const (
		minLatitude float64 = -85.0
		maxLatitude float64 = 85.0
	)
	lat := pos.Lat.Degrees()
	return lat >= minLatitude && lat <= maxLatitude
}

View file

@ -8,6 +8,7 @@ package sm
import (
"fmt"
"image/color"
"log"
"math"
"strconv"
"strings"
@ -39,6 +40,7 @@ func NewMarker(pos s2.LatLng, col color.Color, size float64) *Marker {
} else {
m.LabelColor = color.RGBA{0xff, 0xff, 0xff, 0xff}
}
return m
}
@ -121,6 +123,11 @@ func (m *Marker) bounds() s2.Rect {
}
func (m *Marker) draw(gc *gg.Context, trans *transformer) {
if !CanDisplay(m.Position) {
log.Printf("Marker coordinates not displayable: %f/%f", m.Position.Lat.Degrees(), m.Position.Lng.Degrees())
return
}
gc.ClearPath()
gc.SetLineJoin(gg.LineJoinRound)
gc.SetLineWidth(1.0)

View file

@ -24,6 +24,16 @@ type Path struct {
Weight float64
}
// NewPath creates a new Path from the given positions, color and line weight.
func NewPath(positions []s2.LatLng, col color.Color, weight float64) *Path {
	return &Path{
		Positions: positions,
		Color:     col,
		Weight:    weight,
	}
}
// ParsePathString parses a string and returns a path
func ParsePathString(s string) ([]*Path, error) {
paths := make([]*Path, 0, 0)

View file

@ -21,13 +21,12 @@ import (
"github.com/Wessie/appdirs"
)
var TileFetcherUserAgent = "Mozilla/5.0+(compatible; go-staticmaps/0.1; https://github.com/flopp/go-staticmaps)"
// TileFetcher downloads map tile images from a TileProvider
type TileFetcher struct {
tileProvider *TileProvider
cacheDir string
useCaching bool
userAgent string
}
// NewTileFetcher creates a new Tilefetcher struct
@ -37,9 +36,15 @@ func NewTileFetcher(tileProvider *TileProvider) *TileFetcher {
app := appdirs.New("go-staticmaps", "flopp.net", "0.1")
t.cacheDir = fmt.Sprintf("%s/%s", app.UserCache(), tileProvider.Name)
t.useCaching = true
t.userAgent = "Mozilla/5.0+(compatible; go-staticmaps/0.1; https://github.com/flopp/go-staticmaps)"
return t
}
// SetUserAgent sets the HTTP user agent string used when downloading map tiles,
// overriding the default set in NewTileFetcher.
func (t *TileFetcher) SetUserAgent(a string) {
	t.userAgent = a
}
func (t *TileFetcher) url(zoom, x, y int) string {
shard := ""
ss := len(t.tileProvider.Shards)
@ -91,7 +96,7 @@ func (t *TileFetcher) Fetch(zoom, x, y int) (image.Image, error) {
func (t *TileFetcher) download(url string) ([]byte, error) {
req, _ := http.NewRequest("GET", url, nil)
req.Header.Set("User-Agent", TileFetcherUserAgent)
req.Header.Set("User-Agent", t.userAgent)
resp, err := http.DefaultClient.Do(req)
if err != nil {

View file

@ -33,5 +33,5 @@ func (a *AccessLogResponseWriter) WriteHeader(code int) {
}
func (a *AccessLogResponseWriter) HTTPResponseType() string {
return fmt.Sprintf("%sxx", strconv.FormatInt(int64(a.StatusCode), 10)[0])
return fmt.Sprintf("%cxx", strconv.FormatInt(int64(a.StatusCode), 10)[0])
}

55
vendor/github.com/Luzifer/go_helpers/http/digest.go generated vendored Normal file
View file

@ -0,0 +1,55 @@
package http
import (
"crypto/md5"
"crypto/rand"
"encoding/hex"
"fmt"
"io"
"net/http"
"strings"
"github.com/Luzifer/go_helpers/str"
)
// GetDigestAuth answers an HTTP digest authentication challenge (RFC 2617)
// taken from the Www-Authenticate header of resp. It returns the value to
// put into the Authorization header of the retried request for the given
// method, request path and credentials.
//
// NOTE(review): the challenge header is split on single spaces, so quoted
// parameter values containing spaces (e.g. realm="my site") would be
// truncated — confirm against the servers this is used with.
func GetDigestAuth(resp *http.Response, method, requestPath, user, password string) string {
	params := map[string]string{}
	// Extract only the challenge parameters used below: nonce, realm, qop.
	for _, part := range strings.Split(resp.Header.Get("Www-Authenticate"), " ") {
		if !strings.Contains(part, `="`) {
			continue
		}
		spl := strings.Split(strings.Trim(part, " ,"), "=")
		if !str.StringInSlice(spl[0], []string{"nonce", "realm", "qop"}) {
			continue
		}
		params[spl[0]] = strings.Trim(spl[1], `"`)
	}
	// Client nonce: 8 random bytes, hex encoded. The error from ReadFull is
	// ignored here; a failed read would leave b zeroed (weak cnonce).
	b := make([]byte, 8)
	io.ReadFull(rand.Reader, b)
	params["cnonce"] = fmt.Sprintf("%x", b)
	params["nc"] = "1" // nonce use count; this helper builds a single request
	params["uri"] = requestPath
	params["username"] = user
	// response = MD5(MD5(user:realm:password):nonce:nc:cnonce:qop:MD5(method:uri))
	params["response"] = getMD5([]string{
		getMD5([]string{params["username"], params["realm"], password}),
		params["nonce"],
		params["nc"],
		params["cnonce"],
		params["qop"],
		getMD5([]string{method, requestPath}),
	})
	// Serialize all parameters; map iteration order is random but the
	// Authorization header parameter order is not significant.
	authParts := []string{}
	for k, v := range params {
		authParts = append(authParts, fmt.Sprintf("%s=%q", k, v))
	}
	return "Digest " + strings.Join(authParts, ", ")
}
func getMD5(in []string) string {
h := md5.New()
h.Write([]byte(strings.Join(in, ":")))
return hex.EncodeToString(h.Sum(nil))
}

21
vendor/github.com/Luzifer/go_helpers/str/slice.go generated vendored Normal file
View file

@ -0,0 +1,21 @@
package str
// AppendIfMissing adds a string to a slice when it's not present yet and
// returns the (possibly unchanged) slice.
func AppendIfMissing(slice []string, s string) []string {
	found := false
	for i := range slice {
		if slice[i] == s {
			found = true
			break
		}
	}
	if found {
		return slice
	}
	return append(slice, s)
}
// StringInSlice checks for the existence of a string in the slice and
// reports whether it was found.
func StringInSlice(a string, list []string) bool {
	for i := range list {
		if list[i] == a {
			return true
		}
	}
	return false
}

View file

@ -1,10 +1,15 @@
language: go
go:
- 1.3
- 1.4
- 1.5
- 1.6
- 1.6.x
- 1.7.x
- 1.8.x
- tip
env:
- GOMAXPROCS=4 GORACE=halt_on_error=1
install:
- go get -t ./...
script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./...
- go get github.com/stretchr/testify/assert
- go get gopkg.in/gemnasium/logrus-airbrake-hook.v2
- go get golang.org/x/sys/unix
- go get golang.org/x/sys/windows
script:
- go test -race -v ./...

View file

@ -1,3 +1,60 @@
# 1.0.5
* Fix hooks race (#707)
* Fix panic deadlock (#695)
# 1.0.4
* Fix race when adding hooks (#612)
* Fix terminal check in AppEngine (#635)
# 1.0.3
* Replace example files with testable examples
# 1.0.2
* bug: quote non-string values in text formatter (#583)
* Make (*Logger) SetLevel a public method
# 1.0.1
* bug: fix escaping in text formatter (#575)
# 1.0.0
* Officially changed name to lower-case
* bug: colors on Windows 10 (#541)
* bug: fix race in accessing level (#512)
# 0.11.5
* feature: add writer and writerlevel to entry (#372)
# 0.11.4
* bug: fix undefined variable on solaris (#493)
# 0.11.3
* formatter: configure quoting of empty values (#484)
* formatter: configure quoting character (default is `"`) (#484)
* bug: fix not importing io correctly in non-linux environments (#481)
# 0.11.2
* bug: fix windows terminal detection (#476)
# 0.11.1
* bug: fix tty detection with custom out (#471)
# 0.11.0
* performance: Use bufferpool to allocate (#370)
* terminal: terminal detection for app-engine (#343)
* feature: exit handler (#375)
# 0.10.0
* feature: Add a test hook (#180)

View file

@ -1,11 +1,24 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
many large deployments. The core API is unlikely to change much but please
version control your Logrus to make sure you aren't fetching latest `master` on
every build.**
the standard library logger.
**Seeing weird case-sensitive problems?** It's in the past been possible to
import Logrus as both upper- and lower-case. Due to the Go package environment,
this caused issues in the community and we needed a standard. Some environments
experienced problems with the upper-case variant, so the lower-case was decided.
Everything using `logrus` will need to use the lower-case:
`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
To fix Glide, see [these
comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
For an in-depth explanation of the casing issue, see [this
comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
**Are you interested in assisting in maintaining Logrus?** Currently I have a
lot of obligations, and I am unable to provide Logrus with the maintainership it
needs. If you'd like to help, please reach out to me at `simon at author's
username dot com`.
Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):
@ -46,6 +59,12 @@ time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x20822
exit status 1
```
#### Case-sensitivity
The organization's name was changed to lower-case--and this will not be changed
back. If you are getting import conflicts due to case sensitivity, please use
the lower-case import: `github.com/sirupsen/logrus`.
#### Example
The simplest way to use Logrus is simply the package-level exported logger:
@ -54,7 +73,7 @@ The simplest way to use Logrus is simply the package-level exported logger:
package main
import (
log "github.com/Sirupsen/logrus"
log "github.com/sirupsen/logrus"
)
func main() {
@ -65,7 +84,7 @@ func main() {
```
Note that it's completely api-compatible with the stdlib logger, so you can
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:
@ -74,15 +93,16 @@ package main
import (
"os"
log "github.com/Sirupsen/logrus"
log "github.com/sirupsen/logrus"
)
func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
// Output to stderr instead of stdout, could also be a file.
log.SetOutput(os.Stderr)
// Output to stdout instead of the default stderr
// Can be any io.Writer, see below for File example
log.SetOutput(os.Stdout)
// Only log the warning severity or above.
log.SetLevel(log.WarnLevel)
@ -123,7 +143,8 @@ application, you can also create an instance of the `logrus` Logger:
package main
import (
"github.com/Sirupsen/logrus"
"os"
"github.com/sirupsen/logrus"
)
// Create a new instance of the logger. You can have any number of instances.
@ -132,7 +153,15 @@ var log = logrus.New()
func main() {
// The API for setting attributes is a little different than the package level
// exported logger. See Godoc.
log.Out = os.Stderr
log.Out = os.Stdout
// You could set this to any `io.Writer` such as a file
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
// if err == nil {
// log.Out = file
// } else {
// log.Info("Failed to log to file, using default stderr")
// }
log.WithFields(logrus.Fields{
"animal": "walrus",
@ -143,7 +172,7 @@ func main() {
#### Fields
Logrus encourages careful, structured logging though logging fields instead of
Logrus encourages careful, structured logging through logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:
@ -165,6 +194,20 @@ In general, with Logrus using any of the `printf`-family functions should be
seen as a hint you should add a field, however, you can still use the
`printf`-family functions with Logrus.
#### Default Fields
Often it's helpful to have fields _always_ attached to log statements in an
application or parts of one. For example, you may want to always log the
`request_id` and `user_ip` in the context of a request. Instead of writing
`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
every line, you can create a `logrus.Entry` to pass around instead:
```go
requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
requestLogger.Info("something happened on that request") # will log request_id and user_ip
requestLogger.Warn("something not great happened")
```
#### Hooks
You can add hooks for logging levels. For example to send errors to an exception
@ -176,9 +219,9 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
```go
import (
log "github.com/Sirupsen/logrus"
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
log "github.com/sirupsen/logrus"
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
"log/syslog"
)
@ -200,37 +243,58 @@ Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/v
| Hook | Description |
| ----- | ----------- |
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
| [Application Insights](https://github.com/jjcollinge/logrus-appinsights) | Hook for logging to [Application Insights](https://azure.microsoft.com/en-us/services/application-insights/)
| [AzureTableHook](https://github.com/kpfaulkner/azuretablehook/) | Hook for logging to Azure Table Storage|
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [KafkaLogrus](https://github.com/tracer0tong/kafkalogrus) | Hook for logging to Kafka |
| [Kafka REST Proxy](https://github.com/Nordstrom/logrus-kafka-rest-proxy) | Hook for logging to [Kafka REST Proxy](https://docs.confluent.io/current/kafka-rest/docs) |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Logbeat](https://github.com/macandmia/logbeat) | Hook for logging to [Opbeat](https://opbeat.com/) |
| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
| [Promrus](https://github.com/weaveworks/promrus) | Expose number of log messages as [Prometheus](https://prometheus.io/) metrics |
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
| [Telegram](https://github.com/rossmcdonald/telegram_hook) | Hook for logging errors to [Telegram](https://telegram.org/) |
| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
#### Level logging
@ -279,7 +343,7 @@ could do:
```go
import (
log "github.com/Sirupsen/logrus"
log "github.com/sirupsen/logrus"
)
init() {
@ -306,11 +370,15 @@ The built-in logging formatters are:
without colors.
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
field to `true`. To force no colored output even if there is a TTY set the
`DisableColors` field to `true`
`DisableColors` field to `true`. For Windows, see
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
* `logrus.JSONFormatter`. Logs fields as JSON.
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
Third party logging formatters:
* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
@ -356,6 +424,18 @@ srv := http.Server{
Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.
This means that we can override the standard library logger easily:
```go
logger := logrus.New()
logger.Formatter = &logrus.JSONFormatter{}
// Use logrus for standard log output
// Note that `log` here references stdlib's log
// Not logrus imported under the name `log`.
log.SetOutput(logger.Writer())
```
#### Rotation
Log rotation is not provided with Logrus. Log rotation should be done by an
@ -367,6 +447,7 @@ entries. It should not be a feature of the application-level logger.
| Tool | Description |
| ---- | ----------- |
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
#### Testing
@ -376,15 +457,24 @@ Logrus has a built in facility for asserting the presence of log messages. This
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
```go
logger, hook := NewNullLogger()
logger.Error("Hello error")
import(
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/assert"
"testing"
)
assert.Equal(1, len(hook.Entries))
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
assert.Equal("Hello error", hook.LastEntry().Message)
func TestSomething(t*testing.T){
logger, hook := test.NewNullLogger()
logger.Error("Helloerror")
hook.Reset()
assert.Nil(hook.LastEntry())
assert.Equal(t, 1, len(hook.Entries))
assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
assert.Equal(t, "Helloerror", hook.LastEntry().Message)
hook.Reset()
assert.Nil(t, hook.LastEntry())
}
```
#### Fatal handlers
@ -403,7 +493,7 @@ logrus.RegisterExitHandler(handler)
...
```
#### Thread safty
#### Thread safety
By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs.
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.

View file

@ -1,7 +1,7 @@
package logrus
// The following code was sourced and modified from the
// https://bitbucket.org/tebeka/atexit package governed by the following license:
// https://github.com/tebeka/atexit package governed by the following license:
//
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
//

14
vendor/github.com/Sirupsen/logrus/appveyor.yml generated vendored Normal file
View file

@ -0,0 +1,14 @@
version: "{build}"
platform: x64
clone_folder: c:\gopath\src\github.com\sirupsen\logrus
environment:
GOPATH: c:\gopath
branches:
only:
- master
install:
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- go version
build_script:
- go get -t
- go test

View file

@ -7,7 +7,7 @@ The simplest way to use Logrus is simply the package-level exported logger:
package main
import (
log "github.com/Sirupsen/logrus"
log "github.com/sirupsen/logrus"
)
func main() {
@ -21,6 +21,6 @@ The simplest way to use Logrus is simply the package-level exported logger:
Output:
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
For a full guide visit https://github.com/Sirupsen/logrus
For a full guide visit https://github.com/sirupsen/logrus
*/
package logrus

View file

@ -35,6 +35,7 @@ type Entry struct {
Time time.Time
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
Level Level
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
@ -93,29 +94,16 @@ func (entry Entry) log(level Level, msg string) {
entry.Level = level
entry.Message = msg
if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock()
}
entry.fireHooks()
buffer = bufferPool.Get().(*bytes.Buffer)
buffer.Reset()
defer bufferPool.Put(buffer)
entry.Buffer = buffer
serialized, err := entry.Logger.Formatter.Format(&entry)
entry.write()
entry.Buffer = nil
if err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
entry.Logger.mu.Unlock()
} else {
entry.Logger.mu.Lock()
_, err = entry.Logger.Out.Write(serialized)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
entry.Logger.mu.Unlock()
}
// To avoid Entry#log() returning a value that only would make sense for
// panic() to use in Entry#Panic(), we avoid the allocation by checking
@ -125,8 +113,33 @@ func (entry Entry) log(level Level, msg string) {
}
}
// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) fireHooks() {
	// The logger mutex serializes hook execution with log writes.
	entry.Logger.mu.Lock()
	defer entry.Logger.mu.Unlock()
	err := entry.Logger.Hooks.Fire(entry.Level, &entry)
	if err != nil {
		// Hook failures are reported on stderr instead of being returned,
		// so logging never fails the caller.
		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
	}
}
// write formats the entry and writes it to the logger's output. Formatting
// happens before the mutex is taken; the lock only guards the Out writer.
// Formatter and writer errors are reported on stderr and otherwise dropped.
func (entry *Entry) write() {
	serialized, err := entry.Logger.Formatter.Format(entry)
	entry.Logger.mu.Lock()
	defer entry.Logger.mu.Unlock()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
	} else {
		_, err = entry.Logger.Out.Write(serialized)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
		}
	}
}
func (entry *Entry) Debug(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
if entry.Logger.level() >= DebugLevel {
entry.log(DebugLevel, fmt.Sprint(args...))
}
}
@ -136,13 +149,13 @@ func (entry *Entry) Print(args ...interface{}) {
}
func (entry *Entry) Info(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
if entry.Logger.level() >= InfoLevel {
entry.log(InfoLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warn(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
if entry.Logger.level() >= WarnLevel {
entry.log(WarnLevel, fmt.Sprint(args...))
}
}
@ -152,20 +165,20 @@ func (entry *Entry) Warning(args ...interface{}) {
}
func (entry *Entry) Error(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
if entry.Logger.level() >= ErrorLevel {
entry.log(ErrorLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
if entry.Logger.level() >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...))
}
Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
if entry.Logger.level() >= PanicLevel {
entry.log(PanicLevel, fmt.Sprint(args...))
}
panic(fmt.Sprint(args...))
@ -174,13 +187,13 @@ func (entry *Entry) Panic(args ...interface{}) {
// Entry Printf family functions
func (entry *Entry) Debugf(format string, args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
if entry.Logger.level() >= DebugLevel {
entry.Debug(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Infof(format string, args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
if entry.Logger.level() >= InfoLevel {
entry.Info(fmt.Sprintf(format, args...))
}
}
@ -190,7 +203,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
if entry.Logger.level() >= WarnLevel {
entry.Warn(fmt.Sprintf(format, args...))
}
}
@ -200,20 +213,20 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
if entry.Logger.level() >= ErrorLevel {
entry.Error(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
if entry.Logger.level() >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
Exit(1)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
if entry.Logger.level() >= PanicLevel {
entry.Panic(fmt.Sprintf(format, args...))
}
}
@ -221,13 +234,13 @@ func (entry *Entry) Panicf(format string, args ...interface{}) {
// Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
if entry.Logger.level() >= DebugLevel {
entry.Debug(entry.sprintlnn(args...))
}
}
func (entry *Entry) Infoln(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
if entry.Logger.level() >= InfoLevel {
entry.Info(entry.sprintlnn(args...))
}
}
@ -237,7 +250,7 @@ func (entry *Entry) Println(args ...interface{}) {
}
func (entry *Entry) Warnln(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
if entry.Logger.level() >= WarnLevel {
entry.Warn(entry.sprintlnn(args...))
}
}
@ -247,20 +260,20 @@ func (entry *Entry) Warningln(args ...interface{}) {
}
func (entry *Entry) Errorln(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
if entry.Logger.level() >= ErrorLevel {
entry.Error(entry.sprintlnn(args...))
}
}
func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
if entry.Logger.level() >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
Exit(1)
}
func (entry *Entry) Panicln(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
if entry.Logger.level() >= PanicLevel {
entry.Panic(entry.sprintlnn(args...))
}
}

View file

@ -31,14 +31,14 @@ func SetFormatter(formatter Formatter) {
func SetLevel(level Level) {
std.mu.Lock()
defer std.mu.Unlock()
std.Level = level
std.SetLevel(level)
}
// GetLevel returns the standard logger level.
func GetLevel() Level {
std.mu.Lock()
defer std.mu.Unlock()
return std.Level
return std.level()
}
// AddHook adds a hook to the standard logger hooks.

View file

@ -2,7 +2,7 @@ package logrus
import "time"
const DefaultTimestampFormat = time.RFC3339
const defaultTimestampFormat = time.RFC3339
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:

View file

@ -5,18 +5,54 @@ import (
"fmt"
)
type fieldKey string
// FieldMap allows customization of the key names for default fields.
type FieldMap map[fieldKey]string
// Default key names for the default fields
const (
FieldKeyMsg = "msg"
FieldKeyLevel = "level"
FieldKeyTime = "time"
)
func (f FieldMap) resolve(key fieldKey) string {
if k, ok := f[key]; ok {
return k
}
return string(key)
}
// JSONFormatter formats logs into parsable json
type JSONFormatter struct {
// TimestampFormat sets the format used for marshaling timestamps.
TimestampFormat string
// DisableTimestamp allows disabling automatic timestamps in output
DisableTimestamp bool
// FieldMap allows users to customize the names of keys for default fields.
// As an example:
// formatter := &JSONFormatter{
// FieldMap: FieldMap{
// FieldKeyTime: "@timestamp",
// FieldKeyLevel: "@level",
// FieldKeyMsg: "@message",
// },
// }
FieldMap FieldMap
}
// Format renders a single log entry
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
for k, v := range entry.Data {
switch v := v.(type) {
case error:
// Otherwise errors are ignored by `encoding/json`
// https://github.com/Sirupsen/logrus/issues/137
// https://github.com/sirupsen/logrus/issues/137
data[k] = v.Error()
default:
data[k] = v
@ -26,12 +62,14 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = DefaultTimestampFormat
timestampFormat = defaultTimestampFormat
}
data["time"] = entry.Time.Format(timestampFormat)
data["msg"] = entry.Message
data["level"] = entry.Level.String()
if !f.DisableTimestamp {
data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
}
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
serialized, err := json.Marshal(data)
if err != nil {

View file

@ -4,6 +4,7 @@ import (
"io"
"os"
"sync"
"sync/atomic"
)
type Logger struct {
@ -24,7 +25,7 @@ type Logger struct {
Formatter Formatter
// The logging level the logger should log at. This is typically (and defaults
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged. `logrus.Debug` is useful in
// logged.
Level Level
// Used to sync writing to the log. Locking is enabled by Default
mu MutexWrap
@ -112,7 +113,7 @@ func (logger *Logger) WithError(err error) *Entry {
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.Level >= DebugLevel {
if logger.level() >= DebugLevel {
entry := logger.newEntry()
entry.Debugf(format, args...)
logger.releaseEntry(entry)
@ -120,7 +121,7 @@ func (logger *Logger) Debugf(format string, args ...interface{}) {
}
func (logger *Logger) Infof(format string, args ...interface{}) {
if logger.Level >= InfoLevel {
if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Infof(format, args...)
logger.releaseEntry(entry)
@ -134,7 +135,7 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
@ -142,7 +143,7 @@ func (logger *Logger) Warnf(format string, args ...interface{}) {
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
@ -150,7 +151,7 @@ func (logger *Logger) Warningf(format string, args ...interface{}) {
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
if logger.Level >= ErrorLevel {
if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Errorf(format, args...)
logger.releaseEntry(entry)
@ -158,7 +159,7 @@ func (logger *Logger) Errorf(format string, args ...interface{}) {
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.Level >= FatalLevel {
if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatalf(format, args...)
logger.releaseEntry(entry)
@ -167,7 +168,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) {
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
if logger.Level >= PanicLevel {
if logger.level() >= PanicLevel {
entry := logger.newEntry()
entry.Panicf(format, args...)
logger.releaseEntry(entry)
@ -175,7 +176,7 @@ func (logger *Logger) Panicf(format string, args ...interface{}) {
}
func (logger *Logger) Debug(args ...interface{}) {
if logger.Level >= DebugLevel {
if logger.level() >= DebugLevel {
entry := logger.newEntry()
entry.Debug(args...)
logger.releaseEntry(entry)
@ -183,7 +184,7 @@ func (logger *Logger) Debug(args ...interface{}) {
}
func (logger *Logger) Info(args ...interface{}) {
if logger.Level >= InfoLevel {
if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
@ -197,7 +198,7 @@ func (logger *Logger) Print(args ...interface{}) {
}
func (logger *Logger) Warn(args ...interface{}) {
if logger.Level >= WarnLevel {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
@ -205,7 +206,7 @@ func (logger *Logger) Warn(args ...interface{}) {
}
func (logger *Logger) Warning(args ...interface{}) {
if logger.Level >= WarnLevel {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
@ -213,7 +214,7 @@ func (logger *Logger) Warning(args ...interface{}) {
}
func (logger *Logger) Error(args ...interface{}) {
if logger.Level >= ErrorLevel {
if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Error(args...)
logger.releaseEntry(entry)
@ -221,7 +222,7 @@ func (logger *Logger) Error(args ...interface{}) {
}
func (logger *Logger) Fatal(args ...interface{}) {
if logger.Level >= FatalLevel {
if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatal(args...)
logger.releaseEntry(entry)
@ -230,7 +231,7 @@ func (logger *Logger) Fatal(args ...interface{}) {
}
func (logger *Logger) Panic(args ...interface{}) {
if logger.Level >= PanicLevel {
if logger.level() >= PanicLevel {
entry := logger.newEntry()
entry.Panic(args...)
logger.releaseEntry(entry)
@ -238,7 +239,7 @@ func (logger *Logger) Panic(args ...interface{}) {
}
func (logger *Logger) Debugln(args ...interface{}) {
if logger.Level >= DebugLevel {
if logger.level() >= DebugLevel {
entry := logger.newEntry()
entry.Debugln(args...)
logger.releaseEntry(entry)
@ -246,7 +247,7 @@ func (logger *Logger) Debugln(args ...interface{}) {
}
func (logger *Logger) Infoln(args ...interface{}) {
if logger.Level >= InfoLevel {
if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Infoln(args...)
logger.releaseEntry(entry)
@ -260,7 +261,7 @@ func (logger *Logger) Println(args ...interface{}) {
}
func (logger *Logger) Warnln(args ...interface{}) {
if logger.Level >= WarnLevel {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
@ -268,7 +269,7 @@ func (logger *Logger) Warnln(args ...interface{}) {
}
func (logger *Logger) Warningln(args ...interface{}) {
if logger.Level >= WarnLevel {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
@ -276,7 +277,7 @@ func (logger *Logger) Warningln(args ...interface{}) {
}
func (logger *Logger) Errorln(args ...interface{}) {
if logger.Level >= ErrorLevel {
if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Errorln(args...)
logger.releaseEntry(entry)
@ -284,7 +285,7 @@ func (logger *Logger) Errorln(args ...interface{}) {
}
func (logger *Logger) Fatalln(args ...interface{}) {
if logger.Level >= FatalLevel {
if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatalln(args...)
logger.releaseEntry(entry)
@ -293,7 +294,7 @@ func (logger *Logger) Fatalln(args ...interface{}) {
}
func (logger *Logger) Panicln(args ...interface{}) {
if logger.Level >= PanicLevel {
if logger.level() >= PanicLevel {
entry := logger.newEntry()
entry.Panicln(args...)
logger.releaseEntry(entry)
@ -306,3 +307,17 @@ func (logger *Logger) Panicln(args ...interface{}) {
func (logger *Logger) SetNoLock() {
logger.mu.Disable()
}
func (logger *Logger) level() Level {
return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
}
func (logger *Logger) SetLevel(level Level) {
atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
}
func (logger *Logger) AddHook(hook Hook) {
logger.mu.Lock()
defer logger.mu.Unlock()
logger.Hooks.Add(hook)
}

View file

@ -10,7 +10,7 @@ import (
type Fields map[string]interface{}
// Level type
type Level uint8
type Level uint32
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {

View file

@ -1,8 +0,0 @@
// +build appengine
package logrus
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
return true
}

View file

@ -1,10 +1,10 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine
// +build !appengine,!gopherjs
package logrus
import "syscall"
import "golang.org/x/sys/unix"
const ioctlReadTermios = syscall.TIOCGETA
const ioctlReadTermios = unix.TIOCGETA
type Termios syscall.Termios
type Termios unix.Termios

View file

@ -0,0 +1,11 @@
// +build appengine gopherjs
package logrus
import (
"io"
)
func checkIfTerminal(w io.Writer) bool {
return true
}

View file

@ -0,0 +1,19 @@
// +build !appengine,!gopherjs
package logrus
import (
"io"
"os"
"golang.org/x/crypto/ssh/terminal"
)
func checkIfTerminal(w io.Writer) bool {
switch v := w.(type) {
case *os.File:
return terminal.IsTerminal(int(v.Fd()))
default:
return false
}
}

View file

@ -3,12 +3,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build !appengine,!gopherjs
package logrus
import "syscall"
import "golang.org/x/sys/unix"
const ioctlReadTermios = syscall.TCGETS
const ioctlReadTermios = unix.TCGETS
type Termios syscall.Termios
type Termios unix.Termios

View file

@ -1,22 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin freebsd openbsd netbsd dragonfly
// +build !appengine
package logrus
import (
"syscall"
"unsafe"
)
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stderr
var termios Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
}

View file

@ -1,15 +0,0 @@
// +build solaris,!appengine
package logrus
import (
"os"
"golang.org/x/sys/unix"
)
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
return err == nil
}

View file

@ -1,27 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows,!appengine
package logrus
import (
"syscall"
"unsafe"
)
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stderr
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
}

View file

@ -3,9 +3,9 @@ package logrus
import (
"bytes"
"fmt"
"runtime"
"sort"
"strings"
"sync"
"time"
)
@ -14,24 +14,19 @@ const (
red = 31
green = 32
yellow = 33
blue = 34
blue = 36
gray = 37
)
var (
baseTimestamp time.Time
isTerminal bool
)
func init() {
baseTimestamp = time.Now()
isTerminal = IsTerminal()
}
func miniTS() int {
return int(time.Since(baseTimestamp) / time.Second)
}
// TextFormatter formats logs into text
type TextFormatter struct {
// Set to true to bypass checking for a TTY before outputting colors.
ForceColors bool
@ -54,11 +49,26 @@ type TextFormatter struct {
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool
// QuoteEmptyFields will wrap empty fields in quotes if true
QuoteEmptyFields bool
// Whether the logger's out is to a terminal
isTerminal bool
sync.Once
}
func (f *TextFormatter) init(entry *Entry) {
if entry.Logger != nil {
f.isTerminal = checkIfTerminal(entry.Logger.Out)
}
}
// Format renders a single log entry
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
var b *bytes.Buffer
var keys []string = make([]string, 0, len(entry.Data))
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
@ -74,12 +84,13 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
prefixFieldClashes(entry.Data)
isColorTerminal := isTerminal && (runtime.GOOS != "windows")
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
f.Do(func() { f.init(entry) })
isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = DefaultTimestampFormat
timestampFormat = defaultTimestampFormat
}
if isColored {
f.printColored(b, entry, keys, timestampFormat)
@ -115,23 +126,29 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
levelText := strings.ToUpper(entry.Level.String())[0:4]
if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
if f.DisableTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
} else if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
f.appendValue(b, v)
}
}
func needsQuoting(text string) bool {
func (f *TextFormatter) needsQuoting(text string) bool {
if f.QuoteEmptyFields && len(text) == 0 {
return true
}
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.') {
ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
return true
}
}
@ -139,27 +156,23 @@ func needsQuoting(text string) bool {
}
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
if b.Len() > 0 {
b.WriteByte(' ')
}
b.WriteString(key)
b.WriteByte('=')
f.appendValue(b, value)
}
switch value := value.(type) {
case string:
if !needsQuoting(value) {
b.WriteString(value)
} else {
fmt.Fprintf(b, "%q", value)
}
case error:
errmsg := value.Error()
if !needsQuoting(errmsg) {
b.WriteString(errmsg)
} else {
fmt.Fprintf(b, "%q", value)
}
default:
fmt.Fprint(b, value)
func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
stringVal, ok := value.(string)
if !ok {
stringVal = fmt.Sprint(value)
}
b.WriteByte(' ')
if !f.needsQuoting(stringVal) {
b.WriteString(stringVal)
} else {
b.WriteString(fmt.Sprintf("%q", stringVal))
}
}

View file

@ -11,39 +11,48 @@ func (logger *Logger) Writer() *io.PipeWriter {
}
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
return NewEntry(logger).WriterLevel(level)
}
func (entry *Entry) Writer() *io.PipeWriter {
return entry.WriterLevel(InfoLevel)
}
func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
reader, writer := io.Pipe()
var printFunc func(args ...interface{})
switch level {
case DebugLevel:
printFunc = logger.Debug
printFunc = entry.Debug
case InfoLevel:
printFunc = logger.Info
printFunc = entry.Info
case WarnLevel:
printFunc = logger.Warn
printFunc = entry.Warn
case ErrorLevel:
printFunc = logger.Error
printFunc = entry.Error
case FatalLevel:
printFunc = logger.Fatal
printFunc = entry.Fatal
case PanicLevel:
printFunc = logger.Panic
printFunc = entry.Panic
default:
printFunc = logger.Print
printFunc = entry.Print
}
go logger.writerScanner(reader, printFunc)
go entry.writerScanner(reader, printFunc)
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
printFunc(scanner.Text())
}
if err := scanner.Err(); err != nil {
logger.Errorf("Error while reading from Writer: %s", err)
entry.Errorf("Error while reading from Writer: %s", err)
}
reader.Close()
}

View file

@ -5,7 +5,20 @@
This is a generic middleware to rate-limit HTTP requests.
**NOTE:** This library is considered finished, any new activities are probably centered around `thirdparty` modules.
**NOTE 1:** This library is considered finished.
**NOTE 2:** In the coming weeks, I will be removing thirdparty modules and moving them to their own dedicated repos.
**NOTE 3:** Major version changes are backward-incompatible. `v2.0.0` streamlines the ugliness of the old API.
## Versions
**v1.0.0:** This version maintains the old API but all of the thirdparty modules are moved to their own repo.
**v2.x.x:** Brand new API for the sake of code cleanup, thread safety, & auto-expiring data structures.
**v3.x.x:** Apparently we have been using golang.org/x/time/rate incorrectly. See issue #48. It always limit X number per 1 second. The time duration is not changeable, so it does not make sense to pass TTL to tollbooth.
## Five Minutes Tutorial
@ -24,7 +37,7 @@ func HelloHandler(w http.ResponseWriter, req *http.Request) {
func main() {
// Create a request limiter per handler.
http.Handle("/", tollbooth.LimitFuncHandler(tollbooth.NewLimiter(1, time.Second), HelloHandler))
http.Handle("/", tollbooth.LimitFuncHandler(tollbooth.NewLimiter(1, nil), HelloHandler))
http.ListenAndServe(":12345", nil)
}
```
@ -33,39 +46,119 @@ func main() {
1. Rate-limit by request's remote IP, path, methods, custom headers, & basic auth usernames.
```go
limiter := tollbooth.NewLimiter(1, time.Second)
import (
"time"
"github.com/didip/tollbooth/limiter"
)
lmt := tollbooth.NewLimiter(1, nil)
// or create a limiter with expirable token buckets
// This setting means:
// create a 1 request/second limiter and
// every token bucket in it will expire 1 hour after it was initially set.
limiter = tollbooth.NewLimiterExpiringBuckets(1, time.Second, time.Hour, 0)
lmt = tollbooth.NewLimiter(1, &limiter.ExpirableOptions{DefaultExpirationTTL: time.Hour})
// Configure list of places to look for IP address.
// By default it's: "RemoteAddr", "X-Forwarded-For", "X-Real-IP"
// If your application is behind a proxy, set "X-Forwarded-For" first.
limiter.IPLookups = []string{"RemoteAddr", "X-Forwarded-For", "X-Real-IP"}
lmt.SetIPLookups([]string{"RemoteAddr", "X-Forwarded-For", "X-Real-IP"})
// Limit only GET and POST requests.
limiter.Methods = []string{"GET", "POST"}
// Limit request headers containing certain values.
// Typically, you prefetched these values from the database.
limiter.Headers = make(map[string][]string)
limiter.Headers["X-Access-Token"] = []string{"abc123", "xyz098"}
lmt.SetMethods([]string{"GET", "POST"})
// Limit based on basic auth usernames.
// Typically, you prefetched these values from the database.
limiter.BasicAuthUsers = []string{"bob", "joe", "didip"}
// You add them on-load, or later as you handle requests.
lmt.SetBasicAuthUsers([]string{"bob", "jane", "didip", "vip"})
// You can remove them later as well.
lmt.RemoveBasicAuthUsers([]string{"vip"})
// Limit request headers containing certain values.
// You add them on-load, or later as you handle requests.
lmt.SetHeader("X-Access-Token", []string{"abc123", "xyz098"})
// You can remove all entries at once.
lmt.RemoveHeader("X-Access-Token")
// Or remove specific ones.
lmt.RemoveHeaderEntries("X-Access-Token", []string{"limitless-token"})
// By the way, the setters are chainable. Example:
lmt.SetIPLookups([]string{"RemoteAddr", "X-Forwarded-For", "X-Real-IP"}).
SetMethods([]string{"GET", "POST"}).
SetBasicAuthUsers([]string{"sansa"}).
SetBasicAuthUsers([]string{"tyrion"})
```
2. Each request handler can be rate-limited individually.
2. Compose your own middleware by using `LimitByKeys()`.
3. Compose your own middleware by using `LimitByKeys()`.
3. Header entries and basic auth users can expire over time (to conserve memory).
4. Tollbooth does not require external storage since it uses an algorithm called [Token Bucket](http://en.wikipedia.org/wiki/Token_bucket) [(Go library: golang.org/x/time/rate)](//godoc.org/golang.org/x/time/rate).
```go
import "time"
lmt := tollbooth.NewLimiter(1, nil)
// Set a custom expiration TTL for token bucket.
lmt.SetTokenBucketExpirationTTL(time.Hour)
// Set a custom expiration TTL for basic auth users.
lmt.SetBasicAuthExpirationTTL(time.Hour)
// Set a custom expiration TTL for header entries.
lmt.SetHeaderEntryExpirationTTL(time.Hour)
```
4. Upon rejection, the following HTTP response headers are available to users:
* `X-Rate-Limit-Limit` The maximum request limit.
* `X-Rate-Limit-Duration` The rate-limiter duration.
* `X-Rate-Limit-Request-Forwarded-For` The rejected request `X-Forwarded-For`.
* `X-Rate-Limit-Request-Remote-Addr` The rejected request `RemoteAddr`.
# Other Web Frameworks
5. Customize your own message or function when limit is reached.
Support for other web frameworks are defined under `/thirdparty` directory.
```go
lmt := tollbooth.NewLimiter(1, nil)
// Set a custom message.
lmt.SetMessage("You have reached maximum request limit.")
// Set a custom content-type.
lmt.SetMessageContentType("text/plain; charset=utf-8")
// Set a custom function for rejection.
lmt.SetOnLimitReached(func(w http.ResponseWriter, r *http.Request) { fmt.Println("A request was rejected") })
```
6. Tollbooth does not require external storage since it uses an algorithm called [Token Bucket](http://en.wikipedia.org/wiki/Token_bucket) [(Go library: golang.org/x/time/rate)](//godoc.org/golang.org/x/time/rate).
## Other Web Frameworks
Sometimes, other frameworks require a little bit of shim to use Tollbooth. These shims below are contributed by the community, so I make no promises on how well they work. The one I am familiar with are: Chi, Gin, and Negroni.
* [Chi](https://github.com/didip/tollbooth_chi)
* [Echo](https://github.com/didip/tollbooth_echo)
* [FastHTTP](https://github.com/didip/tollbooth_fasthttp)
* [Gin](https://github.com/didip/tollbooth_gin)
* [GoRestful](https://github.com/didip/tollbooth_gorestful)
* [HTTPRouter](https://github.com/didip/tollbooth_httprouter)
* [Iris](https://github.com/didip/tollbooth_iris)
* [Negroni](https://github.com/didip/tollbooth_negroni)
## My other Go libraries
* [Stopwatch](https://github.com/didip/stopwatch): A small library to measure latency of things. Useful if you want to report latency data to Graphite.
* [Gomet](https://github.com/didip/gomet): Simple HTTP client & server long poll library for Go. Useful for receiving live updates without needing Websocket.

View file

@ -1,157 +0,0 @@
// Package config provides data structure to configure rate-limiter.
package config
import (
"sync"
"time"
gocache "github.com/patrickmn/go-cache"
"golang.org/x/time/rate"
)
// NewLimiter is a constructor for Limiter.
func NewLimiter(max int64, ttl time.Duration) *Limiter {
limiter := &Limiter{Max: max, TTL: ttl}
limiter.MessageContentType = "text/plain; charset=utf-8"
limiter.Message = "You have reached maximum request limit."
limiter.StatusCode = 429
limiter.IPLookups = []string{"RemoteAddr", "X-Forwarded-For", "X-Real-IP"}
limiter.tokenBucketsNoTTL = make(map[string]*rate.Limiter)
return limiter
}
// NewLimiterExpiringBuckets constructs Limiter with expirable TokenBuckets.
func NewLimiterExpiringBuckets(max int64, ttl, bucketDefaultExpirationTTL, bucketExpireJobInterval time.Duration) *Limiter {
limiter := NewLimiter(max, ttl)
limiter.TokenBuckets.DefaultExpirationTTL = bucketDefaultExpirationTTL
limiter.TokenBuckets.ExpireJobInterval = bucketExpireJobInterval
// Default for ExpireJobInterval is every minute.
if limiter.TokenBuckets.ExpireJobInterval <= 0 {
limiter.TokenBuckets.ExpireJobInterval = time.Minute
}
limiter.tokenBucketsWithTTL = gocache.New(
limiter.TokenBuckets.DefaultExpirationTTL,
limiter.TokenBuckets.ExpireJobInterval,
)
return limiter
}
// Limiter is a config struct to limit a particular request handler.
type Limiter struct {
// HTTP message when limit is reached.
Message string
// Content-Type for Message
MessageContentType string
// HTTP status code when limit is reached.
StatusCode int
// Maximum number of requests to limit per duration.
Max int64
// Duration of rate-limiter.
TTL time.Duration
// List of places to look up IP address.
// Default is "RemoteAddr", "X-Forwarded-For", "X-Real-IP".
// You can rearrange the order as you like.
IPLookups []string
// List of HTTP Methods to limit (GET, POST, PUT, etc.).
// Empty means limit all methods.
Methods []string
// List of HTTP headers to limit.
// Empty means skip headers checking.
Headers map[string][]string
// List of basic auth usernames to limit.
BasicAuthUsers []string
// Able to configure token bucket expirations.
TokenBuckets struct {
// Default TTL to expire bucket per key basis.
DefaultExpirationTTL time.Duration
// How frequently tollbooth will trigger the expire job
ExpireJobInterval time.Duration
}
// Map of limiters without TTL
tokenBucketsNoTTL map[string]*rate.Limiter
// Map of limiters with TTL
tokenBucketsWithTTL *gocache.Cache
sync.RWMutex
}
func (l *Limiter) isUsingTokenBucketsWithTTL() bool {
return l.TokenBuckets.DefaultExpirationTTL > 0
}
func (l *Limiter) limitReachedNoTokenBucketTTL(key string) bool {
l.Lock()
defer l.Unlock()
if _, found := l.tokenBucketsNoTTL[key]; !found {
l.tokenBucketsNoTTL[key] = rate.NewLimiter(rate.Every(l.TTL), int(l.Max))
}
return !l.tokenBucketsNoTTL[key].AllowN(time.Now(), 1)
}
func (l *Limiter) limitReachedWithDefaultTokenBucketTTL(key string) bool {
return l.limitReachedWithCustomTokenBucketTTL(key, gocache.DefaultExpiration)
}
func (l *Limiter) limitReachedWithCustomTokenBucketTTL(key string, tokenBucketTTL time.Duration) bool {
l.Lock()
defer l.Unlock()
if _, found := l.tokenBucketsWithTTL.Get(key); !found {
l.tokenBucketsWithTTL.Set(
key,
rate.NewLimiter(rate.Every(l.TTL), int(l.Max)),
tokenBucketTTL,
)
}
expiringMap, found := l.tokenBucketsWithTTL.Get(key)
if !found {
return false
}
return !expiringMap.(*rate.Limiter).AllowN(time.Now(), 1)
}
// LimitReached returns a bool indicating if the Bucket identified by key ran out of tokens.
func (l *Limiter) LimitReached(key string) bool {
if l.isUsingTokenBucketsWithTTL() {
return l.limitReachedWithDefaultTokenBucketTTL(key)
} else {
return l.limitReachedNoTokenBucketTTL(key)
}
return false
}
// LimitReachedWithCustomTokenBucketTTL returns a bool indicating if the Bucket identified by key ran out of tokens.
// This public API allows user to define custom expiration TTL on the key.
func (l *Limiter) LimitReachedWithCustomTokenBucketTTL(key string, tokenBucketTTL time.Duration) bool {
if l.isUsingTokenBucketsWithTTL() {
return l.limitReachedWithCustomTokenBucketTTL(key, tokenBucketTTL)
} else {
return l.limitReachedNoTokenBucketTTL(key)
}
return false
}

View file

@ -2,6 +2,7 @@
package libstring
import (
"net"
"net/http"
"strings"
)
@ -16,22 +17,20 @@ func StringInSlice(sliceString []string, needle string) bool {
return false
}
func ipAddrFromRemoteAddr(s string) string {
idx := strings.LastIndex(s, ":")
if idx == -1 {
return s
}
return s[:idx]
}
// RemoteIP finds IP Address given http.Request struct.
func RemoteIP(ipLookups []string, r *http.Request) string {
func RemoteIP(ipLookups []string, forwardedForIndexFromBehind int, r *http.Request) string {
realIP := r.Header.Get("X-Real-IP")
forwardedFor := r.Header.Get("X-Forwarded-For")
for _, lookup := range ipLookups {
if lookup == "RemoteAddr" {
return ipAddrFromRemoteAddr(r.RemoteAddr)
// 1. Cover the basic use cases for both ipv4 and ipv6
ip, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
// 2. Upon error, just return the remote addr.
return r.RemoteAddr
}
return ip
}
if lookup == "X-Forwarded-For" && forwardedFor != "" {
// X-Forwarded-For is potentially a list of addresses separated with ","
@ -39,7 +38,13 @@ func RemoteIP(ipLookups []string, r *http.Request) string {
for i, p := range parts {
parts[i] = strings.TrimSpace(p)
}
return parts[0]
partIndex := len(parts) - 1 - forwardedForIndexFromBehind
if partIndex < 0 {
partIndex = 0
}
return parts[partIndex]
}
if lookup == "X-Real-IP" && realIP != "" {
return realIP

474
vendor/github.com/didip/tollbooth/limiter/limiter.go generated vendored Normal file
View file

@ -0,0 +1,474 @@
// Package limiter provides data structure to configure rate-limiter.
package limiter
import (
"net/http"
"sync"
"time"
gocache "github.com/patrickmn/go-cache"
"golang.org/x/time/rate"
)
// New is a constructor for Limiter.
func New(generalExpirableOptions *ExpirableOptions) *Limiter {
lmt := &Limiter{}
lmt.SetMessageContentType("text/plain; charset=utf-8").
SetMessage("You have reached maximum request limit.").
SetStatusCode(429).
SetOnLimitReached(nil).
SetIPLookups([]string{"RemoteAddr", "X-Forwarded-For", "X-Real-IP"}).
SetForwardedForIndexFromBehind(0).
SetHeaders(make(map[string][]string))
if generalExpirableOptions != nil {
lmt.generalExpirableOptions = generalExpirableOptions
} else {
lmt.generalExpirableOptions = &ExpirableOptions{}
}
// Default for ExpireJobInterval is every minute.
if lmt.generalExpirableOptions.ExpireJobInterval <= 0 {
lmt.generalExpirableOptions.ExpireJobInterval = time.Minute
}
// Default for DefaultExpirationTTL is 10 years.
if lmt.generalExpirableOptions.DefaultExpirationTTL <= 0 {
lmt.generalExpirableOptions.DefaultExpirationTTL = 87600 * time.Hour
}
lmt.tokenBuckets = gocache.New(
lmt.generalExpirableOptions.DefaultExpirationTTL,
lmt.generalExpirableOptions.ExpireJobInterval,
)
lmt.basicAuthUsers = gocache.New(
lmt.generalExpirableOptions.DefaultExpirationTTL,
lmt.generalExpirableOptions.ExpireJobInterval,
)
return lmt
}
// Limiter is a config struct to limit a particular request handler.
type Limiter struct {
	// Maximum number of requests to limit per second.
	max float64

	// Limiter burst size
	burst int

	// HTTP message when limit is reached.
	message string

	// Content-Type for Message
	messageContentType string

	// HTTP status code when limit is reached.
	statusCode int

	// A function to call when a request is rejected.
	onLimitReached func(w http.ResponseWriter, r *http.Request)

	// List of places to look up IP address.
	// Default is "RemoteAddr", "X-Forwarded-For", "X-Real-IP".
	// You can rearrange the order as you like.
	ipLookups []string

	// Which entry (counted from the end) of X-Forwarded-For to use.
	forwardedForIndex int

	// List of HTTP Methods to limit (GET, POST, PUT, etc.).
	// Empty means limit all methods.
	methods []string

	// Able to configure token bucket expirations.
	generalExpirableOptions *ExpirableOptions

	// List of basic auth usernames to limit.
	basicAuthUsers *gocache.Cache

	// Map of HTTP headers to limit.
	// Empty means skip headers checking.
	headers map[string]*gocache.Cache

	// Map of limiters with TTL
	tokenBuckets *gocache.Cache

	// Per-category TTL overrides; <= 0 means "use
	// generalExpirableOptions.DefaultExpirationTTL".
	tokenBucketExpirationTTL time.Duration
	basicAuthExpirationTTL   time.Duration
	headerEntryExpirationTTL time.Duration

	// Embedded mutex guarding the scalar fields above; take it via the
	// exported setters/getters rather than locking directly.
	sync.RWMutex
}
// SetTokenBucketExpirationTTL is thread-safe way of setting custom token bucket expiration TTL.
func (l *Limiter) SetTokenBucketExpirationTTL(ttl time.Duration) *Limiter {
	l.Lock()
	defer l.Unlock()

	l.tokenBucketExpirationTTL = ttl
	return l
}
// GetTokenBucketExpirationTTL is thread-safe way of getting custom token bucket expiration TTL.
func (l *Limiter) GetTokenBucketExpirationTTL() time.Duration {
	l.RLock()
	defer l.RUnlock()
	return l.tokenBucketExpirationTTL
}
// SetBasicAuthExpirationTTL is thread-safe way of setting custom basic auth expiration TTL.
func (l *Limiter) SetBasicAuthExpirationTTL(ttl time.Duration) *Limiter {
	l.Lock()
	defer l.Unlock()

	l.basicAuthExpirationTTL = ttl
	return l
}
// GetBasicAuthExpirationTTL is thread-safe way of getting custom basic auth expiration TTL.
func (l *Limiter) GetBasicAuthExpirationTTL() (ttl time.Duration) {
	l.RLock()
	ttl = l.basicAuthExpirationTTL
	l.RUnlock()
	return ttl
}
// SetHeaderEntryExpirationTTL is thread-safe way of setting custom header entry expiration TTL.
func (l *Limiter) SetHeaderEntryExpirationTTL(ttl time.Duration) *Limiter {
	l.Lock()
	l.headerEntryExpirationTTL = ttl
	l.Unlock()
	return l
}
// GetHeaderEntryExpirationTTL is thread-safe way of getting custom header entry expiration TTL.
func (l *Limiter) GetHeaderEntryExpirationTTL() time.Duration {
	l.RLock()
	defer l.RUnlock()
	return l.headerEntryExpirationTTL
}
// SetMax is thread-safe way of setting maximum number of requests to limit per duration.
func (l *Limiter) SetMax(max float64) *Limiter {
	l.Lock()
	defer l.Unlock()

	l.max = max
	return l
}
// GetMax is thread-safe way of getting maximum number of requests to limit per duration.
func (l *Limiter) GetMax() (max float64) {
	l.RLock()
	max = l.max
	l.RUnlock()
	return max
}
// SetBurst is thread-safe way of setting maximum burst size.
func (l *Limiter) SetBurst(burst int) *Limiter {
	l.Lock()
	defer l.Unlock()

	l.burst = burst
	return l
}
// GetBurst is thread-safe way of getting maximum burst size.
func (l *Limiter) GetBurst() int {
	l.RLock()
	defer l.RUnlock()
	return l.burst
}
// SetMessage is thread-safe way of setting HTTP message when limit is reached.
func (l *Limiter) SetMessage(msg string) *Limiter {
	l.Lock()
	defer l.Unlock()

	l.message = msg
	return l
}
// GetMessage is thread-safe way of getting HTTP message when limit is reached.
func (l *Limiter) GetMessage() (msg string) {
	l.RLock()
	msg = l.message
	l.RUnlock()
	return msg
}
// SetMessageContentType is thread-safe way of setting HTTP message Content-Type when limit is reached.
func (l *Limiter) SetMessageContentType(contentType string) *Limiter {
	l.Lock()
	defer l.Unlock()

	l.messageContentType = contentType
	return l
}
// GetMessageContentType is thread-safe way of getting HTTP message Content-Type when limit is reached.
func (l *Limiter) GetMessageContentType() (contentType string) {
	l.RLock()
	contentType = l.messageContentType
	l.RUnlock()
	return contentType
}
// SetStatusCode is thread-safe way of setting HTTP status code when limit is reached.
func (l *Limiter) SetStatusCode(statusCode int) *Limiter {
	l.Lock()
	defer l.Unlock()

	l.statusCode = statusCode
	return l
}
// GetStatusCode is thread-safe way of getting HTTP status code when limit is reached.
func (l *Limiter) GetStatusCode() (code int) {
	l.RLock()
	code = l.statusCode
	l.RUnlock()
	return code
}
// SetOnLimitReached is thread-safe way of setting after-rejection function when limit is reached.
func (l *Limiter) SetOnLimitReached(fn func(w http.ResponseWriter, r *http.Request)) *Limiter {
	l.Lock()
	defer l.Unlock()

	l.onLimitReached = fn
	return l
}
// ExecOnLimitReached is thread-safe way of executing after-rejection function when limit is reached.
// It is a no-op when no callback has been configured.
func (l *Limiter) ExecOnLimitReached(w http.ResponseWriter, r *http.Request) {
	l.RLock()
	defer l.RUnlock()

	// The read lock is held for the duration of the callback,
	// matching the original lock scope.
	if l.onLimitReached != nil {
		l.onLimitReached(w, r)
	}
}
// SetIPLookups is thread-safe way of setting list of places to look up IP address.
func (l *Limiter) SetIPLookups(ipLookups []string) *Limiter {
	l.Lock()
	defer l.Unlock()

	l.ipLookups = ipLookups
	return l
}
// GetIPLookups is thread-safe way of getting list of places to look up IP address.
func (l *Limiter) GetIPLookups() (lookups []string) {
	l.RLock()
	lookups = l.ipLookups
	l.RUnlock()
	return lookups
}
// SetForwardedForIndexFromBehind is thread-safe way of setting which X-Forwarded-For index to choose.
func (l *Limiter) SetForwardedForIndexFromBehind(forwardedForIndex int) *Limiter {
	l.Lock()
	defer l.Unlock()

	l.forwardedForIndex = forwardedForIndex
	return l
}
// GetForwardedForIndexFromBehind is thread-safe way of getting which X-Forwarded-For index to choose.
func (l *Limiter) GetForwardedForIndexFromBehind() (idx int) {
	l.RLock()
	idx = l.forwardedForIndex
	l.RUnlock()
	return idx
}
// SetMethods is thread-safe way of setting list of HTTP Methods to limit (GET, POST, PUT, etc.).
func (l *Limiter) SetMethods(methods []string) *Limiter {
	l.Lock()
	defer l.Unlock()

	l.methods = methods
	return l
}
// GetMethods is thread-safe way of getting list of HTTP Methods to limit (GET, POST, PUT, etc.).
func (l *Limiter) GetMethods() (methods []string) {
	l.RLock()
	methods = l.methods
	l.RUnlock()
	return methods
}
// SetBasicAuthUsers is thread-safe way of setting list of basic auth usernames to limit.
// Each entry expires after the basic-auth TTL (or the limiter-wide default).
func (l *Limiter) SetBasicAuthUsers(basicAuthUsers []string) *Limiter {
	ttl := l.GetBasicAuthExpirationTTL()
	if ttl <= 0 {
		ttl = l.generalExpirableOptions.DefaultExpirationTTL
	}

	for _, username := range basicAuthUsers {
		l.basicAuthUsers.Set(username, true, ttl)
	}
	return l
}
// GetBasicAuthUsers is thread-safe way of getting list of basic auth usernames to limit.
// Order of the returned slice is unspecified (map iteration order); the
// slice is nil when no users are configured, matching prior behavior.
func (l *Limiter) GetBasicAuthUsers() []string {
	asMap := l.basicAuthUsers.Items()

	var basicAuthUsers []string
	// Key-only range: the redundant blank identifier (`for k, _ :=`) is
	// non-idiomatic Go and flagged by gofmt -s / linters.
	for basicAuthUser := range asMap {
		basicAuthUsers = append(basicAuthUsers, basicAuthUser)
	}
	return basicAuthUsers
}
// RemoveBasicAuthUsers is thread-safe way of removing basic auth usernames from existing list.
func (l *Limiter) RemoveBasicAuthUsers(basicAuthUsers []string) *Limiter {
	for _, username := range basicAuthUsers {
		l.basicAuthUsers.Delete(username)
	}
	return l
}
// SetHeaders is thread-safe way of setting map of HTTP headers to limit.
// Each header's entries are delegated to SetHeader.
func (l *Limiter) SetHeaders(headers map[string][]string) *Limiter {
	// Lazily initialize the backing map on first use.
	if l.headers == nil {
		l.headers = make(map[string]*gocache.Cache)
	}

	for name, values := range headers {
		l.SetHeader(name, values)
	}
	return l
}
// GetHeaders is thread-safe way of getting map of HTTP headers to limit.
// Keys are header names; values are the currently-cached (non-expired)
// entry strings for that header, in unspecified order.
func (l *Limiter) GetHeaders() map[string][]string {
	results := make(map[string][]string)

	l.RLock()
	defer l.RUnlock()

	for header, entriesAsGoCache := range l.headers {
		entries := make([]string, 0)
		// Key-only range: dropping the redundant blank identifier
		// (`for k, _ :=`) is the idiomatic form.
		for entry := range entriesAsGoCache.Items() {
			entries = append(entries, entry)
		}
		results[header] = entries
	}

	return results
}
// SetHeader is thread-safe way of setting entries of 1 HTTP header.
// Entries are stored in a per-header gocache with the header-entry TTL
// (falling back to the limiter-wide default). Existing entries for the
// header are kept; new ones are added or refreshed.
//
// NOTE(review): the read of l.headers and the later write are two
// separate critical sections; concurrent SetHeader calls for the same
// new header could each build a cache and one would overwrite the
// other's — confirm whether callers ever race here.
func (l *Limiter) SetHeader(header string, entries []string) *Limiter {
	// Phase 1: look up any existing cache under the read lock only.
	l.RLock()
	existing, found := l.headers[header]
	l.RUnlock()

	ttl := l.GetHeaderEntryExpirationTTL()
	if ttl <= 0 {
		ttl = l.generalExpirableOptions.DefaultExpirationTTL
	}

	if !found {
		existing = gocache.New(ttl, l.generalExpirableOptions.ExpireJobInterval)
	}

	// Each entry gets its own expiration.
	for _, entry := range entries {
		existing.Set(entry, true, ttl)
	}

	// Phase 2: publish the (possibly new) cache under the write lock.
	l.Lock()
	l.headers[header] = existing
	l.Unlock()

	return l
}
// GetHeader is thread-safe way of getting entries of 1 HTTP header.
// It returns an empty slice when the header has never been configured.
func (l *Limiter) GetHeader(header string) []string {
	l.RLock()
	entriesAsGoCache, found := l.headers[header]
	l.RUnlock()

	entries := make([]string, 0)
	if !found {
		// Guard against a missing header: the map lookup yields a nil
		// *gocache.Cache, and calling Items() on it would panic.
		return entries
	}

	for entry := range entriesAsGoCache.Items() {
		entries = append(entries, entry)
	}
	return entries
}
// RemoveHeader is thread-safe way of removing entries of 1 HTTP header.
// The header's cache is replaced with a fresh, empty one.
func (l *Limiter) RemoveHeader(header string) *Limiter {
	ttl := l.GetHeaderEntryExpirationTTL()
	if ttl <= 0 {
		ttl = l.generalExpirableOptions.DefaultExpirationTTL
	}

	fresh := gocache.New(ttl, l.generalExpirableOptions.ExpireJobInterval)

	l.Lock()
	defer l.Unlock()
	l.headers[header] = fresh
	return l
}
// RemoveHeaderEntries is thread-safe way of removing entries of 1 HTTP header rule.
// Unknown headers are ignored.
func (l *Limiter) RemoveHeaderEntries(header string, entriesForRemoval []string) *Limiter {
	l.RLock()
	cache, ok := l.headers[header]
	l.RUnlock()

	if ok {
		for _, entry := range entriesForRemoval {
			cache.Delete(entry)
		}
	}
	return l
}
// limitReachedWithTokenBucketTTL reports whether the token bucket for key
// is out of tokens, lazily creating the bucket (with the given cache TTL)
// on first use. max/burst are snapshotted before taking the write lock to
// avoid recursive locking through the getters.
func (l *Limiter) limitReachedWithTokenBucketTTL(key string, tokenBucketTTL time.Duration) bool {
	lmtMax := l.GetMax()
	lmtBurst := l.GetBurst()

	l.Lock()
	defer l.Unlock()

	// Single lookup instead of the original Get/Set/Get sequence: the
	// second Get immediately after Set could never miss, so the extra
	// round-trip (and its dead !found branch) was pure overhead.
	bucket, found := l.tokenBuckets.Get(key)
	if !found {
		bucket = rate.NewLimiter(rate.Limit(lmtMax), lmtBurst)
		l.tokenBuckets.Set(key, bucket, tokenBucketTTL)
	}

	return !bucket.(*rate.Limiter).Allow()
}
// LimitReached returns a bool indicating if the Bucket identified by key ran out of tokens.
// The bucket's cache TTL falls back to the limiter-wide default when no
// custom token-bucket TTL is configured.
func (l *Limiter) LimitReached(key string) bool {
	bucketTTL := l.GetTokenBucketExpirationTTL()
	if bucketTTL <= 0 {
		bucketTTL = l.generalExpirableOptions.DefaultExpirationTTL
	}
	return l.limitReachedWithTokenBucketTTL(key, bucketTTL)
}

View file

@ -0,0 +1,12 @@
package limiter
import (
"time"
)
// ExpirableOptions configures expiration behavior for a Limiter's
// internal per-key caches (token buckets, basic-auth users, headers).
type ExpirableOptions struct {
	// How long an entry lives before expiring. Values <= 0 are replaced
	// with a 10-year default by limiter.New.
	DefaultExpirationTTL time.Duration

	// How frequently expire job triggers
	ExpireJobInterval time.Duration
}

View file

@ -3,64 +3,41 @@ package tollbooth
import (
"net/http"
"strconv"
"strings"
"time"
"github.com/didip/tollbooth/config"
"fmt"
"github.com/didip/tollbooth/errors"
"github.com/didip/tollbooth/libstring"
"github.com/didip/tollbooth/limiter"
"math"
)
// NewLimiter is a convenience function to config.NewLimiter.
func NewLimiter(max int64, ttl time.Duration) *config.Limiter {
return config.NewLimiter(max, ttl)
// setResponseHeaders configures X-Rate-Limit-Limit and X-Rate-Limit-Duration
func setResponseHeaders(lmt *limiter.Limiter, w http.ResponseWriter, r *http.Request) {
w.Header().Add("X-Rate-Limit-Limit", fmt.Sprintf("%.2f", lmt.GetMax()))
w.Header().Add("X-Rate-Limit-Duration", "1")
w.Header().Add("X-Rate-Limit-Request-Forwarded-For", r.Header.Get("X-Forwarded-For"))
w.Header().Add("X-Rate-Limit-Request-Remote-Addr", r.RemoteAddr)
}
func NewLimiterExpiringBuckets(max int64, ttl, bucketDefaultExpirationTTL, bucketExpireJobInterval time.Duration) *config.Limiter {
return config.NewLimiterExpiringBuckets(max, ttl, bucketDefaultExpirationTTL, bucketExpireJobInterval)
// NewLimiter is a convenience function to limiter.New.
func NewLimiter(max float64, tbOptions *limiter.ExpirableOptions) *limiter.Limiter {
return limiter.New(tbOptions).SetMax(max).SetBurst(int(math.Max(1, max)))
}
// LimitByKeys keeps track number of request made by keys separated by pipe.
// It returns HTTPError when limit is exceeded.
func LimitByKeys(limiter *config.Limiter, keys []string) *errors.HTTPError {
if limiter.LimitReached(strings.Join(keys, "|")) {
return &errors.HTTPError{Message: limiter.Message, StatusCode: limiter.StatusCode}
func LimitByKeys(lmt *limiter.Limiter, keys []string) *errors.HTTPError {
if lmt.LimitReached(strings.Join(keys, "|")) {
return &errors.HTTPError{Message: lmt.GetMessage(), StatusCode: lmt.GetStatusCode()}
}
return nil
}
// LimitByKeysWithCustomTokenBucketTTL keeps track number of request made by keys separated by pipe.
// It returns HTTPError when limit is exceeded.
// User can define a TTL for the key to expire
func LimitByKeysWithCustomTokenBucketTTL(limiter *config.Limiter, keys []string, bucketExpireTTL time.Duration) *errors.HTTPError {
if limiter.LimitReachedWithCustomTokenBucketTTL(strings.Join(keys, "|"), bucketExpireTTL) {
return &errors.HTTPError{Message: limiter.Message, StatusCode: limiter.StatusCode}
}
return nil
}
// LimitByRequest builds keys based on http.Request struct,
// loops through all the keys, and check if any one of them returns HTTPError.
func LimitByRequest(limiter *config.Limiter, r *http.Request) *errors.HTTPError {
sliceKeys := BuildKeys(limiter, r)
// Loop sliceKeys and check if one of them has error.
for _, keys := range sliceKeys {
httpError := LimitByKeys(limiter, keys)
if httpError != nil {
return httpError
}
}
return nil
}
// BuildKeys generates a slice of keys to rate-limit by given config and request structs.
func BuildKeys(limiter *config.Limiter, r *http.Request) [][]string {
remoteIP := libstring.RemoteIP(limiter.IPLookups, r)
// BuildKeys generates a slice of keys to rate-limit by given limiter and request structs.
func BuildKeys(lmt *limiter.Limiter, r *http.Request) [][]string {
remoteIP := libstring.RemoteIP(lmt.GetIPLookups(), lmt.GetForwardedForIndexFromBehind(), r)
path := r.URL.Path
sliceKeys := make([][]string, 0)
@ -69,14 +46,21 @@ func BuildKeys(limiter *config.Limiter, r *http.Request) [][]string {
return sliceKeys
}
if limiter.Methods != nil && limiter.Headers != nil && limiter.BasicAuthUsers != nil {
lmtMethods := lmt.GetMethods()
lmtHeaders := lmt.GetHeaders()
lmtBasicAuthUsers := lmt.GetBasicAuthUsers()
lmtHeadersIsSet := len(lmtHeaders) > 0
lmtBasicAuthUsersIsSet := len(lmtBasicAuthUsers) > 0
if lmtMethods != nil && lmtHeadersIsSet && lmtBasicAuthUsersIsSet {
// Limit by HTTP methods and HTTP headers+values and Basic Auth credentials.
if libstring.StringInSlice(limiter.Methods, r.Method) {
for headerKey, headerValues := range limiter.Headers {
if libstring.StringInSlice(lmtMethods, r.Method) {
for headerKey, headerValues := range lmtHeaders {
if (headerValues == nil || len(headerValues) <= 0) && r.Header.Get(headerKey) != "" {
// If header values are empty, rate-limit all request with headerKey.
username, _, ok := r.BasicAuth()
if ok && libstring.StringInSlice(limiter.BasicAuthUsers, username) {
if ok && libstring.StringInSlice(lmtBasicAuthUsers, username) {
sliceKeys = append(sliceKeys, []string{remoteIP, path, r.Method, headerKey, username})
}
@ -84,7 +68,7 @@ func BuildKeys(limiter *config.Limiter, r *http.Request) [][]string {
// If header values are not empty, rate-limit all request with headerKey and headerValues.
for _, headerValue := range headerValues {
username, _, ok := r.BasicAuth()
if ok && libstring.StringInSlice(limiter.BasicAuthUsers, username) {
if ok && libstring.StringInSlice(lmtBasicAuthUsers, username) {
sliceKeys = append(sliceKeys, []string{remoteIP, path, r.Method, headerKey, headerValue, username})
}
}
@ -92,10 +76,10 @@ func BuildKeys(limiter *config.Limiter, r *http.Request) [][]string {
}
}
} else if limiter.Methods != nil && limiter.Headers != nil {
} else if lmtMethods != nil && lmtHeadersIsSet {
// Limit by HTTP methods and HTTP headers+values.
if libstring.StringInSlice(limiter.Methods, r.Method) {
for headerKey, headerValues := range limiter.Headers {
if libstring.StringInSlice(lmtMethods, r.Method) {
for headerKey, headerValues := range lmtHeaders {
if (headerValues == nil || len(headerValues) <= 0) && r.Header.Get(headerKey) != "" {
// If header values are empty, rate-limit all request with headerKey.
sliceKeys = append(sliceKeys, []string{remoteIP, path, r.Method, headerKey})
@ -109,24 +93,24 @@ func BuildKeys(limiter *config.Limiter, r *http.Request) [][]string {
}
}
} else if limiter.Methods != nil && limiter.BasicAuthUsers != nil {
} else if lmtMethods != nil && lmtBasicAuthUsersIsSet {
// Limit by HTTP methods and Basic Auth credentials.
if libstring.StringInSlice(limiter.Methods, r.Method) {
if libstring.StringInSlice(lmtMethods, r.Method) {
username, _, ok := r.BasicAuth()
if ok && libstring.StringInSlice(limiter.BasicAuthUsers, username) {
if ok && libstring.StringInSlice(lmtBasicAuthUsers, username) {
sliceKeys = append(sliceKeys, []string{remoteIP, path, r.Method, username})
}
}
} else if limiter.Methods != nil {
} else if lmtMethods != nil {
// Limit by HTTP methods.
if libstring.StringInSlice(limiter.Methods, r.Method) {
if libstring.StringInSlice(lmtMethods, r.Method) {
sliceKeys = append(sliceKeys, []string{remoteIP, path, r.Method})
}
} else if limiter.Headers != nil {
} else if lmtHeadersIsSet {
// Limit by HTTP headers+values.
for headerKey, headerValues := range limiter.Headers {
for headerKey, headerValues := range lmtHeaders {
if (headerValues == nil || len(headerValues) <= 0) && r.Header.Get(headerKey) != "" {
// If header values are empty, rate-limit all request with headerKey.
sliceKeys = append(sliceKeys, []string{remoteIP, path, headerKey})
@ -139,10 +123,10 @@ func BuildKeys(limiter *config.Limiter, r *http.Request) [][]string {
}
}
} else if limiter.BasicAuthUsers != nil {
} else if lmtBasicAuthUsersIsSet {
// Limit by Basic Auth credentials.
username, _, ok := r.BasicAuth()
if ok && libstring.StringInSlice(limiter.BasicAuthUsers, username) {
if ok && libstring.StringInSlice(lmtBasicAuthUsers, username) {
sliceKeys = append(sliceKeys, []string{remoteIP, path, username})
}
} else {
@ -153,20 +137,31 @@ func BuildKeys(limiter *config.Limiter, r *http.Request) [][]string {
return sliceKeys
}
// SetResponseHeaders configures X-Rate-Limit-Limit and X-Rate-Limit-Duration
func SetResponseHeaders(limiter *config.Limiter, w http.ResponseWriter) {
w.Header().Add("X-Rate-Limit-Limit", strconv.FormatInt(limiter.Max, 10))
w.Header().Add("X-Rate-Limit-Duration", limiter.TTL.String())
// LimitByRequest builds keys based on http.Request struct,
// loops through all the keys, and check if any one of them returns HTTPError.
func LimitByRequest(lmt *limiter.Limiter, w http.ResponseWriter, r *http.Request) *errors.HTTPError {
setResponseHeaders(lmt, w, r)
sliceKeys := BuildKeys(lmt, r)
// Loop sliceKeys and check if one of them has error.
for _, keys := range sliceKeys {
httpError := LimitByKeys(lmt, keys)
if httpError != nil {
return httpError
}
}
return nil
}
// LimitHandler is a middleware that performs rate-limiting given http.Handler struct.
func LimitHandler(limiter *config.Limiter, next http.Handler) http.Handler {
func LimitHandler(lmt *limiter.Limiter, next http.Handler) http.Handler {
middle := func(w http.ResponseWriter, r *http.Request) {
SetResponseHeaders(limiter, w)
httpError := LimitByRequest(limiter, r)
httpError := LimitByRequest(lmt, w, r)
if httpError != nil {
w.Header().Add("Content-Type", limiter.MessageContentType)
lmt.ExecOnLimitReached(w, r)
w.Header().Add("Content-Type", lmt.GetMessageContentType())
w.WriteHeader(httpError.StatusCode)
w.Write([]byte(httpError.Message))
return
@ -180,6 +175,6 @@ func LimitHandler(limiter *config.Limiter, next http.Handler) http.Handler {
}
// LimitFuncHandler is a middleware that performs rate-limiting given request handler function.
func LimitFuncHandler(limiter *config.Limiter, nextFunc func(http.ResponseWriter, *http.Request)) http.Handler {
return LimitHandler(limiter, http.HandlerFunc(nextFunc))
func LimitFuncHandler(lmt *limiter.Limiter, nextFunc func(http.ResponseWriter, *http.Request)) http.Handler {
return LimitHandler(lmt, http.HandlerFunc(nextFunc))
}

View file

@ -6,13 +6,9 @@
## Installation
go get -u github.com/fogleman/gg
go get github.com/fogleman/gg
Alternatively, you may use gopkg.in to grab a specific major-version:
go get -u gopkg.in/fogleman/gg.v1
## Documentation
## GoDoc
https://godoc.org/github.com/fogleman/gg
@ -155,6 +151,8 @@ It is often desired to rotate or scale about a point that is not the origin. The
`InvertY` is provided in case Y should increase from bottom to top vs. the default top to bottom.
Note: transforms do not currently affect `DrawImage` or `DrawString`.
## Stack Functions
Save and restore the state of the context. These can be nested.
@ -189,6 +187,12 @@ SavePNG(path string, im image.Image) error
![Separator](http://i.imgur.com/fsUvnPB.png)
## How Do it Do?
`gg` is mostly a wrapper around `github.com/golang/freetype/raster`. The goal
is to provide some more functionality and a nicer API that will suffice for
most use cases.
## Another Example
See the output of this example below.

View file

@ -4,15 +4,14 @@ package gg
import (
"image"
"image/color"
"image/draw"
"image/png"
"io"
"math"
"github.com/golang/freetype/raster"
"golang.org/x/image/draw"
"golang.org/x/image/font"
"golang.org/x/image/font/basicfont"
"golang.org/x/image/math/f64"
)
type LineCap int
@ -565,6 +564,7 @@ func (dc *Context) DrawRegularPolygon(n int, x, y, r, rotation float64) {
}
// DrawImage draws the specified image at the specified point.
// Currently, rotation and scaling transforms are not supported.
func (dc *Context) DrawImage(im image.Image, x, y int) {
dc.DrawImageAnchored(im, x, y, 0, 0)
}
@ -576,17 +576,12 @@ func (dc *Context) DrawImageAnchored(im image.Image, x, y int, ax, ay float64) {
s := im.Bounds().Size()
x -= int(ax * float64(s.X))
y -= int(ay * float64(s.Y))
transformer := draw.BiLinear
fx, fy := float64(x), float64(y)
m := dc.matrix.Translate(fx, fy)
s2d := f64.Aff3{m.XX, m.XY, m.X0, m.YX, m.YY, m.Y0}
p := image.Pt(x, y)
r := image.Rectangle{p, p.Add(s)}
if dc.mask == nil {
transformer.Transform(dc.im, s2d, im, im.Bounds(), draw.Over, nil)
draw.Draw(dc.im, r, im, image.ZP, draw.Over)
} else {
transformer.Transform(dc.im, s2d, im, im.Bounds(), draw.Over, &draw.Options{
DstMask: dc.mask,
DstMaskP: image.ZP,
})
draw.DrawMask(dc.im, r, im, image.ZP, dc.mask, p, draw.Over)
}
}
@ -613,34 +608,11 @@ func (dc *Context) drawString(im *image.RGBA, s string, x, y float64) {
Face: dc.fontFace,
Dot: fixp(x, y),
}
// based on Drawer.DrawString() in golang.org/x/image/font/font.go
prevC := rune(-1)
for _, c := range s {
if prevC >= 0 {
d.Dot.X += d.Face.Kern(prevC, c)
}
dr, mask, maskp, advance, ok := d.Face.Glyph(d.Dot, c)
if !ok {
// TODO: is falling back on the U+FFFD glyph the responsibility of
// the Drawer or the Face?
// TODO: set prevC = '\ufffd'?
continue
}
sr := dr.Sub(dr.Min)
transformer := draw.BiLinear
fx, fy := float64(dr.Min.X), float64(dr.Min.Y)
m := dc.matrix.Translate(fx, fy)
s2d := f64.Aff3{m.XX, m.XY, m.X0, m.YX, m.YY, m.Y0}
transformer.Transform(d.Dst, s2d, d.Src, sr, draw.Over, &draw.Options{
SrcMask: mask,
SrcMaskP: maskp,
})
d.Dot.X += advance
prevC = c
}
d.DrawString(s)
}
// DrawString draws the specified text at the specified point.
// Currently, rotation and scaling transforms are not supported.
func (dc *Context) DrawString(s string, x, y float64) {
dc.DrawStringAnchored(s, x, y, 0, 0)
}
@ -650,6 +622,7 @@ func (dc *Context) DrawString(s string, x, y float64) {
// text. Use ax=0.5, ay=0.5 to center the text at the specified point.
func (dc *Context) DrawStringAnchored(s string, x, y, ax, ay float64) {
w, h := dc.MeasureString(s)
x, y = dc.TransformPoint(x, y)
x -= ax * w
y += ay * h
if dc.mask == nil {

View file

@ -209,6 +209,7 @@ func (g *GlyphBuf) load(recursion uint32, i Index, useMyMetrics bool) (err error
g.addPhantomsAndScale(len(g.Points), len(g.Points), true, true)
copy(g.phantomPoints[:], g.Points[len(g.Points)-4:])
g.Points = g.Points[:len(g.Points)-4]
// TODO: also trim g.InFontUnits and g.Unhinted?
return nil
}
@ -282,6 +283,10 @@ func (g *GlyphBuf) loadSimple(glyf []byte, ne int) (program []byte) {
program = glyf[offset : offset+instrLen]
offset += instrLen
if ne == 0 {
return program
}
np0 := len(g.Points)
np1 := np0 + int(g.Ends[len(g.Ends)-1])

View file

@ -57,7 +57,8 @@ const (
// A 32-bit encoding consists of a most-significant 16-bit Platform ID and a
// least-significant 16-bit Platform Specific ID. The magic numbers are
// specified at https://www.microsoft.com/typography/otspec/name.htm
unicodeEncoding = 0x00000003 // PID = 0 (Unicode), PSID = 3 (Unicode 2.0)
unicodeEncodingBMPOnly = 0x00000003 // PID = 0 (Unicode), PSID = 3 (Unicode 2.0 BMP Only)
unicodeEncodingFull = 0x00000004 // PID = 0 (Unicode), PSID = 4 (Unicode 2.0 Full Repertoire)
microsoftSymbolEncoding = 0x00030000 // PID = 3 (Microsoft), PSID = 0 (Symbol)
microsoftUCS2Encoding = 0x00030001 // PID = 3 (Microsoft), PSID = 1 (UCS-2)
microsoftUCS4Encoding = 0x0003000a // PID = 3 (Microsoft), PSID = 10 (UCS-4)
@ -142,7 +143,7 @@ func parseSubtables(table []byte, name string, offset, size int, pred func([]byt
pidPsid := u32(table, offset)
// We prefer the Unicode cmap encoding. Failing to find that, we fall
// back onto the Microsoft cmap encoding.
if pidPsid == unicodeEncoding {
if pidPsid == unicodeEncodingBMPOnly || pidPsid == unicodeEncodingFull {
bestOffset, bestPID, ok = offset, pidPsid>>16, true
break
@ -323,11 +324,20 @@ func (f *Font) parseKern() error {
if version != 0 {
return UnsupportedError(fmt.Sprintf("kern version: %d", version))
}
n, offset := u16(f.kern, offset), offset+2
if n != 1 {
return UnsupportedError(fmt.Sprintf("kern nTables: %d", n))
if n == 0 {
return UnsupportedError("kern nTables: 0")
}
offset += 2
// TODO: support multiple subtables. In practice, almost all .ttf files
// have only one subtable, if they have a kern table at all. But it's not
// impossible. Xolonium Regular (https://fontlibrary.org/en/font/xolonium)
// has 3 subtables. Those subtables appear to be disjoint, rather than
// being the same kerning pairs encoded in three different ways.
//
// For now, we'll use only the first subtable.
offset += 2 // Skip the version.
length, offset := int(u16(f.kern, offset)), offset+2
coverage, offset := u16(f.kern, offset), offset+2
if coverage != 0x0001 {
@ -550,8 +560,7 @@ func parse(ttf []byte, offset int) (font *Font, err error) {
return
}
ttcVersion, offset := u32(ttf, offset), offset+4
if ttcVersion != 0x00010000 {
// TODO: support TTC version 2.0, once I have such a .ttc file to test with.
if ttcVersion != 0x00010000 && ttcVersion != 0x00020000 {
err = FormatError("bad TTC version")
return
}
@ -578,14 +587,15 @@ func parse(ttf []byte, offset int) (font *Font, err error) {
return
}
n, offset := int(u16(ttf, offset)), offset+2
if len(ttf) < 16*n+12 {
offset += 6 // Skip the searchRange, entrySelector and rangeShift.
if len(ttf) < 16*n+offset {
err = FormatError("TTF data is too short")
return
}
f := new(Font)
// Assign the table slices.
for i := 0; i < n; i++ {
x := 16*i + 12
x := 16*i + offset
switch string(ttf[x : x+4]) {
case "cmap":
f.cmap, err = readTable(ttf, ttf[x+8:x+16])

View file

@ -1,18 +1,16 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package r1 implements types and functions for working with geometry in ¹.

View file

@ -1,18 +1,16 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package r1

View file

@ -1,18 +1,16 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package r2 implements types and functions for working with geometry in ².

View file

@ -1,18 +1,16 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package r2
@ -248,9 +246,9 @@ func (r Rect) Intersection(other Rect) Rect {
return Rect{xx, yy}
}
// ApproxEquals returns true if the x- and y-intervals of the two rectangles are
// ApproxEqual returns true if the x- and y-intervals of the two rectangles are
// the same up to the given tolerance.
func (r Rect) ApproxEquals(r2 Rect) bool {
func (r Rect) ApproxEqual(r2 Rect) bool {
return r.X.ApproxEqual(r2.X) && r.Y.ApproxEqual(r2.Y)
}

View file

@ -1,18 +1,16 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package r3 implements types and functions for working with geometry in ³.

View file

@ -1,18 +1,16 @@
/*
Copyright 2016 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package r3
@ -92,16 +90,16 @@ func (v PreciseVector) Vector() Vector {
x, _ := v.X.Float64()
y, _ := v.Y.Float64()
z, _ := v.Z.Float64()
return Vector{x, y, z}
return Vector{x, y, z}.Normalize()
}
// Equals reports whether v and ov are equal.
func (v PreciseVector) Equals(ov PreciseVector) bool {
// Equal reports whether v and ov are equal.
func (v PreciseVector) Equal(ov PreciseVector) bool {
return v.X.Cmp(ov.X) == 0 && v.Y.Cmp(ov.Y) == 0 && v.Z.Cmp(ov.Z) == 0
}
func (v PreciseVector) String() string {
return fmt.Sprintf("(%v, %v, %v)", v.X, v.Y, v.Z)
return fmt.Sprintf("(%10g, %10g, %10g)", v.X, v.Y, v.Z)
}
// Norm2 returns the square of the norm.

View file

@ -1,18 +1,16 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package r3
@ -44,10 +42,11 @@ func (v Vector) Norm2() float64 { return v.Dot(v) }
// Normalize returns a unit vector in the same direction as v.
func (v Vector) Normalize() Vector {
if v == (Vector{0, 0, 0}) {
return v
n2 := v.Norm2()
if n2 == 0 {
return Vector{0, 0, 0}
}
return v.Mul(1 / v.Norm())
return v.Mul(1 / math.Sqrt(n2))
}
// IsUnit returns whether this vector is of approximately unit length.

View file

@ -1,18 +1,16 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s1

View file

@ -1,18 +1,16 @@
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s1
@ -40,8 +38,8 @@ type ChordAngle float64
const (
// NegativeChordAngle represents a chord angle smaller than the zero angle.
// The only valid operations on a NegativeChordAngle are comparisons and
// Angle conversions.
// The only valid operations on a NegativeChordAngle are comparisons,
// Angle conversions, and Successor/Predecessor.
NegativeChordAngle = ChordAngle(-1)
// RightChordAngle represents a chord angle of 90 degrees (a "right angle").
@ -50,6 +48,9 @@ const (
// StraightChordAngle represents a chord angle of 180 degrees (a "straight angle").
// This is the maximum finite chord angle.
StraightChordAngle = ChordAngle(4)
// maxLength2 is the square of the maximum length allowed in a ChordAngle.
maxLength2 = 4.0
)
// ChordAngleFromAngle returns a ChordAngle from the given Angle.
@ -65,10 +66,10 @@ func ChordAngleFromAngle(a Angle) ChordAngle {
}
// ChordAngleFromSquaredLength returns a ChordAngle from the squared chord length.
// Note that the argument is automatically clamped to a maximum of 4.0 to
// Note that the argument is automatically clamped to a maximum of 4 to
// handle possible roundoff errors. The argument must be non-negative.
func ChordAngleFromSquaredLength(length2 float64) ChordAngle {
if length2 > 4 {
if length2 > maxLength2 {
return StraightChordAngle
}
return ChordAngle(length2)
@ -84,7 +85,7 @@ func (c ChordAngle) Expanded(e float64) ChordAngle {
if c.isSpecial() {
return c
}
return ChordAngle(math.Max(0.0, math.Min(4.0, float64(c)+e)))
return ChordAngle(math.Max(0.0, math.Min(maxLength2, float64(c)+e)))
}
// Angle converts this ChordAngle to an Angle.
@ -99,7 +100,8 @@ func (c ChordAngle) Angle() Angle {
}
// InfChordAngle returns a chord angle larger than any finite chord angle.
// The only valid operations on an InfChordAngle are comparisons and Angle conversions.
// The only valid operations on an InfChordAngle are comparisons, Angle
// conversions, and Successor/Predecessor.
func InfChordAngle() ChordAngle {
return ChordAngle(math.Inf(1))
}
@ -116,7 +118,41 @@ func (c ChordAngle) isSpecial() bool {
// isValid reports whether this ChordAngle is valid or not.
func (c ChordAngle) isValid() bool {
return (c >= 0 && c <= 4) || c.isSpecial()
return (c >= 0 && c <= maxLength2) || c.isSpecial()
}
// Successor returns the smallest representable ChordAngle larger than this one.
// This can be used to convert a "<" comparison to a "<=" comparison.
//
// Note the following special cases:
// NegativeChordAngle.Successor == 0
// StraightChordAngle.Successor == InfChordAngle
// InfChordAngle.Successor == InfChordAngle
func (c ChordAngle) Successor() ChordAngle {
if c >= maxLength2 {
return InfChordAngle()
}
if c < 0 {
return 0
}
return ChordAngle(math.Nextafter(float64(c), 10.0))
}
// Predecessor returns the largest representable ChordAngle less than this one.
//
// Note the following special cases:
// InfChordAngle.Predecessor == StraightChordAngle
// ChordAngle(0).Predecessor == NegativeChordAngle
// NegativeChordAngle.Predecessor == NegativeChordAngle
func (c ChordAngle) Predecessor() ChordAngle {
if c <= 0 {
return NegativeChordAngle
}
if c > maxLength2 {
return StraightChordAngle
}
return ChordAngle(math.Nextafter(float64(c), -10.0))
}
// MaxPointError returns the maximum error size for a ChordAngle constructed
@ -125,9 +161,10 @@ func (c ChordAngle) isValid() bool {
// the true distance after the points are projected to lie exactly on the sphere.
func (c ChordAngle) MaxPointError() float64 {
// There is a relative error of (2.5*dblEpsilon) when computing the squared
// distance, plus an absolute error of (16 * dblEpsilon**2) because the
// lengths of the input points may differ from 1 by up to (2*dblEpsilon) each.
return 2.5*dblEpsilon*float64(c) + 16*dblEpsilon*dblEpsilon
// distance, plus a relative error of 2 * dblEpsilon, plus an absolute error
// of (16 * dblEpsilon**2) because the lengths of the input points may differ
// from 1 by up to (2*dblEpsilon) each. (This is the maximum error in Normalize).
return 4.5*dblEpsilon*float64(c) + 16*dblEpsilon*dblEpsilon
}
// MaxAngleError returns the maximum error for a ChordAngle constructed
@ -150,7 +187,7 @@ func (c ChordAngle) Add(other ChordAngle) ChordAngle {
}
// Clamp the angle sum to at most 180 degrees.
if c+other >= 4 {
if c+other >= maxLength2 {
return StraightChordAngle
}
@ -161,7 +198,7 @@ func (c ChordAngle) Add(other ChordAngle) ChordAngle {
// cos(X) = sqrt(1 - sin^2(X))
x := float64(c * (1 - 0.25*other))
y := float64(other * (1 - 0.25*c))
return ChordAngle(math.Min(4.0, x+y+2*math.Sqrt(x*y)))
return ChordAngle(math.Min(maxLength2, x+y+2*math.Sqrt(x*y)))
}
// Sub subtracts the other ChordAngle from this one and returns the resulting

View file

@ -1,18 +1,16 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package s1 implements types and functions for working with geometry in S¹ (circular geometry).

View file

@ -1,18 +1,16 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s1

53
vendor/github.com/golang/geo/s2/bits_go18.go generated vendored Normal file
View file

@ -0,0 +1,53 @@
// Copyright 2018 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.9
package s2
// This file is for the bit manipulation code pre-Go 1.9.
// findMSBSetNonZero64 returns the index (between 0 and 63) of the most
// significant set bit. Passing zero to this function returns zero.
func findMSBSetNonZero64(x uint64) int {
val := []uint64{0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000, 0xFFFFFFFF00000000}
shift := []uint64{1, 2, 4, 8, 16, 32}
var msbPos uint64
for i := 5; i >= 0; i-- {
if x&val[i] != 0 {
x >>= shift[i]
msbPos |= shift[i]
}
}
return int(msbPos)
}
const deBruijn64 = 0x03f79d71b4ca8b09
const digitMask = uint64(1<<64 - 1)
var deBruijn64Lookup = []byte{
0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
}
// findLSBSetNonZero64 returns the index (between 0 and 63) of the least
// significant set bit. Passing zero to this function returns zero.
//
// This code comes from trailingZeroBits in https://golang.org/src/math/big/nat.go
// which references (Knuth, volume 4, section 7.3.1).
func findLSBSetNonZero64(x uint64) int {
return int(deBruijn64Lookup[((x&-x)*(deBruijn64&digitMask))>>58])
}

39
vendor/github.com/golang/geo/s2/bits_go19.go generated vendored Normal file
View file

@ -0,0 +1,39 @@
// Copyright 2018 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.9
package s2
// This file is for the bit manipulation code post-Go 1.9.
import "math/bits"
// findMSBSetNonZero64 returns the index (between 0 and 63) of the most
// significant set bit. Passing zero to this function return zero.
func findMSBSetNonZero64(x uint64) int {
if x == 0 {
return 0
}
return 63 - bits.LeadingZeros64(x)
}
// findLSBSetNonZero64 returns the index (between 0 and 63) of the least
// significant set bit. Passing zero to this function return zero.
func findLSBSetNonZero64(x uint64) int {
if x == 0 {
return 0
}
return bits.TrailingZeros64(x)
}

View file

@ -1,23 +1,22 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
import (
"fmt"
"io"
"math"
"github.com/golang/geo/r1"
@ -416,6 +415,30 @@ func (c Cap) intersects(cell Cell, vertices [4]Point) bool {
return false
}
// CellUnionBound computes a covering of the Cap. In general the covering
// consists of at most 4 cells except for very large caps, which may need
// up to 6 cells. The output is not sorted.
func (c Cap) CellUnionBound() []CellID {
// TODO(roberts): The covering could be made quite a bit tighter by mapping
// the cap to a rectangle in (i,j)-space and finding a covering for that.
// Find the maximum level such that the cap contains at most one cell vertex
// and such that CellID.AppendVertexNeighbors() can be called.
level := MinWidthMetric.MaxLevel(c.Radius().Radians()) - 1
// If level < 0, more than three face cells are required.
if level < 0 {
cellIDs := make([]CellID, 6)
for face := 0; face < 6; face++ {
cellIDs[face] = CellIDFromFace(face)
}
return cellIDs
}
// The covering consists of the 4 cells at the given level that share the
// cell vertex that is closest to the cap center.
return cellIDFromPoint(c.center).VertexNeighbors(level)
}
// Centroid returns the true centroid of the cap multiplied by its surface area
// The result lies on the ray from the origin through the cap's center, but it
// is not unit length. Note that if you just want the "surface centroid", i.e.
@ -466,3 +489,31 @@ func (c Cap) Union(other Cap) Cap {
resCenter := InterpolateAtDistance(0.5*(distance-cRadius+otherRadius), c.center, other.center)
return CapFromCenterAngle(resCenter, resRadius)
}
// Encode encodes the Cap.
func (c Cap) Encode(w io.Writer) error {
e := &encoder{w: w}
c.encode(e)
return e.err
}
func (c Cap) encode(e *encoder) {
e.writeFloat64(c.center.X)
e.writeFloat64(c.center.Y)
e.writeFloat64(c.center.Z)
e.writeFloat64(float64(c.radius))
}
// Decode decodes the Cap.
func (c *Cap) Decode(r io.Reader) error {
d := &decoder{r: asByteReader(r)}
c.decode(d)
return d.err
}
func (c *Cap) decode(d *decoder) {
c.center.X = d.readFloat64()
c.center.Y = d.readFloat64()
c.center.Z = d.readFloat64()
c.radius = s1.ChordAngle(d.readFloat64())
}

View file

@ -1,26 +1,26 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
import (
"io"
"math"
"github.com/golang/geo/r1"
"github.com/golang/geo/r2"
"github.com/golang/geo/r3"
"github.com/golang/geo/s1"
)
@ -62,6 +62,11 @@ func (c Cell) Face() int {
return int(c.face)
}
// oppositeFace returns the face opposite the given face.
func oppositeFace(face int) int {
return (face + 3) % 6
}
// Level returns the level of this cell.
func (c Cell) Level() int {
return int(c.level)
@ -77,11 +82,16 @@ func (c Cell) IsLeaf() bool {
return c.level == maxLevel
}
// SizeIJ returns the CellID value for the cells level.
// SizeIJ returns the edge length of this cell in (i,j)-space.
func (c Cell) SizeIJ() int {
return sizeIJ(int(c.level))
}
// SizeST returns the edge length of this cell in (s,t)-space.
func (c Cell) SizeST() float64 {
return c.id.sizeST(int(c.level))
}
// Vertex returns the k-th vertex of the cell (k = 0,1,2,3) in CCW order
// (lower left, lower right, upper right, upper left in the UV plane).
func (c Cell) Vertex(k int) Point {
@ -212,7 +222,13 @@ func (c Cell) ContainsCell(oc Cell) bool {
return c.id.Contains(oc.id)
}
// latitude returns the latitude of the cell vertex given by (i,j), where "i" and "j" are either 0 or 1.
// CellUnionBound computes a covering of the Cell.
func (c Cell) CellUnionBound() []CellID {
return c.CapBound().CellUnionBound()
}
// latitude returns the latitude of the cell vertex in radians given by (i,j),
// where i and j indicate the Hi (1) or Lo (0) corner.
func (c Cell) latitude(i, j int) float64 {
var u, v float64
switch {
@ -229,12 +245,13 @@ func (c Cell) latitude(i, j int) float64 {
u = c.uv.X.Hi
v = c.uv.Y.Hi
default:
panic("i and/or j is out of bound")
panic("i and/or j is out of bounds")
}
return latitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians()
}
// longitude returns the longitude of the cell vertex given by (i,j), where "i" and "j" are either 0 or 1.
// longitude returns the longitude of the cell vertex in radians given by (i,j),
// where i and j indicate the Hi (1) or Lo (0) corner.
func (c Cell) longitude(i, j int) float64 {
var u, v float64
switch {
@ -251,7 +268,7 @@ func (c Cell) longitude(i, j int) float64 {
u = c.uv.X.Hi
v = c.uv.Y.Hi
default:
panic("i and/or j is out of bound")
panic("i and/or j is out of bounds")
}
return longitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians()
}
@ -378,8 +395,304 @@ func (c Cell) ContainsPoint(p Point) bool {
return c.uv.ExpandedByMargin(dblEpsilon).ContainsPoint(uv)
}
// BUG(roberts): Differences from C++:
// Subdivide
// BoundUV
// Distance/DistanceToEdge
// VertexChordDistance
// Encode encodes the Cell.
func (c Cell) Encode(w io.Writer) error {
e := &encoder{w: w}
c.encode(e)
return e.err
}
func (c Cell) encode(e *encoder) {
c.id.encode(e)
}
// Decode decodes the Cell.
func (c *Cell) Decode(r io.Reader) error {
d := &decoder{r: asByteReader(r)}
c.decode(d)
return d.err
}
func (c *Cell) decode(d *decoder) {
c.id.decode(d)
*c = CellFromCellID(c.id)
}
// vertexChordDist2 returns the squared chord distance from point P to the
// given corner vertex specified by the Hi or Lo values of each.
func (c Cell) vertexChordDist2(p Point, xHi, yHi bool) s1.ChordAngle {
x := c.uv.X.Lo
y := c.uv.Y.Lo
if xHi {
x = c.uv.X.Hi
}
if yHi {
y = c.uv.Y.Hi
}
return ChordAngleBetweenPoints(p, PointFromCoords(x, y, 1))
}
// uEdgeIsClosest reports whether a point P is closer to the interior of the specified
// Cell edge (either the lower or upper edge of the Cell) or to the endpoints.
func (c Cell) uEdgeIsClosest(p Point, vHi bool) bool {
u0 := c.uv.X.Lo
u1 := c.uv.X.Hi
v := c.uv.Y.Lo
if vHi {
v = c.uv.Y.Hi
}
// These are the normals to the planes that are perpendicular to the edge
// and pass through one of its two endpoints.
dir0 := r3.Vector{v*v + 1, -u0 * v, -u0}
dir1 := r3.Vector{v*v + 1, -u1 * v, -u1}
return p.Dot(dir0) > 0 && p.Dot(dir1) < 0
}
// vEdgeIsClosest reports whether a point P is closer to the interior of the specified
// Cell edge (either the right or left edge of the Cell) or to the endpoints.
func (c Cell) vEdgeIsClosest(p Point, uHi bool) bool {
v0 := c.uv.Y.Lo
v1 := c.uv.Y.Hi
u := c.uv.X.Lo
if uHi {
u = c.uv.X.Hi
}
dir0 := r3.Vector{-u * v0, u*u + 1, -v0}
dir1 := r3.Vector{-u * v1, u*u + 1, -v1}
return p.Dot(dir0) > 0 && p.Dot(dir1) < 0
}
// edgeDistance reports the distance from a Point P to a given Cell edge. The point
// P is given by its dot product, and the uv edge by its normal in the
// given coordinate value.
func edgeDistance(ij, uv float64) s1.ChordAngle {
// Let P by the target point and let R be the closest point on the given
// edge AB. The desired distance PR can be expressed as PR^2 = PQ^2 + QR^2
// where Q is the point P projected onto the plane through the great circle
// through AB. We can compute the distance PQ^2 perpendicular to the plane
// from "dirIJ" (the dot product of the target point P with the edge
// normal) and the squared length the edge normal (1 + uv**2).
pq2 := (ij * ij) / (1 + uv*uv)
// We can compute the distance QR as (1 - OQ) where O is the sphere origin,
// and we can compute OQ^2 = 1 - PQ^2 using the Pythagorean theorem.
// (This calculation loses accuracy as angle POQ approaches Pi/2.)
qr := 1 - math.Sqrt(1-pq2)
return s1.ChordAngleFromSquaredLength(pq2 + qr*qr)
}
// distanceInternal reports the distance from the given point to the interior of
// the cell if toInterior is true or to the boundary of the cell otherwise.
func (c Cell) distanceInternal(targetXYZ Point, toInterior bool) s1.ChordAngle {
// All calculations are done in the (u,v,w) coordinates of this cell's face.
target := faceXYZtoUVW(int(c.face), targetXYZ)
// Compute dot products with all four upward or rightward-facing edge
// normals. dirIJ is the dot product for the edge corresponding to axis
// I, endpoint J. For example, dir01 is the right edge of the Cell
// (corresponding to the upper endpoint of the u-axis).
dir00 := target.X - target.Z*c.uv.X.Lo
dir01 := target.X - target.Z*c.uv.X.Hi
dir10 := target.Y - target.Z*c.uv.Y.Lo
dir11 := target.Y - target.Z*c.uv.Y.Hi
inside := true
if dir00 < 0 {
inside = false // Target is to the left of the cell
if c.vEdgeIsClosest(target, false) {
return edgeDistance(-dir00, c.uv.X.Lo)
}
}
if dir01 > 0 {
inside = false // Target is to the right of the cell
if c.vEdgeIsClosest(target, true) {
return edgeDistance(dir01, c.uv.X.Hi)
}
}
if dir10 < 0 {
inside = false // Target is below the cell
if c.uEdgeIsClosest(target, false) {
return edgeDistance(-dir10, c.uv.Y.Lo)
}
}
if dir11 > 0 {
inside = false // Target is above the cell
if c.uEdgeIsClosest(target, true) {
return edgeDistance(dir11, c.uv.Y.Hi)
}
}
if inside {
if toInterior {
return s1.ChordAngle(0)
}
// Although you might think of Cells as rectangles, they are actually
// arbitrary quadrilaterals after they are projected onto the sphere.
// Therefore the simplest approach is just to find the minimum distance to
// any of the four edges.
return minChordAngle(edgeDistance(-dir00, c.uv.X.Lo),
edgeDistance(dir01, c.uv.X.Hi),
edgeDistance(-dir10, c.uv.Y.Lo),
edgeDistance(dir11, c.uv.Y.Hi))
}
// Otherwise, the closest point is one of the four cell vertices. Note that
// it is *not* trivial to narrow down the candidates based on the edge sign
// tests above, because (1) the edges don't meet at right angles and (2)
// there are points on the far side of the sphere that are both above *and*
// below the cell, etc.
return minChordAngle(c.vertexChordDist2(target, false, false),
c.vertexChordDist2(target, true, false),
c.vertexChordDist2(target, false, true),
c.vertexChordDist2(target, true, true))
}
// Distance reports the distance from the cell to the given point. Returns zero if
// the point is inside the cell.
func (c Cell) Distance(target Point) s1.ChordAngle {
return c.distanceInternal(target, true)
}
// MaxDistance reports the maximum distance from the cell (including its interior) to the
// given point.
func (c Cell) MaxDistance(target Point) s1.ChordAngle {
// First check the 4 cell vertices. If all are within the hemisphere
// centered around target, the max distance will be to one of these vertices.
targetUVW := faceXYZtoUVW(int(c.face), target)
maxDist := maxChordAngle(c.vertexChordDist2(targetUVW, false, false),
c.vertexChordDist2(targetUVW, true, false),
c.vertexChordDist2(targetUVW, false, true),
c.vertexChordDist2(targetUVW, true, true))
if maxDist <= s1.RightChordAngle {
return maxDist
}
// Otherwise, find the minimum distance dMin to the antipodal point and the
// maximum distance will be pi - dMin.
return s1.StraightChordAngle - c.BoundaryDistance(Point{target.Mul(-1)})
}
// BoundaryDistance reports the distance from the cell boundary to the given point.
func (c Cell) BoundaryDistance(target Point) s1.ChordAngle {
return c.distanceInternal(target, false)
}
// DistanceToEdge returns the minimum distance from the cell to the given edge AB. Returns
// zero if the edge intersects the cell interior.
func (c Cell) DistanceToEdge(a, b Point) s1.ChordAngle {
	// Possible optimizations:
	//  - Currently the (cell vertex, edge endpoint) distances are computed
	//    twice each, and the length of AB is computed 4 times.
	//  - To fix this, refactor GetDistance(target) so that it skips calculating
	//    the distance to each cell vertex. Instead, compute the cell vertices
	//    and distances in this function, and add a low-level UpdateMinDistance
	//    that allows the XA, XB, and AB distances to be passed in.
	//  - It might also be more efficient to do all calculations in UVW-space,
	//    since this would involve transforming 2 points rather than 4.

	// First, check the minimum distance to the edge endpoints A and B.
	// (This also detects whether either endpoint is inside the cell.)
	minDist := minChordAngle(c.Distance(a), c.Distance(b))
	if minDist == 0 {
		return minDist
	}

	// Otherwise, check whether the edge crosses the cell boundary.
	// Seeding the crosser with Vertex(3) and then feeding vertices 0..3
	// walks all four boundary edges, closing the loop back at Vertex(3).
	crosser := NewChainEdgeCrosser(a, b, c.Vertex(3))
	for i := 0; i < 4; i++ {
		if crosser.ChainCrossingSign(c.Vertex(i)) != DoNotCross {
			return 0
		}
	}

	// Finally, check whether the minimum distance occurs between a cell vertex
	// and the interior of the edge AB. (Some of this work is redundant, since
	// it also checks the distance to the endpoints A and B again.)
	//
	// Note that we don't need to check the distance from the interior of AB to
	// the interior of a cell edge, because the only way that this distance can
	// be minimal is if the two edges cross (already checked above).
	for i := 0; i < 4; i++ {
		minDist, _ = UpdateMinDistance(c.Vertex(i), a, b, minDist)
	}
	return minDist
}
// MaxDistanceToEdge returns the maximum distance from the cell (including its
// interior) to the given edge AB.
func (c Cell) MaxDistanceToEdge(a, b Point) s1.ChordAngle {
	// When both endpoints are at most π/2 from the cell, the maximum over the
	// whole edge is simply the larger of the two endpoint distances.
	endpointMax := maxChordAngle(c.MaxDistance(a), c.MaxDistance(b))
	if endpointMax <= s1.RightChordAngle {
		return endpointMax
	}

	// Otherwise reflect the edge through the origin and use the identity
	// maxDist(cell, AB) = π - minDist(cell, -A-B).
	return s1.StraightChordAngle - c.DistanceToEdge(Point{a.Mul(-1)}, Point{b.Mul(-1)})
}
// DistanceToCell returns the minimum distance from this cell to the given cell.
// It returns zero if one cell contains the other.
func (c Cell) DistanceToCell(target Cell) s1.ChordAngle {
	// Intersecting cells are at distance zero. Comparing the (u,v) ranges
	// rather than using CellID intersection means that cells sharing only a
	// partial edge or a corner still count as intersecting.
	if c.face == target.face && c.uv.Intersects(target.uv) {
		return 0
	}

	// Otherwise the minimum always occurs between a vertex of one cell and an
	// edge of the other (including the edge endpoints) — 32 candidate pairs.
	//
	// TODO(roberts): This could be optimized to be at least 5x faster by pruning
	// the set of possible closest vertex/edge pairs using the faces and (u,v)
	// ranges of both cells.
	var cv, tv [4]Point
	for k := 0; k < 4; k++ {
		cv[k] = c.Vertex(k)
		tv[k] = target.Vertex(k)
	}

	best := s1.InfChordAngle()
	for i := 0; i < 4; i++ {
		for j := 0; j < 4; j++ {
			best, _ = UpdateMinDistance(cv[i], tv[j], tv[(j+1)&3], best)
			best, _ = UpdateMinDistance(tv[i], cv[j], cv[(j+1)&3], best)
		}
	}
	return best
}
// MaxDistanceToCell returns the maximum distance from the cell (including its
// interior) to the given target cell.
func (c Cell) MaxDistanceToCell(target Cell) s1.ChordAngle {
	// Need to check the antipodal target for intersection with the cell. If it
	// intersects, the distance is the straight ChordAngle.
	// antipodalUV is the transpose of the original UV, interpreted within the opposite face.
	// (Note the swapped X/Y intervals below implement that transpose.)
	antipodalUV := r2.Rect{target.uv.Y, target.uv.X}
	if int(c.face) == oppositeFace(int(target.face)) && c.uv.Intersects(antipodalUV) {
		return s1.StraightChordAngle
	}

	// Otherwise, the maximum distance always occurs between a vertex of one
	// cell and an edge of the other cell (including the edge endpoints). This
	// represents a total of 32 possible (vertex, edge) pairs.
	//
	// TODO(roberts): When the maximum distance is at most π/2, the maximum is
	// always attained between a pair of vertices, and this could be made much
	// faster by testing each vertex pair once rather than the current 4 times.
	var va, vb [4]Point
	for i := 0; i < 4; i++ {
		va[i] = c.Vertex(i)
		vb[i] = target.Vertex(i)
	}
	// Take the max over every (vertex, edge) pair in both directions;
	// (j+1)&3 wraps to close each cell's four-edge loop.
	maxDist := s1.NegativeChordAngle
	for i := 0; i < 4; i++ {
		for j := 0; j < 4; j++ {
			maxDist, _ = UpdateMaxDistance(va[i], vb[j], vb[(j+1)&3], maxDist)
			maxDist, _ = UpdateMaxDistance(vb[i], va[j], va[(j+1)&3], maxDist)
		}
	}
	return maxDist
}

View file

@ -1,25 +1,25 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
import (
"bytes"
"fmt"
"io"
"math"
"sort"
"strconv"
"strings"
@ -55,16 +55,41 @@ import (
// discrete point, it is better to use Cells.
type CellID uint64
// SentinelCellID is an invalid cell ID guaranteed to be larger than any
// valid cell ID. It is used primarily by ShapeIndex. The value is also used
// by some S2 types when encoding data.
// Note that the sentinel's RangeMin == RangeMax == itself.
const SentinelCellID = CellID(^uint64(0))
// sortCellIDs sorts the slice of CellIDs in place.
func sortCellIDs(ci []CellID) {
sort.Sort(cellIDs(ci))
}
// cellIDs implements the Sort interface for slices of CellIDs.
type cellIDs []CellID
func (c cellIDs) Len() int { return len(c) }
func (c cellIDs) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
func (c cellIDs) Less(i, j int) bool { return c[i] < c[j] }
// TODO(dsymonds): Some of these constants should probably be exported.
const (
faceBits = 3
numFaces = 6
// This is the number of levels needed to specify a leaf cell.
maxLevel = 30
// The extra position bit (61 rather than 60) lets us encode each cell as its
// Hilbert curve position at the cell center (which is halfway along the
// portion of the Hilbert curve that fills that cell).
posBits = 2*maxLevel + 1
maxSize = 1 << maxLevel
posBits = 2*maxLevel + 1
// The maximum index of a valid leaf cell plus one. The range of valid leaf
// cell indices is [0..maxSize-1].
maxSize = 1 << maxLevel
wrapOffset = uint64(numFaces) << posBits
)
@ -424,11 +449,33 @@ func (ci CellID) AdvanceWrap(steps int64) CellID {
return CellID(uint64(ci) + (uint64(steps) << shift))
}
// Encode encodes the CellID.
func (ci CellID) Encode(w io.Writer) error {
e := &encoder{w: w}
ci.encode(e)
return e.err
}
func (ci CellID) encode(e *encoder) {
e.writeUint64(uint64(ci))
}
// Decode encodes the CellID.
func (ci *CellID) Decode(r io.Reader) error {
d := &decoder{r: asByteReader(r)}
ci.decode(d)
return d.err
}
func (ci *CellID) decode(d *decoder) {
*ci = CellID(d.readUint64())
}
// TODO: the methods below are not exported yet. Settle on the entire API design
// before doing this. Do we want to mirror the C++ one as closely as possible?
// distanceFromBegin returns the number of steps that this cell is from the first
// node in the S2 heirarchy at our level. (i.e., FromFace(0).ChildBeginAtLevel(ci.Level())).
// node in the S2 hierarchy at our level. (i.e., FromFace(0).ChildBeginAtLevel(ci.Level())).
// The return value is always non-negative.
func (ci CellID) distanceFromBegin() int64 {
return int64(ci >> uint64(2*(maxLevel-ci.Level())+1))
@ -442,7 +489,7 @@ func (ci CellID) rawPoint() r3.Vector {
}
// faceSiTi returns the Face/Si/Ti coordinates of the center of the cell.
func (ci CellID) faceSiTi() (face, si, ti int) {
func (ci CellID) faceSiTi() (face int, si, ti uint32) {
face, i, j, _ := ci.faceIJOrientation()
delta := 0
if ci.IsLeaf() {
@ -452,7 +499,7 @@ func (ci CellID) faceSiTi() (face, si, ti int) {
delta = 2
}
}
return face, 2*i + delta, 2*j + delta
return face, uint32(2*i + delta), uint32(2*j + delta)
}
// faceIJOrientation uses the global lookupIJ table to unfiddle the bits of ci.
@ -461,8 +508,16 @@ func (ci CellID) faceIJOrientation() (f, i, j, orientation int) {
orientation = f & swapMask
nbits := maxLevel - 7*lookupBits // first iteration
// Each iteration maps 8 bits of the Hilbert curve position into
// 4 bits of "i" and "j". The lookup table transforms a key of the
// form "ppppppppoo" to a value of the form "iiiijjjjoo", where the
// letters [ijpo] represents bits of "i", "j", the Hilbert curve
// position, and the Hilbert curve orientation respectively.
//
// On the first iteration we need to be careful to clear out the bits
// representing the cube face.
for k := 7; k >= 0; k-- {
orientation += (int(uint64(ci)>>uint64(k*2*lookupBits+1)) & ((1 << uint((2 * nbits))) - 1)) << 2
orientation += (int(uint64(ci)>>uint64(k*2*lookupBits+1)) & ((1 << uint(2*nbits)) - 1)) << 2
orientation = lookupIJ[orientation]
i += (orientation >> (lookupBits + 2)) << uint(k*lookupBits)
j += ((orientation >> 2) & ((1 << lookupBits) - 1)) << uint(k*lookupBits)
@ -470,6 +525,13 @@ func (ci CellID) faceIJOrientation() (f, i, j, orientation int) {
nbits = lookupBits // following iterations
}
// The position of a non-leaf cell at level "n" consists of a prefix of
// 2*n bits that identifies the cell, followed by a suffix of
// 2*(maxLevel-n)+1 bits of the form 10*. If n==maxLevel, the suffix is
// just "1" and has no effect. Otherwise, it consists of "10", followed
// by (maxLevel-n-1) repetitions of "00", followed by "0". The "10" has
// no effect, while each occurrence of "00" has the effect of reversing
// the swapMask bit.
if ci.lsb()&0x1111111111111110 != 0 {
orientation ^= swapMask
}
@ -506,8 +568,8 @@ func cellIDFromFaceIJWrap(f, i, j int) CellID {
// Convert i and j to the coordinates of a leaf cell just beyond the
// boundary of this face. This prevents 32-bit overflow in the case
// of finding the neighbors of a face cell.
i = clamp(i, -1, maxSize)
j = clamp(j, -1, maxSize)
i = clampInt(i, -1, maxSize)
j = clampInt(j, -1, maxSize)
// We want to wrap these coordinates onto the appropriate adjacent face.
// The easiest way to do this is to convert the (i,j) coordinates to (x,y,z)
@ -540,17 +602,6 @@ func cellIDFromFaceIJSame(f, i, j int, sameFace bool) CellID {
return cellIDFromFaceIJWrap(f, i, j)
}
// clamp returns number closest to x within the range min..max.
func clamp(x, min, max int) int {
if x < min {
return min
}
if x > max {
return max
}
return x
}
// ijToSTMin converts the i- or j-index of a leaf cell to the minimum corresponding
// s- or t-value contained by that cell. The argument must be in the range
// [0..2**30], i.e. up to one position beyond the normal range of valid leaf
@ -561,7 +612,7 @@ func ijToSTMin(i int) float64 {
// stToIJ converts value in ST coordinates to a value in IJ coordinates.
func stToIJ(s float64) int {
return clamp(int(math.Floor(maxSize*s)), 0, maxSize-1)
return clampInt(int(math.Floor(maxSize*s)), 0, maxSize-1)
}
// cellIDFromPoint returns a leaf cell containing point p. Usually there is
@ -606,6 +657,21 @@ const (
invertMask = 0x02
)
// The following lookup tables are used to convert efficiently between an
// (i,j) cell index and the corresponding position along the Hilbert curve.
//
// lookupPos maps 4 bits of "i", 4 bits of "j", and 2 bits representing the
// orientation of the current cell into 8 bits representing the order in which
// that subcell is visited by the Hilbert curve, plus 2 bits indicating the
// new orientation of the Hilbert curve within that subcell. (Cell
// orientations are represented as combination of swapMask and invertMask.)
//
// lookupIJ is an inverted table used for mapping in the opposite
// direction.
//
// We also experimented with looking up 16 bits at a time (14 bits of position
// plus 2 of orientation) but found that smaller lookup tables gave better
// performance. (2KB fits easily in the primary cache.)
var (
ijToPos = [4][4]int{
{0, 1, 3, 2}, // canonical order
@ -668,40 +734,6 @@ func (ci CellID) CommonAncestorLevel(other CellID) (level int, ok bool) {
return (60 - msbPos) >> 1, true
}
// findMSBSetNonZero64 returns the index (between 0 and 63) of the most
// significant set bit. Passing zero to this function has undefined behavior.
func findMSBSetNonZero64(bits uint64) int {
val := []uint64{0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000, 0xFFFFFFFF00000000}
shift := []uint64{1, 2, 4, 8, 16, 32}
var msbPos uint64
for i := 5; i >= 0; i-- {
if bits&val[i] != 0 {
bits >>= shift[i]
msbPos |= shift[i]
}
}
return int(msbPos)
}
const deBruijn64 = 0x03f79d71b4ca8b09
const digitMask = uint64(1<<64 - 1)
var deBruijn64Lookup = []byte{
0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
}
// findLSBSetNonZero64 returns the index (between 0 and 63) of the least
// significant set bit. Passing zero to this function has undefined behavior.
//
// This code comes from trailingZeroBits in https://golang.org/src/math/big/nat.go
// which references (Knuth, volume 4, section 7.3.1).
func findLSBSetNonZero64(bits uint64) int {
return int(deBruijn64Lookup[((bits&-bits)*(deBruijn64&digitMask))>>58])
}
// Advance advances or retreats the indicated number of steps along the
// Hilbert curve at the current level, and returns the new position. The
// position is never advanced past End() or before Begin().
@ -731,7 +763,7 @@ func (ci CellID) Advance(steps int64) CellID {
// centerST return the center of the CellID in (s,t)-space.
func (ci CellID) centerST() r2.Point {
_, si, ti := ci.faceSiTi()
return r2.Point{siTiToST(uint64(si)), siTiToST(uint64(ti))}
return r2.Point{siTiToST(si), siTiToST(ti)}
}
// sizeST returns the edge length of this CellID in (s,t)-space at the given level.
@ -751,7 +783,7 @@ func (ci CellID) boundST() r2.Rect {
// the (u,v) rectangle covered by the cell.
func (ci CellID) centerUV() r2.Point {
_, si, ti := ci.faceSiTi()
return r2.Point{stToUV(siTiToST(uint64(si))), stToUV(siTiToST(uint64(ti)))}
return r2.Point{stToUV(siTiToST(si)), stToUV(siTiToST(ti))}
}
// boundUV returns the bound of this CellID in (u,v)-space.

View file

@ -1,23 +1,25 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
import (
"fmt"
"io"
"sort"
"github.com/golang/geo/s1"
)
// A CellUnion is a collection of CellIDs.
@ -26,6 +28,9 @@ import (
// Specifically, it may not contain the same CellID twice, nor a CellID that
// is contained by another, nor the four sibling CellIDs that are children of
// a single higher level CellID.
//
// CellUnions are not required to be normalized, but certain operations will
// return different results if they are not (e.g. Contains).
type CellUnion []CellID
// CellUnionFromRange creates a CellUnion that covers the half-open range
@ -38,12 +43,157 @@ func CellUnionFromRange(begin, end CellID) CellUnion {
for id := begin.MaxTile(end); id != end; id = id.Next().MaxTile(end) {
cu = append(cu, id)
}
// The output is normalized because the cells are added in order by the iteration.
return cu
}
// CellUnionFromUnion creates a CellUnion from the union of the given CellUnions.
func CellUnionFromUnion(cellUnions ...CellUnion) CellUnion {
	// Concatenate all inputs, then normalize to restore sorted,
	// non-overlapping form.
	var out CellUnion
	for _, u := range cellUnions {
		out = append(out, u...)
	}
	out.Normalize()
	return out
}
// CellUnionFromIntersection creates a CellUnion from the intersection of the given CellUnions.
//
// NOTE(review): assumes x and y are normalized (sorted, non-overlapping) —
// confirm against callers.
func CellUnionFromIntersection(x, y CellUnion) CellUnion {
	var cu CellUnion

	// This is a fairly efficient calculation that uses binary search to skip
	// over sections of both input vectors. It takes constant time if all the
	// cells of x come before or after all the cells of y in CellID order.
	var i, j int
	for i < len(x) && j < len(y) {
		iMin := x[i].RangeMin()
		jMin := y[j].RangeMin()
		if iMin > jMin {
			// Either j.Contains(i) or the two cells are disjoint.
			if x[i] <= y[j].RangeMax() {
				cu = append(cu, x[i])
				i++
			} else {
				// Advance j to the first cell possibly contained by x[i].
				j = y.lowerBound(j+1, len(y), iMin)
				// The previous cell y[j-1] may now contain x[i].
				if x[i] <= y[j-1].RangeMax() {
					j--
				}
			}
		} else if jMin > iMin {
			// Identical to the code above with i and j reversed.
			if y[j] <= x[i].RangeMax() {
				cu = append(cu, y[j])
				j++
			} else {
				i = x.lowerBound(i+1, len(x), jMin)
				if y[j] <= x[i-1].RangeMax() {
					i--
				}
			}
		} else {
			// i and j have the same RangeMin(), so one contains the other.
			// Keep the smaller (i.e. contained) cell and advance past it.
			if x[i] < y[j] {
				cu = append(cu, x[i])
				i++
			} else {
				cu = append(cu, y[j])
				j++
			}
		}
	}

	// The output is generated in sorted order.
	cu.Normalize()
	return cu
}
// CellUnionFromIntersectionWithCellID creates a CellUnion from the intersection
// of a CellUnion with the given CellID. This can be useful for splitting a
// CellUnion into chunks.
func CellUnionFromIntersectionWithCellID(x CellUnion, id CellID) CellUnion {
	var out CellUnion

	// If id is fully contained by x, the intersection is exactly id.
	if x.ContainsCellID(id) {
		out = append(out, id)
		out.Normalize()
		return out
	}

	// Otherwise collect every cell of x that falls within id's leaf range.
	last := id.RangeMax()
	for k := x.lowerBound(0, len(x), id.RangeMin()); k < len(x) && x[k] <= last; k++ {
		out = append(out, x[k])
	}

	out.Normalize()
	return out
}
// CellUnionFromDifference creates a CellUnion from the difference (x - y)
// of the given CellUnions.
func CellUnionFromDifference(x, y CellUnion) CellUnion {
	// TODO(roberts): This is approximately O(N*log(N)), but could probably
	// use similar techniques as CellUnionFromIntersectionWithCellID to be more efficient.
	var out CellUnion
	for _, id := range x {
		out.cellUnionDifferenceInternal(id, &y)
	}

	// The output is generated in sorted order, and there should not be any
	// cells that can be merged (provided that both inputs were normalized).
	return out
}
// The C++ constructor methods FromNormalized and FromVerbatim are not necessary
// since they don't call Normalize, and just set the CellIDs directly on the object,
// so straight casting is sufficient in Go to replicate this behavior.
// IsValid reports whether the cell union is valid, meaning that the CellIDs are
// valid, non-overlapping, and sorted in increasing order.
func (cu *CellUnion) IsValid() bool {
	for i, cid := range *cu {
		if !cid.IsValid() {
			return false
		}
		// Each cell's leaf range must start strictly after the previous
		// cell's leaf range ends.
		if i > 0 && (*cu)[i-1].RangeMax() >= cid.RangeMin() {
			return false
		}
	}
	return true
}
// IsNormalized reports whether the cell union is normalized, meaning that it
// satisfies IsValid and that no four cells have a common parent.
// Certain operations such as Contains will return a different
// result if the cell union is not normalized.
func (cu *CellUnion) IsNormalized() bool {
	for i, cid := range *cu {
		if !cid.IsValid() {
			return false
		}
		// Cells must be sorted and non-overlapping (as in IsValid).
		if i > 0 && (*cu)[i-1].RangeMax() >= cid.RangeMin() {
			return false
		}
		// Additionally, no run of four consecutive cells may share a parent.
		if i >= 3 && areSiblings((*cu)[i-3], (*cu)[i-2], (*cu)[i-1], cid) {
			return false
		}
	}
	return true
}
// Normalize normalizes the CellUnion.
func (cu *CellUnion) Normalize() {
sort.Sort(byID(*cu))
sortCellIDs(*cu)
output := make([]CellID, 0, len(*cu)) // the list of accepted cells
// Loop invariant: output is a sorted list of cells with no redundancy.
@ -76,24 +226,8 @@ func (cu *CellUnion) Normalize() {
// See if the last three cells plus this one can be collapsed.
// We loop because collapsing three accepted cells and adding a higher level cell
// could cascade into previously accepted cells.
for len(output) >= 3 {
fin := output[len(output)-3:]
// fast XOR test; a necessary but not sufficient condition
if fin[0]^fin[1]^fin[2]^ci != 0 {
break
}
// more expensive test; exact.
// Compute the two bit mask for the encoded child position,
// then see if they all agree.
mask := CellID(ci.lsb() << 1)
mask = ^(mask + mask<<1)
should := ci & mask
if (fin[0]&mask != should) || (fin[1]&mask != should) || (fin[2]&mask != should) || ci.isFace() {
break
}
for len(output) >= 3 && areSiblings(output[len(output)-3], output[len(output)-2], output[len(output)-1], ci) {
// Replace four children by their parent cell.
output = output[:len(output)-3]
ci = ci.immediateParent() // checked !ci.isFace above
}
@ -102,9 +236,7 @@ func (cu *CellUnion) Normalize() {
*cu = output
}
// IntersectsCellID reports whether this cell union intersects the given cell ID.
//
// This method assumes that the CellUnion has been normalized.
// IntersectsCellID reports whether this CellUnion intersects the given cell ID.
func (cu *CellUnion) IntersectsCellID(id CellID) bool {
// Find index of array item that occurs directly after our probe cell:
i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] })
@ -115,10 +247,12 @@ func (cu *CellUnion) IntersectsCellID(id CellID) bool {
return i != 0 && (*cu)[i-1].RangeMax() >= id.RangeMin()
}
// ContainsCellID reports whether the cell union contains the given cell ID.
// ContainsCellID reports whether the CellUnion contains the given cell ID.
// Containment is defined with respect to regions, e.g. a cell contains its 4 children.
//
// This method assumes that the CellUnion has been normalized.
// CAVEAT: If you have constructed a non-normalized CellUnion, note that groups
// of 4 child cells are *not* considered to contain their parent cell. To get
// this behavior you must call Normalize() explicitly.
func (cu *CellUnion) ContainsCellID(id CellID) bool {
// Find index of array item that occurs directly after our probe cell:
i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] })
@ -129,12 +263,6 @@ func (cu *CellUnion) ContainsCellID(id CellID) bool {
return i != 0 && (*cu)[i-1].RangeMax() >= id
}
type byID []CellID
func (cu byID) Len() int { return len(cu) }
func (cu byID) Less(i, j int) bool { return cu[i] < cu[j] }
func (cu byID) Swap(i, j int) { cu[i], cu[j] = cu[j], cu[i] }
// Denormalize replaces this CellUnion with an expanded version of the
// CellUnion where any cell whose level is less than minLevel or where
// (level - minLevel) is not a multiple of levelMod is replaced by its
@ -223,6 +351,11 @@ func (cu *CellUnion) ContainsPoint(p Point) bool {
return cu.ContainsCell(CellFromPoint(p))
}
// CellUnionBound computes a covering of the CellUnion.
func (cu *CellUnion) CellUnionBound() []CellID {
	// Delegate to the covering of this union's bounding cap.
	return cu.CapBound().CellUnionBound()
}
// LeafCellsCovered reports the number of leaf cells covered by this cell union.
// This will be no more than 6*2^60 for the whole sphere.
func (cu *CellUnion) LeafCellsCovered() int64 {
@ -233,9 +366,225 @@ func (cu *CellUnion) LeafCellsCovered() int64 {
return numLeaves
}
// BUG: Differences from C++:
// Contains(CellUnion)/Intersects(CellUnion)
// Union(CellUnion)/Intersection(CellUnion)/Difference(CellUnion)
// Expand
// ContainsPoint
// AverageArea/ApproxArea/ExactArea
// areSiblings reports whether the given four cells have a common parent.
// This requires that the four CellIDs are distinct.
func areSiblings(a, b, c, d CellID) bool {
	// A necessary (but not sufficient) condition is that the XOR of the
	// four cell IDs must be zero. This is also very fast to test.
	if (a ^ b ^ c) != d {
		return false
	}

	// Now we do a slightly more expensive but exact test. First, compute a
	// mask that blocks out the two bits that encode the child position of
	// "id" with respect to its parent, then check that the other three
	// children all agree with "mask".
	mask := uint64(d.lsb() << 1)
	mask = ^(mask + (mask << 1))
	idMasked := (uint64(d) & mask)
	// A face cell has no parent, so it can never be a sibling.
	return ((uint64(a)&mask) == idMasked &&
		(uint64(b)&mask) == idMasked &&
		(uint64(c)&mask) == idMasked &&
		!d.isFace())
}
// Contains reports whether this CellUnion contains all of the CellIDs of the given CellUnion.
func (cu *CellUnion) Contains(o CellUnion) bool {
	// TODO(roberts): Investigate alternatives such as divide-and-conquer
	// or alternating-skip-search that may be significantly faster in both
	// the average and worst case. This applies to Intersects as well.
	for _, cid := range o {
		if !cu.ContainsCellID(cid) {
			return false
		}
	}

	return true
}
// Intersects reports whether this CellUnion intersects any of the CellIDs of the given CellUnion.
func (cu *CellUnion) Intersects(o CellUnion) bool {
	// The unions intersect iff any cell of this union is contained by o.
	for _, cid := range *cu {
		if o.ContainsCellID(cid) {
			return true
		}
	}

	return false
}
// lowerBound returns the index in this CellUnion to the first element whose value
// is not considered to go before the given cell id. (i.e., either it is equivalent
// or comes after the given id.) If there is no match, then end is returned.
//
// Since CellUnions are kept sorted, this uses a binary search over the
// [begin, end) window rather than a linear scan; callers such as
// CellUnionFromIntersection rely on this to skip over sections of the
// input in logarithmic time.
func (cu *CellUnion) lowerBound(begin, end int, id CellID) int {
	return begin + sort.Search(end-begin, func(i int) bool { return (*cu)[begin+i] >= id })
}
// cellUnionDifferenceInternal adds the difference between the CellID and the union to
// the result CellUnion. If they intersect but the difference is non-empty, it divides
// and conquers.
func (cu *CellUnion) cellUnionDifferenceInternal(id CellID, other *CellUnion) {
	// Disjoint: the whole cell survives the subtraction.
	if !other.IntersectsCellID(id) {
		(*cu) = append((*cu), id)
		return
	}

	// Partially covered: recurse into the four children. If other fully
	// contains id, nothing is contributed at all.
	if !other.ContainsCellID(id) {
		for _, child := range id.Children() {
			cu.cellUnionDifferenceInternal(child, other)
		}
	}
}
// ExpandAtLevel expands this CellUnion by adding a rim of cells at expandLevel
// around the unions boundary.
//
// For each cell c in the union, we add all cells at level
// expandLevel that abut c. There are typically eight of those
// (four edge-abutting and four sharing a vertex). However, if c is
// finer than expandLevel, we add all cells abutting
// c.Parent(expandLevel) as well as c.Parent(expandLevel) itself,
// as an expandLevel cell rarely abuts a smaller cell.
//
// Note that the size of the output is exponential in
// expandLevel. For example, if expandLevel == 20 and the input
// has a cell at level 10, there will be on the order of 4000
// adjacent cells in the output. For most applications the
// ExpandByRadius method below is easier to use.
func (cu *CellUnion) ExpandAtLevel(level int) {
	var output CellUnion
	levelLsb := lsbForLevel(level)
	// Iterate backwards so that the containment-skip below (which decrements
	// i inside the loop body) can consume runs of finer cells.
	for i := len(*cu) - 1; i >= 0; i-- {
		id := (*cu)[i]
		// A cell finer than level is promoted to its level-"level" parent.
		if id.lsb() < levelLsb {
			id = id.Parent(level)
			// Optimization: skip over any cells contained by this one. This is
			// especially important when very small regions are being expanded.
			for i > 0 && id.Contains((*cu)[i-1]) {
				i--
			}
		}
		output = append(output, id)
		output = append(output, id.AllNeighbors(level)...)
	}
	sortCellIDs(output)

	*cu = output
	cu.Normalize()
}
// ExpandByRadius expands this CellUnion such that it contains all points whose
// distance to the CellUnion is at most minRadius, but do not use cells that
// are more than maxLevelDiff levels higher than the largest cell in the input.
// The second parameter controls the tradeoff between accuracy and output size
// when a large region is being expanded by a small amount (e.g. expanding Canada
// by 1km). For example, if maxLevelDiff == 4 the region will always be expanded
// by approximately 1/16 the width of its largest cell. Note that in the worst case,
// the number of cells in the output can be up to 4 * (1 + 2 ** maxLevelDiff) times
// larger than the number of cells in the input.
func (cu *CellUnion) ExpandByRadius(minRadius s1.Angle, maxLevelDiff int) {
	// minLevel ends up as the coarsest (smallest) level present in the union.
	minLevel := maxLevel
	for _, cid := range *cu {
		minLevel = minInt(minLevel, cid.Level())
	}

	// Find the maximum level such that all cells are at least "minRadius" wide.
	radiusLevel := MinWidthMetric.MaxLevel(minRadius.Radians())
	if radiusLevel == 0 && minRadius.Radians() > MinWidthMetric.Value(0) {
		// The requested expansion is greater than the width of a face cell.
		// The easiest way to handle this is to expand twice.
		cu.ExpandAtLevel(0)
	}
	cu.ExpandAtLevel(minInt(minLevel+maxLevelDiff, radiusLevel))
}
// Equal reports whether the two CellUnions are equal.
func (cu CellUnion) Equal(o CellUnion) bool {
	if len(cu) != len(o) {
		return false
	}
	// Element-wise comparison; unions are equal iff every position matches.
	for i, id := range cu {
		if id != o[i] {
			return false
		}
	}
	return true
}
// AverageArea returns the average area of this CellUnion.
// This is accurate to within a factor of 1.7.
func (cu *CellUnion) AverageArea() float64 {
	// Scale the average leaf-cell area by the number of leaves covered.
	return float64(cu.LeafCellsCovered()) * AvgAreaMetric.Value(maxLevel)
}
// ApproxArea returns the approximate area of this CellUnion. This method is
// accurate to within 3% for all cell sizes and accurate to within 0.1% for
// cells at level 5 or higher within the union.
func (cu *CellUnion) ApproxArea() float64 {
	var total float64
	for _, cid := range *cu {
		total += CellFromCellID(cid).ApproxArea()
	}
	return total
}
// ExactArea returns the area of this CellUnion as accurately as possible.
func (cu *CellUnion) ExactArea() float64 {
	var total float64
	for _, cid := range *cu {
		total += CellFromCellID(cid).ExactArea()
	}
	return total
}
// Encode encodes the CellUnion to the given writer.
func (cu *CellUnion) Encode(w io.Writer) error {
	e := &encoder{w: w}
	cu.encode(e)
	// Any write failure is recorded on the encoder and surfaced here.
	return e.err
}
// encode writes the encoding version, the cell count, and each CellID in turn.
func (cu *CellUnion) encode(e *encoder) {
	e.writeInt8(encodingVersion)
	e.writeInt64(int64(len(*cu)))
	for _, ci := range *cu {
		ci.encode(e)
	}
}
// Decode decodes the CellUnion from the given reader.
func (cu *CellUnion) Decode(r io.Reader) error {
	d := &decoder{r: asByteReader(r)}
	cu.decode(d)
	// Any read or validation failure is recorded on the decoder.
	return d.err
}
// decode reads and validates the encoding version and cell count, then decodes
// each CellID into this union. On any failure, d.err is set and cu is left in
// an unspecified state.
func (cu *CellUnion) decode(d *decoder) {
	version := d.readInt8()
	if d.err != nil {
		return
	}
	// Reject payloads written with an unsupported format version.
	if version != encodingVersion {
		d.err = fmt.Errorf("only version %d is supported", encodingVersion)
		return
	}
	n := d.readInt64()
	if d.err != nil {
		return
	}
	// Guard against hostile or corrupt input requesting a huge allocation.
	const maxCells = 1000000
	if n > maxCells {
		d.err = fmt.Errorf("too many cells (%d; max is %d)", n, maxCells)
		return
	}
	*cu = make([]CellID, n)
	for i := range *cu {
		(*cu)[i].decode(d)
	}
}

View file

@ -0,0 +1,63 @@
// Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
// ContainsVertexQuery is used to track the edges entering and leaving the
// given vertex of a Polygon in order to be able to determine if the point is
// contained by the Polygon.
//
// Point containment is defined according to the semi-open boundary model
// which means that if several polygons tile the region around a vertex,
// then exactly one of those polygons contains that vertex.
type ContainsVertexQuery struct {
	// target is the vertex whose containment is being determined.
	target Point
	// edgeMap accumulates, per neighboring vertex, the signed sum of edge
	// directions added via AddEdge (+1 outgoing, -1 incoming); a value of
	// zero marks a matched sibling pair.
	edgeMap map[Point]int
}
// NewContainsVertexQuery returns a new query for the given vertex whose
// containment will be determined.
func NewContainsVertexQuery(target Point) *ContainsVertexQuery {
	q := &ContainsVertexQuery{target: target}
	q.edgeMap = make(map[Point]int)
	return q
}
// AddEdge adds the edge between target and v with the given direction.
// (+1 = outgoing, -1 = incoming, 0 = degenerate).
func (q *ContainsVertexQuery) AddEdge(v Point, direction int) {
	// Opposite-direction edges to the same vertex cancel to zero,
	// marking a matched sibling pair.
	q.edgeMap[v] += direction
}
// ContainsVertex reports a +1 if the target vertex is contained, -1 if it is
// not contained, and 0 if the incident edges consisted of matched sibling pairs.
func (q *ContainsVertexQuery) ContainsVertex() int {
	// Find the unmatched edge that is immediately clockwise from Ortho(P).
	referenceDir := Point{q.target.Ortho()}
	// bestDir tracks the direction of the best unmatched edge found so far;
	// it remains 0 when every incident edge was part of a matched pair.
	bestPoint := referenceDir
	bestDir := 0
	for k, v := range q.edgeMap {
		if v == 0 {
			continue // This is a "matched" edge.
		}
		if OrderedCCW(referenceDir, bestPoint, k, q.target) {
			bestPoint = k
			bestDir = v
		}
	}
	return bestDir
}

410
vendor/github.com/golang/geo/s2/crossing_edge_query.go generated vendored Normal file
View file

@ -0,0 +1,410 @@
// Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
import (
"sort"
"github.com/golang/geo/r2"
)
// CrossingEdgeQuery is used to find the Edge IDs of Shapes that are crossed by
// a given edge(s).
//
// Note that if you need to query many edges, it is more efficient to declare
// a single CrossingEdgeQuery instance and reuse it.
//
// If you want to find *all* the pairs of crossing edges, it is more efficient to
// use the not yet implemented VisitCrossings in shapeutil.
type CrossingEdgeQuery struct {
	// index is the ShapeIndex this query operates over.
	index *ShapeIndex

	// temporary values used while processing a query.
	a, b r2.Point
	iter *ShapeIndexIterator

	// candidate cells generated when finding crossings.
	cells []*ShapeIndexCell
}
// NewCrossingEdgeQuery creates a CrossingEdgeQuery for the given index.
func NewCrossingEdgeQuery(index *ShapeIndex) *CrossingEdgeQuery {
	return &CrossingEdgeQuery{
		index: index,
		iter:  index.Iterator(),
	}
}
// Crossings returns the set of edge of the shape S that intersect the given edge AB.
// If the CrossingType is Interior, then only intersections at a point interior to both
// edges are reported, while if it is CrossingTypeAll then edges that share a vertex
// are also reported.
func (c *CrossingEdgeQuery) Crossings(a, b Point, shape Shape, crossType CrossingType) []int {
	edges := c.candidates(a, b, shape)
	if len(edges) == 0 {
		return nil
	}

	crosser := NewEdgeCrosser(a, b)

	// Filter the candidate edge ids in place, keeping only real crossings.
	out := 0
	n := len(edges)
	for in := 0; in < n; in++ {
		// Named edge (not b) to avoid shadowing the query endpoint parameter b,
		// and for consistency with CrossingsEdgeMap below.
		edge := shape.Edge(edges[in])
		sign := crosser.CrossingSign(edge.V0, edge.V1)
		if crossType == CrossingTypeAll && (sign == MaybeCross || sign == Cross) || crossType != CrossingTypeAll && sign == Cross {
			edges[out] = edges[in]
			out++
		}
	}

	if out < n {
		edges = edges[0:out]
	}
	return edges
}
// EdgeMap stores a sorted set of edge ids for each shape.
// It is the result type of CrossingsEdgeMap and candidatesEdgeMap.
type EdgeMap map[Shape][]int
// CrossingsEdgeMap returns the set of all edges in the index that intersect the given
// edge AB. If crossType is CrossingTypeInterior, then only intersections at a
// point interior to both edges are reported, while if it is CrossingTypeAll
// then edges that share a vertex are also reported.
//
// The edges are returned as a mapping from shape to the edges of that shape
// that intersect AB. Every returned shape has at least one crossing edge.
func (c *CrossingEdgeQuery) CrossingsEdgeMap(a, b Point, crossType CrossingType) EdgeMap {
	edgeMap := c.candidatesEdgeMap(a, b)
	if len(edgeMap) == 0 {
		return nil
	}

	crosser := NewEdgeCrosser(a, b)
	for shape, edges := range edgeMap {
		// Filter each shape's candidate edges in place, keeping only true crossings.
		out := 0
		n := len(edges)
		for in := 0; in < n; in++ {
			edge := shape.Edge(edges[in])
			sign := crosser.CrossingSign(edge.V0, edge.V1)
			if (crossType == CrossingTypeAll && (sign == MaybeCross || sign == Cross)) || (crossType != CrossingTypeAll && sign == Cross) {
				edgeMap[shape][out] = edges[in]
				out++
			}
		}

		if out == 0 {
			// No candidate survived, so drop the shape entirely; this upholds the
			// guarantee that every returned shape has at least one crossing edge.
			delete(edgeMap, shape)
		} else {
			if out < n {
				edgeMap[shape] = edgeMap[shape][0:out]
			}
		}
	}
	return edgeMap
}
// candidates returns a superset of the edges of the given shape that intersect
// the edge AB.
func (c *CrossingEdgeQuery) candidates(a, b Point, shape Shape) []int {
	var edges []int

	// For small loops it is faster to use brute force. The threshold below was
	// determined using benchmarks.
	const maxBruteForceEdges = 27
	maxEdges := shape.NumEdges()
	if maxEdges <= maxBruteForceEdges {
		// Brute force: every edge id of the shape is a candidate.
		edges = make([]int, maxEdges)
		for i := 0; i < maxEdges; i++ {
			edges[i] = i
		}
		return edges
	}

	// Compute the set of index cells intersected by the query edge.
	c.getCellsForEdge(a, b)
	if len(c.cells) == 0 {
		return nil
	}

	// Gather all the edges that intersect those cells and sort them.
	// TODO(roberts): Shapes don't track their ID, so we need to range over
	// the index to find the ID manually.
	var shapeID int32
	for k, v := range c.index.shapes {
		if v == shape {
			shapeID = k
		}
	}

	for _, cell := range c.cells {
		if cell == nil {
			// Skip missing cells. Previously this check had an empty body, so a
			// nil cell would have fallen through and panicked on the call below.
			continue
		}
		clipped := cell.findByShapeID(shapeID)
		if clipped == nil {
			continue
		}
		edges = append(edges, clipped.edges...)
	}

	// An edge can intersect several cells, so deduplicate when more than one
	// cell contributed candidates.
	if len(c.cells) > 1 {
		edges = uniqueInts(edges)
	}
	return edges
}
// uniqueInts returns the sorted, deduplicated values from the given input.
func uniqueInts(in []int) []int {
	var out []int
	seen := make(map[int]bool, len(in))
	for _, v := range in {
		if seen[v] {
			continue
		}
		seen[v] = true
		out = append(out, v)
	}
	sort.Ints(out)
	return out
}
// candidatesEdgeMap returns a map from shapes to the superset of edges for that
// shape that intersect the edge AB.
//
// CAVEAT: This method may return shapes that have an empty set of candidate edges.
// However the return value is non-empty only if at least one shape has a candidate edge.
func (c *CrossingEdgeQuery) candidatesEdgeMap(a, b Point) EdgeMap {
	edgeMap := make(EdgeMap, 0)

	// If there are only a few edges then it's faster to use brute force. We
	// only bother with this optimization when there is a single shape.
	if len(c.index.shapes) == 1 {
		// Typically this method is called many times, so it is worth checking
		// whether the edge map is empty or already consists of a single entry for
		// this shape, and skip clearing edge map in that case.
		shape := c.index.Shape(0)

		// Note that we leave the edge map non-empty even if there are no candidates
		// (i.e., there is a single entry with an empty set of edges).
		edgeMap[shape] = c.candidates(a, b, shape)
		return edgeMap
	}

	// Compute the set of index cells intersected by the query edge.
	c.getCellsForEdge(a, b)
	if len(c.cells) == 0 {
		return edgeMap
	}

	// Gather all the edges that intersect those cells and sort them.
	for _, cell := range c.cells {
		for _, clipped := range cell.shapes {
			s := c.index.Shape(clipped.shapeID)
			for j := 0; j < clipped.numEdges(); j++ {
				edgeMap[s] = append(edgeMap[s], clipped.edges[j])
			}
		}
	}

	// An edge can intersect several cells, so sort and deduplicate per shape
	// when more than one cell contributed candidates.
	if len(c.cells) > 1 {
		for s, edges := range edgeMap {
			edgeMap[s] = uniqueInts(edges)
		}
	}
	return edgeMap
}
// getCells returns the set of ShapeIndexCells that might contain edges intersecting
// the edge AB in the given cell root. This method is used primarily by loop and shapeutil.
func (c *CrossingEdgeQuery) getCells(a, b Point, root *PaddedCell) []*ShapeIndexCell {
	// NOTE(review): unlike getCellsForEdge, this method does not reset c.cells
	// first — presumably callers rely on a fresh query or clear it themselves;
	// verify against the call sites.
	aUV, bUV, ok := ClipToFace(a, b, root.id.Face())
	if ok {
		c.a = aUV
		c.b = bUV
		edgeBound := r2.RectFromPoints(c.a, c.b)
		if root.Bound().Intersects(edgeBound) {
			c.computeCellsIntersected(root, edgeBound)
		}
	}

	if len(c.cells) == 0 {
		return nil
	}
	return c.cells
}
// getCellsForEdge populates the cells field to the set of index cells intersected by an edge AB.
func (c *CrossingEdgeQuery) getCellsForEdge(a, b Point) {
	c.cells = nil

	// The edge may span several cube faces; handle each face segment separately.
	segments := FaceSegments(a, b)
	for _, segment := range segments {
		c.a = segment.a
		c.b = segment.b

		// Optimization: rather than always starting the recursive subdivision at
		// the top level face cell, instead we start at the smallest S2CellId that
		// contains the edge (the edge root cell). This typically lets us skip
		// quite a few levels of recursion since most edges are short.
		edgeBound := r2.RectFromPoints(c.a, c.b)
		pcell := PaddedCellFromCellID(CellIDFromFace(segment.face), 0)
		edgeRoot := pcell.ShrinkToFit(edgeBound)

		// Now we need to determine how the edge root cell is related to the cells
		// in the spatial index (cellMap). There are three cases:
		//
		//  1. edgeRoot is an index cell or is contained within an index cell.
		//     In this case we only need to look at the contents of that cell.
		//  2. edgeRoot is subdivided into one or more index cells. In this case
		//     we recursively subdivide to find the cells intersected by AB.
		//  3. edgeRoot does not intersect any index cells. In this case there
		//     is nothing to do.
		relation := c.iter.LocateCellID(edgeRoot)
		if relation == Indexed {
			// edgeRoot is an index cell or is contained by an index cell (case 1).
			c.cells = append(c.cells, c.iter.IndexCell())
		} else if relation == Subdivided {
			// edgeRoot is subdivided into one or more index cells (case 2). We
			// find the cells intersected by AB using recursive subdivision.
			if !edgeRoot.isFace() {
				pcell = PaddedCellFromCellID(edgeRoot, 0)
			}
			c.computeCellsIntersected(pcell, edgeBound)
		}
		// Case 3 (relation == Disjoint) requires no work for this segment.
	}
}
// computeCellsIntersected computes the index cells intersected by the current
// edge that are descendants of pcell and adds them to this query's set of cells.
func (c *CrossingEdgeQuery) computeCellsIntersected(pcell *PaddedCell, edgeBound r2.Rect) {
	c.iter.seek(pcell.id.RangeMin())
	if c.iter.Done() || c.iter.CellID() > pcell.id.RangeMax() {
		// The index does not contain pcell or any of its descendants.
		return
	}
	if c.iter.CellID() == pcell.id {
		// The index contains this cell exactly.
		c.cells = append(c.cells, c.iter.IndexCell())
		return
	}

	// Otherwise, split the edge among the four children of pcell.
	center := pcell.Middle().Lo()

	if edgeBound.X.Hi < center.X {
		// Edge is entirely contained in the two left children.
		c.clipVAxis(edgeBound, center.Y, 0, pcell)
		return
	} else if edgeBound.X.Lo >= center.X {
		// Edge is entirely contained in the two right children.
		c.clipVAxis(edgeBound, center.Y, 1, pcell)
		return
	}

	childBounds := c.splitUBound(edgeBound, center.X)
	if edgeBound.Y.Hi < center.Y {
		// Edge is entirely contained in the two lower children.
		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 0, 0), childBounds[0])
		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 1, 0), childBounds[1])
	} else if edgeBound.Y.Lo >= center.Y {
		// Edge is entirely contained in the two upper children.
		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 0, 1), childBounds[0])
		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 1, 1), childBounds[1])
	} else {
		// The edge bound spans all four children. The edge itself intersects
		// at most three children (since no padding is being used).
		c.clipVAxis(childBounds[0], center.Y, 0, pcell)
		c.clipVAxis(childBounds[1], center.Y, 1, pcell)
	}
}
// clipVAxis computes the intersected cells recursively for a given padded cell.
// Given either the left (i=0) or right (i=1) side of a padded cell pcell,
// determine whether the current edge intersects the lower child, upper child,
// or both children, and call c.computeCellsIntersected recursively on those children.
// The center is the v-coordinate at the center of pcell.
func (c *CrossingEdgeQuery) clipVAxis(edgeBound r2.Rect, center float64, i int, pcell *PaddedCell) {
	if edgeBound.Y.Hi < center {
		// Edge is entirely contained in the lower child.
		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 0), edgeBound)
	} else if edgeBound.Y.Lo >= center {
		// Edge is entirely contained in the upper child.
		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 1), edgeBound)
	} else {
		// The edge intersects both children; split the bound at center.
		childBounds := c.splitVBound(edgeBound, center)
		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 0), childBounds[0])
		c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 1), childBounds[1])
	}
}
// splitUBound returns the bound for two children as a result of splitting the
// current edge at the given value U.
func (c *CrossingEdgeQuery) splitUBound(edgeBound r2.Rect, u float64) [2]r2.Rect {
	// Compute the V value at the split point, clamped to the edge's V interval.
	v := edgeBound.Y.ClampPoint(interpolateFloat64(u, c.a.X, c.b.X, c.a.Y, c.b.Y))

	// diag indicates which diagonal of the bounding box is spanned by AB:
	// it is 0 if AB has positive slope, and 1 if AB has negative slope.
	var diag int
	if (c.a.X > c.b.X) != (c.a.Y > c.b.Y) {
		diag = 1
	}
	return splitBound(edgeBound, 0, diag, u, v)
}
// splitVBound returns the bound for two children as a result of splitting the
// current edge into two child edges at the given value V.
func (c *CrossingEdgeQuery) splitVBound(edgeBound r2.Rect, v float64) [2]r2.Rect {
	// Compute the U value at the split point, clamped to the edge's U interval.
	u := edgeBound.X.ClampPoint(interpolateFloat64(v, c.a.Y, c.b.Y, c.a.X, c.b.X))

	// diag is 0 if AB has positive slope, 1 if negative (see splitUBound).
	var diag int
	if (c.a.X > c.b.X) != (c.a.Y > c.b.Y) {
		diag = 1
	}
	return splitBound(edgeBound, diag, 0, u, v)
}
// splitBound returns the bounds for the two children as a result of splitting
// the current edge into two child edges at the given point (u,v). uEnd and vEnd
// indicate which bound endpoints of the first child will be updated.
func splitBound(edgeBound r2.Rect, uEnd, vEnd int, u, v float64) [2]r2.Rect {
	// Both children start as copies of the full bound; the split point then
	// replaces one endpoint of each interval on each side.
	var childBounds = [2]r2.Rect{
		edgeBound,
		edgeBound,
	}

	if uEnd == 1 {
		childBounds[0].X.Lo = u
		childBounds[1].X.Hi = u
	} else {
		childBounds[0].X.Hi = u
		childBounds[1].X.Lo = u
	}

	if vEnd == 1 {
		childBounds[0].Y.Lo = v
		childBounds[1].Y.Hi = v
	} else {
		childBounds[0].Y.Hi = v
		childBounds[1].Y.Lo = v
	}

	return childBounds
}

View file

@ -1,18 +1,16 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package s2 implements types and functions for working with geometry in S² (spherical geometry).

672
vendor/github.com/golang/geo/s2/edge_clipping.go generated vendored Normal file
View file

@ -0,0 +1,672 @@
// Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
// This file contains a collection of methods for:
//
// (1) Robustly clipping geodesic edges to the faces of the S2 biunit cube
// (see s2stuv), and
//
// (2) Robustly clipping 2D edges against 2D rectangles.
//
// These functions can be used to efficiently find the set of CellIDs that
// are intersected by a geodesic edge (e.g., see CrossingEdgeQuery).
import (
"math"
"github.com/golang/geo/r1"
"github.com/golang/geo/r2"
"github.com/golang/geo/r3"
)
const (
	// edgeClipErrorUVCoord is the maximum error in a u- or v-coordinate
	// compared to the exact result, assuming that the points A and B are in
	// the rectangle [-1,1]x[-1,1] or slightly outside it (by 1e-10 or less).
	edgeClipErrorUVCoord = 2.25 * dblEpsilon

	// edgeClipErrorUVDist is the maximum distance from a clipped point to
	// the corresponding exact result. It is equal to the error in a single
	// coordinate because at most one coordinate is subject to error.
	edgeClipErrorUVDist = 2.25 * dblEpsilon

	// faceClipErrorRadians is the maximum angle between a returned vertex
	// and the nearest point on the exact edge AB. It is equal to the
	// maximum directional error in PointCross, plus the error when
	// projecting points onto a cube face.
	faceClipErrorRadians = 3 * dblEpsilon

	// faceClipErrorUVDist is the same angle expressed as a maximum distance
	// in (u,v)-space. In other words, a returned vertex is at most this far
	// from the exact edge AB projected into (u,v)-space.
	faceClipErrorUVDist = 9 * dblEpsilon

	// faceClipErrorUVCoord is the maximum angle between a returned vertex
	// and the nearest point on the exact edge AB expressed as the maximum error
	// in an individual u- or v-coordinate. In other words, for each
	// returned vertex there is a point on the exact edge AB whose u- and
	// v-coordinates differ from the vertex by at most this amount.
	faceClipErrorUVCoord = 9.0 * (1.0 / math.Sqrt2) * dblEpsilon

	// intersectsRectErrorUVDist is the maximum error when computing if a point
	// intersects with a given Rect. If some point of AB is inside the
	// rectangle by at least this distance, the result is guaranteed to be true;
	// if all points of AB are outside the rectangle by at least this distance,
	// the result is guaranteed to be false. This bound assumes that rect is
	// a subset of the rectangle [-1,1]x[-1,1] or extends slightly outside it
	// (e.g., by 1e-10 or less).
	intersectsRectErrorUVDist = 3 * math.Sqrt2 * dblEpsilon
)
// ClipToFace returns the (u,v) coordinates for the portion of the edge AB that
// intersects the given face, or false if the edge AB does not intersect.
// This method guarantees that the clipped vertices lie within the [-1,1]x[-1,1]
// cube face rectangle and are within faceClipErrorUVDist of the line AB, but
// the results may differ from those produced by FaceSegments.
func ClipToFace(a, b Point, face int) (aUV, bUV r2.Point, intersects bool) {
	// Equivalent to clipping against the face with zero padding.
	return ClipToPaddedFace(a, b, face, 0.0)
}
// ClipToPaddedFace returns the (u,v) coordinates for the portion of the edge AB that
// intersects the given face, but rather than clipping to the square [-1,1]x[-1,1]
// in (u,v) space, this method clips to [-R,R]x[-R,R] where R=(1+padding).
// Padding must be non-negative.
func ClipToPaddedFace(a, b Point, f int, padding float64) (aUV, bUV r2.Point, intersects bool) {
	// Fast path: both endpoints are on the given face.
	if face(a.Vector) == f && face(b.Vector) == f {
		au, av := validFaceXYZToUV(f, a.Vector)
		bu, bv := validFaceXYZToUV(f, b.Vector)
		// Keyed literals (go vet composites check for imported types).
		return r2.Point{X: au, Y: av}, r2.Point{X: bu, Y: bv}, true
	}

	// Convert everything into the (u,v,w) coordinates of the given face. Note
	// that the cross product *must* be computed in the original (x,y,z)
	// coordinate system because PointCross (unlike the mathematical cross
	// product) can produce different results in different coordinate systems
	// when one argument is a linear multiple of the other, due to the use of
	// symbolic perturbations.
	normUVW := pointUVW(faceXYZtoUVW(f, a.PointCross(b)))
	aUVW := pointUVW(faceXYZtoUVW(f, a))
	bUVW := pointUVW(faceXYZtoUVW(f, b))

	// Padding is handled by scaling the u- and v-components of the normal.
	// Letting R=1+padding, this means that when we compute the dot product of
	// the normal with a cube face vertex (such as (-1,-1,1)), we will actually
	// compute the dot product with the scaled vertex (-R,-R,1). This allows
	// methods such as intersectsFace, exitAxis, etc, to handle padding
	// with no further modifications.
	scaleUV := 1 + padding
	scaledN := pointUVW{r3.Vector{X: scaleUV * normUVW.X, Y: scaleUV * normUVW.Y, Z: normUVW.Z}}
	if !scaledN.intersectsFace() {
		return aUV, bUV, false
	}

	// TODO(roberts): This is a workaround for extremely small vectors where some
	// loss of precision can occur in Normalize causing underflow. When PointCross
	// is updated to work around this, this can be removed.
	if math.Max(math.Abs(normUVW.X), math.Max(math.Abs(normUVW.Y), math.Abs(normUVW.Z))) < math.Ldexp(1, -511) {
		normUVW = pointUVW{normUVW.Mul(math.Ldexp(1, 563))}
	}

	normUVW = pointUVW{normUVW.Normalize()}

	aTan := pointUVW{normUVW.Cross(aUVW.Vector)}
	bTan := pointUVW{bUVW.Cross(normUVW.Vector)}

	// As described in clipDestination, if the sum of the scores from clipping the two
	// endpoints is 3 or more, then the segment does not intersect this face.
	aUV, aScore := clipDestination(bUVW, aUVW, pointUVW{scaledN.Mul(-1)}, bTan, aTan, scaleUV)
	bUV, bScore := clipDestination(aUVW, bUVW, scaledN, aTan, bTan, scaleUV)

	return aUV, bUV, aScore+bScore < 3
}
// ClipEdge returns the portion of the edge defined by AB that is contained by the
// given rectangle. If there is no intersection, false is returned and aClip and bClip
// are undefined.
func ClipEdge(a, b r2.Point, clip r2.Rect) (aClip, bClip r2.Point, intersects bool) {
	// Compute the bounding rectangle of AB, clip it, and then extract the new
	// endpoints from the clipped bound.
	bound := r2.RectFromPoints(a, b)
	if bound, intersects = clipEdgeBound(a, b, clip, bound); !intersects {
		return aClip, bClip, false
	}
	// The clipped edge runs between opposite corners of the clipped bound;
	// which corner pair depends on the sign of AB's slope along each axis.
	ai := 0
	if a.X > b.X {
		ai = 1
	}
	aj := 0
	if a.Y > b.Y {
		aj = 1
	}

	return bound.VertexIJ(ai, aj), bound.VertexIJ(1-ai, 1-aj), true
}
// The three functions below (sumEqual, intersectsFace, intersectsOppositeEdges)
// all compare a sum (u + v) to a third value w. They are implemented in such a
// way that they produce an exact result even though all calculations are done
// with ordinary floating-point operations. Here are the principles on which these
// functions are based:
//
// A. If u + v < w in floating-point, then u + v < w in exact arithmetic.
//
// B. If u + v < w in exact arithmetic, then at least one of the following
// expressions is true in floating-point:
// u + v < w
// u < w - v
// v < w - u
//
// Proof: By rearranging terms and substituting ">" for "<", we can assume
// that all values are non-negative. Now clearly "w" is not the smallest
// value, so assume WLOG that "u" is the smallest. We want to show that
// u < w - v in floating-point. If v >= w/2, the calculation of w - v is
// exact since the result is smaller in magnitude than either input value,
// so the result holds. Otherwise we have u <= v < w/2 and w - v >= w/2
// (even in floating point), so the result also holds.
// sumEqual reports whether u + v == w exactly.
func sumEqual(u, v, w float64) bool {
	// All three floating-point identities must hold for exact equality
	// (see the accompanying proof comment above this function).
	if u+v != w {
		return false
	}
	return u == w-v && v == w-u
}
// pointUVW represents a Point in (u,v,w) coordinate space of a cube face.
// The methods on this type interpret the point as the normal N of a directed
// line L in the face's coordinate frame (see intersectsFace, exitAxis).
type pointUVW Point
// intersectsFace reports whether a given directed line L intersects the cube face F.
// The line L is defined by its normal N in the (u,v,w) coordinates of F.
func (p pointUVW) intersectsFace() bool {
	// L intersects the [-1,1]x[-1,1] square in (u,v) if and only if the dot
	// products of N with the four corner vertices (-1,-1,1), (1,-1,1), (1,1,1),
	// and (-1,1,1) do not all have the same sign. This is true exactly when
	// |Nu| + |Nv| >= |Nw|. The code below evaluates this expression exactly.
	u := math.Abs(p.X)
	v := math.Abs(p.Y)
	w := math.Abs(p.Z)

	// We only need to consider the cases where u or v is the smallest value,
	// since if w is the smallest then both expressions below will have a
	// positive LHS and a negative RHS.
	return (v >= w-u) && (u >= w-v)
}
// intersectsOppositeEdges reports whether a directed line L intersects two
// opposite edges of a cube face F. This includes the case where L passes
// exactly through a corner vertex of F. The directed line L is defined
// by its normal N in the (u,v,w) coordinates of F.
func (p pointUVW) intersectsOppositeEdges() bool {
	// The line L intersects opposite edges of the [-1,1]x[-1,1] (u,v) square if
	// and only exactly two of the corner vertices lie on each side of L. This
	// is true exactly when ||Nu| - |Nv|| >= |Nw|. The code below evaluates this
	// expression exactly.
	u := math.Abs(p.X)
	v := math.Abs(p.Y)
	w := math.Abs(p.Z)

	// If w is the smallest, the following line returns an exact result.
	if math.Abs(u-v) != w {
		return math.Abs(u-v) >= w
	}

	// Otherwise u - v = w exactly, or w is not the smallest value. In either
	// case the following returns the correct result.
	if u >= v {
		return u-w >= v
	}
	return v-w >= u
}
// axis represents the possible results of exitAxis.
type axis int

const (
	// axisU means the line exits through the u=-1 or u=+1 edge of the face.
	axisU axis = iota
	// axisV means the line exits through the v=-1 or v=+1 edge of the face.
	axisV
)
// exitAxis reports which axis the directed line L exits the cube face F on.
// The directed line L is represented by its CCW normal N in the (u,v,w) coordinates
// of F. It returns axisU if L exits through the u=-1 or u=+1 edge, and axisV if L exits
// through the v=-1 or v=+1 edge. Either result is acceptable if L exits exactly
// through a corner vertex of the cube face.
func (p pointUVW) exitAxis() axis {
	if p.intersectsOppositeEdges() {
		// The line passes through opposite edges of the face.
		// It exits through the v=+1 or v=-1 edge if the u-component of N has a
		// larger absolute magnitude than the v-component.
		if math.Abs(p.X) >= math.Abs(p.Y) {
			return axisV
		}
		return axisU
	}

	// The line passes through two adjacent edges of the face.
	// It exits the v=+1 or v=-1 edge if an even number of the components of N
	// are negative. We test this using signbit() rather than multiplication
	// to avoid the possibility of underflow.
	var x, y, z int
	if math.Signbit(p.X) {
		x = 1
	}
	if math.Signbit(p.Y) {
		y = 1
	}
	if math.Signbit(p.Z) {
		z = 1
	}

	// x^y^z == 0 exactly when an even number of components are negative.
	if x^y^z == 0 {
		return axisV
	}
	return axisU
}
// exitPoint returns the UV coordinates of the point where a directed line L (represented
// by the CCW normal of this point), exits the cube face this point is derived from along
// the given axis.
func (p pointUVW) exitPoint(a axis) r2.Point {
	if a == axisU {
		// Exit through u=+1 if the normal's v-component is positive, else u=-1;
		// solve N·(u,v,1) = 0 for v at that u.
		u := -1.0
		if p.Y > 0 {
			u = 1.0
		}
		// Keyed literals (go vet composites check for imported types).
		return r2.Point{X: u, Y: (-u*p.X - p.Z) / p.Y}
	}

	// Exit through v=+1 if the normal's u-component is negative, else v=-1;
	// solve N·(u,v,1) = 0 for u at that v.
	v := -1.0
	if p.X < 0 {
		v = 1.0
	}
	return r2.Point{X: (-v*p.Y - p.Z) / p.X, Y: v}
}
// clipDestination returns a score which is used to indicate if the clipped edge AB
// on the given face intersects the face at all. This function returns the score for
// the given endpoint, which is an integer ranging from 0 to 3. If the sum of the scores
// from both of the endpoints is 3 or more, then edge AB does not intersect this face.
//
// First, it clips the line segment AB to find the clipped destination B' on a given
// face. (The face is specified implicitly by expressing *all arguments* in the (u,v,w)
// coordinates of that face.) Second, it partially computes whether the segment AB
// intersects this face at all. The actual condition is fairly complicated, but it
// turns out that it can be expressed as a "score" that can be computed independently
// when clipping the two endpoints A and B.
func clipDestination(a, b, scaledN, aTan, bTan pointUVW, scaleUV float64) (r2.Point, int) {
	var uv r2.Point

	// Optimization: if B is within the safe region of the face, use it.
	maxSafeUVCoord := 1 - faceClipErrorUVCoord
	if b.Z > 0 {
		// Keyed literals (go vet composites check for imported types).
		uv = r2.Point{X: b.X / b.Z, Y: b.Y / b.Z}
		if math.Max(math.Abs(uv.X), math.Abs(uv.Y)) <= maxSafeUVCoord {
			return uv, 0
		}
	}

	// Otherwise find the point B' where the line AB exits the face.
	uv = scaledN.exitPoint(scaledN.exitAxis()).Mul(scaleUV)

	p := pointUVW(Point{r3.Vector{X: uv.X, Y: uv.Y, Z: 1.0}})

	// Determine if the exit point B' is contained within the segment. We do this
	// by computing the dot products with two inward-facing tangent vectors at A
	// and B. If either dot product is negative, we say that B' is on the "wrong
	// side" of that point. As the point B' moves around the great circle AB past
	// the segment endpoint B, it is initially on the wrong side of B only; as it
	// moves further it is on the wrong side of both endpoints; and then it is on
	// the wrong side of A only. If the exit point B' is on the wrong side of
	// either endpoint, we can't use it; instead the segment is clipped at the
	// original endpoint B.
	//
	// We reject the segment if the sum of the scores of the two endpoints is 3
	// or more. Here is what that rule encodes:
	//  - If B' is on the wrong side of A, then the other clipped endpoint A'
	//    must be in the interior of AB (otherwise AB' would go the wrong way
	//    around the circle). There is a similar rule for A'.
	//  - If B' is on the wrong side of either endpoint (and therefore we must
	//    use the original endpoint B instead), then it must be possible to
	//    project B onto this face (i.e., its w-coordinate must be positive).
	//    This rule is only necessary to handle certain zero-length edges (A=B).
	score := 0
	if p.Sub(a.Vector).Dot(aTan.Vector) < 0 {
		score = 2 // B' is on wrong side of A.
	} else if p.Sub(b.Vector).Dot(bTan.Vector) < 0 {
		score = 1 // B' is on wrong side of B.
	}

	if score > 0 { // B' is not in the interior of AB.
		if b.Z <= 0 {
			score = 3 // B cannot be projected onto this face.
		} else {
			uv = r2.Point{X: b.X / b.Z, Y: b.Y / b.Z}
		}
	}

	return uv, score
}
// updateEndpoint returns the interval with the specified endpoint updated to
// the given value. If the value lies beyond the opposite endpoint, nothing is
// changed and false is returned.
func updateEndpoint(bound r1.Interval, highEndpoint bool, value float64) (r1.Interval, bool) {
	if highEndpoint {
		// Shrinking the upper endpoint: fail if value lies below the interval.
		if bound.Lo > value {
			return bound, false
		}
		if bound.Hi > value {
			bound.Hi = value
		}
		return bound, true
	}

	// Shrinking the lower endpoint: fail if value lies above the interval.
	if bound.Hi < value {
		return bound, false
	}
	if bound.Lo < value {
		bound.Lo = value
	}
	return bound, true
}
// clipBoundAxis returns the clipped versions of the bounding intervals for the given
// axes for the line segment from (a0,a1) to (b0,b1) so that neither extends beyond the
// given clip interval. negSlope is a precomputed helper variable that indicates which
// diagonal of the bounding box is spanned by AB; it is false if AB has positive slope,
// and true if AB has negative slope. If the clipping interval doesn't overlap the bounds,
// false is returned.
func clipBoundAxis(a0, b0 float64, bound0 r1.Interval, a1, b1 float64, bound1 r1.Interval,
	negSlope bool, clip r1.Interval) (bound0c, bound1c r1.Interval, updated bool) {
	if bound0.Lo < clip.Lo {
		// If the upper bound is below the clip's lower bound, there is nothing to do.
		if bound0.Hi < clip.Lo {
			return bound0, bound1, false
		}
		// Narrow the interval's lower bound to the clip bound, and move the
		// other axis to the value interpolated at the new endpoint.
		bound0.Lo = clip.Lo
		if bound1, updated = updateEndpoint(bound1, negSlope, interpolateFloat64(clip.Lo, a0, b0, a1, b1)); !updated {
			return bound0, bound1, false
		}
	}

	if bound0.Hi > clip.Hi {
		// If the lower bound is above the clip's upper bound, there is nothing to do.
		if bound0.Lo > clip.Hi {
			return bound0, bound1, false
		}
		// Narrow the interval's upper bound to the clip bound, updating the
		// other axis symmetrically.
		bound0.Hi = clip.Hi
		if bound1, updated = updateEndpoint(bound1, !negSlope, interpolateFloat64(clip.Hi, a0, b0, a1, b1)); !updated {
			return bound0, bound1, false
		}
	}
	return bound0, bound1, true
}
// edgeIntersectsRect reports whether the edge defined by AB intersects the
// given closed rectangle to within the error bound.
func edgeIntersectsRect(a, b r2.Point, r r2.Rect) bool {
	// First check whether the bounds of a Rect around AB intersects the given rect.
	if !r.Intersects(r2.RectFromPoints(a, b)) {
		return false
	}

	// Otherwise AB intersects the rect if and only if all four vertices of rect
	// do not lie on the same side of the extended line AB. We test this by finding
	// the two vertices of rect with minimum and maximum projections onto the normal
	// of AB, and computing their dot products with the edge normal.
	n := b.Sub(a).Ortho()

	i := 0
	if n.X >= 0 {
		i = 1
	}
	j := 0
	if n.Y >= 0 {
		j = 1
	}

	max := n.Dot(r.VertexIJ(i, j).Sub(a))
	min := n.Dot(r.VertexIJ(1-i, 1-j).Sub(a))

	return (max >= 0) && (min <= 0)
}
// clippedEdgeBound returns the bounding rectangle of the portion of the edge
// AB that is intersected by clip; the result may be empty. It is a convenience
// wrapper around clipEdgeBound.
func clippedEdgeBound(a, b r2.Point, clip r2.Rect) r2.Rect {
	if clipped, ok := clipEdgeBound(a, b, clip, r2.RectFromPoints(a, b)); ok {
		return clipped
	}
	return r2.EmptyRect()
}
// clipEdgeBound clips an edge AB to sequence of rectangles efficiently.
// It represents the clipped edges by their bounding boxes rather than as a pair of
// endpoints. Specifically, let A'B' be some portion of an edge AB, and let bound be
// a tight bound of A'B'. This function returns the bound that is a tight bound
// of A'B' intersected with a given rectangle. If A'B' does not intersect clip,
// it returns false along with the bound as clipped so far.
func clipEdgeBound(a, b r2.Point, clip, bound r2.Rect) (r2.Rect, bool) {
	// negSlope indicates which diagonal of the bounding box is spanned by AB: it
	// is false if AB has positive slope, and true if AB has negative slope. This is
	// used to determine which interval endpoints need to be updated each time
	// the edge is clipped.
	negSlope := (a.X > b.X) != (a.Y > b.Y)

	// Clip the X axis first, yielding intermediate bounds (b0x, b0y).
	b0x, b0y, up1 := clipBoundAxis(a.X, b.X, bound.X, a.Y, b.Y, bound.Y, negSlope, clip.X)
	if !up1 {
		return bound, false
	}
	// Then clip the Y axis against the X-clipped bounds.
	b1y, b1x, up2 := clipBoundAxis(a.Y, b.Y, b0y, a.X, b.X, b0x, negSlope, clip.Y)
	if !up2 {
		// Use a keyed composite literal; the original unkeyed
		// r2.Rect{b0x, b0y} is flagged by `go vet` (composites) and would
		// silently break if r2.Rect's field order ever changed.
		return r2.Rect{X: b0x, Y: b0y}, false
	}
	return r2.Rect{X: b1x, Y: b1y}, true
}
// interpolateFloat64 returns a value with the same combination of a1 and b1 as the
// given value x is of a and b. This function makes the following guarantees:
//   - If x == a, then x1 = a1 (exactly).
//   - If x == b, then x1 = b1 (exactly).
//   - If a <= x <= b, then a1 <= x1 <= b1 (even if a1 == b1).
//
// This requires a != b.
func interpolateFloat64(x, a, b, a1, b1 float64) float64 {
	// For accuracy near both endpoints, interpolate starting from whichever
	// endpoint is closer to x. Swapping the roles of A and B yields the exact
	// same expression the original two-branch form computed.
	if math.Abs(a-x) > math.Abs(b-x) {
		a, b = b, a
		a1, b1 = b1, a1
	}
	return a1 + (b1-a1)*(x-a)/(b-a)
}
// FaceSegment represents an edge AB clipped to an S2 cube face. It is
// represented by a face index and a pair of (u,v) coordinates.
type FaceSegment struct {
	face int      // Index of the cube face this segment lies on.
	a, b r2.Point // Segment endpoints in the (u,v) coordinates of that face.
}
// FaceSegments subdivides the given edge AB at every point where it crosses the
// boundary between two S2 cube faces and returns the corresponding FaceSegments.
// The segments are returned in order from A toward B. The input points must be
// unit length.
//
// This function guarantees that the returned segments form a continuous path
// from A to B, and that all vertices are within faceClipErrorUVDist of the
// line AB. All vertices lie within the [-1,1]x[-1,1] cube face rectangles.
// The results are consistent with Sign, i.e. the edge is well-defined even if its
// endpoints are antipodal.
// TODO(roberts): Extend the implementation of PointCross so that this is true.
func FaceSegments(a, b Point) []FaceSegment {
	var segment FaceSegment

	// Fast path: both endpoints are on the same face.
	var aFace, bFace int
	aFace, segment.a.X, segment.a.Y = xyzToFaceUV(a.Vector)
	bFace, segment.b.X, segment.b.Y = xyzToFaceUV(b.Vector)
	if aFace == bFace {
		segment.face = aFace
		return []FaceSegment{segment}
	}

	// Starting at A, we follow AB from face to face until we reach the face
	// containing B. The following code is designed to ensure that we always
	// reach B, even in the presence of numerical errors.
	//
	// First we compute the normal to the plane containing A and B. This normal
	// becomes the ultimate definition of the line AB; it is used to resolve all
	// questions regarding where exactly the line goes. Unfortunately due to
	// numerical errors, the line may not quite intersect the faces containing
	// the original endpoints. We handle this by moving A and/or B slightly if
	// necessary so that they are on faces intersected by the line AB.
	ab := a.PointCross(b)

	aFace, segment.a = moveOriginToValidFace(aFace, a, ab, segment.a)
	bFace, segment.b = moveOriginToValidFace(bFace, b, Point{ab.Mul(-1)}, segment.b)

	// Now we simply follow AB from face to face until we reach B.
	var segments []FaceSegment
	segment.face = aFace
	// segment.b is overwritten with face-exit points inside the loop below,
	// so save B's (u,v) position to restore it for the final segment.
	bSaved := segment.b

	for face := aFace; face != bFace; {
		// Complete the current segment by finding the point where AB
		// exits the current face.
		z := faceXYZtoUVW(face, ab)
		n := pointUVW{z.Vector}

		exitAxis := n.exitAxis()
		segment.b = n.exitPoint(exitAxis)
		segments = append(segments, segment)

		// Compute the next face intersected by AB, and translate the exit
		// point of the current segment into the (u,v) coordinates of the
		// next face. This becomes the first point of the next segment.
		exitXyz := faceUVToXYZ(face, segment.b.X, segment.b.Y)
		face = nextFace(face, segment.b, exitAxis, n, bFace)
		exitUvw := faceXYZtoUVW(face, Point{exitXyz})
		segment.face = face
		segment.a = r2.Point{exitUvw.X, exitUvw.Y}
	}
	// Finish the last segment.
	segment.b = bSaved
	return append(segments, segment)
}
// moveOriginToValidFace updates the origin point to a valid face if necessary.
// Given a line segment AB whose origin A has been projected onto a given cube
// face, determine whether it is necessary to project A onto a different face
// instead. This can happen because the normal of the line AB is not computed
// exactly, so that the line AB (defined as the set of points perpendicular to
// the normal) may not intersect the cube face containing A. Even if it does
// intersect the face, the exit point of the line from that face may be on
// the wrong side of A (i.e., in the direction away from B). If this happens,
// we reproject A onto the adjacent face where the line AB approaches A most
// closely. This moves the origin by a small amount, but never more than the
// error tolerances.
//
// It returns the (possibly changed) face index together with the (possibly
// reprojected and clamped) (u,v) coordinates of A on that face.
func moveOriginToValidFace(face int, a, ab Point, aUV r2.Point) (int, r2.Point) {
	// Fast path: if the origin is sufficiently far inside the face, it is
	// always safe to use it.
	const maxSafeUVCoord = 1 - faceClipErrorUVCoord
	if math.Max(math.Abs((aUV).X), math.Abs((aUV).Y)) <= maxSafeUVCoord {
		return face, aUV
	}

	// Otherwise check whether the normal AB even intersects this face.
	z := faceXYZtoUVW(face, ab)
	n := pointUVW{z.Vector}
	if n.intersectsFace() {
		// Check whether the point where the line AB exits this face is on the
		// wrong side of A (by more than the acceptable error tolerance).
		uv := n.exitPoint(n.exitAxis())
		exit := faceUVToXYZ(face, uv.X, uv.Y)
		aTangent := ab.Normalize().Cross(a.Vector)

		// We can use the given face.
		if exit.Sub(a.Vector).Dot(aTangent) >= -faceClipErrorRadians {
			return face, aUV
		}
	}

	// Otherwise we reproject A to the nearest adjacent face. (If line AB does
	// not pass through a given face, it must pass through all adjacent faces.)
	// dir encodes the side of the chosen axis: 1 for the positive side, 0 for
	// the negative side.
	var dir int
	if math.Abs((aUV).X) >= math.Abs((aUV).Y) {
		// U-axis
		if aUV.X > 0 {
			dir = 1
		}
		face = uvwFace(face, 0, dir)
	} else {
		// V-axis
		if aUV.Y > 0 {
			dir = 1
		}
		face = uvwFace(face, 1, dir)
	}

	// Reproject A onto the new face and clamp to the valid [-1,1] (u,v) range.
	aUV.X, aUV.Y = validFaceXYZToUV(face, a.Vector)
	aUV.X = math.Max(-1.0, math.Min(1.0, aUV.X))
	aUV.Y = math.Max(-1.0, math.Min(1.0, aUV.Y))
	return face, aUV
}
// nextFace returns the next face that should be visited by FaceSegments, given that
// we have just visited face and we are following the line AB (represented
// by its normal N in the (u,v,w) coordinates of that face). The other
// arguments include the point where AB exits face, the corresponding
// exit axis, and the target face containing the destination point B.
func nextFace(face int, exit r2.Point, axis axis, n pointUVW, targetFace int) int {
	// this bit is to work around C++ cleverly casting bools to ints for you.
	// exitA is the exit coordinate along the exit axis; exit1MinusA is the
	// coordinate along the other axis.
	exitA := exit.X
	exit1MinusA := exit.Y
	if axis == axisV {
		exitA = exit.Y
		exit1MinusA = exit.X
	}
	// The *Pos variables are the bools-as-ints: 1 if the corresponding
	// coordinate is positive, 0 otherwise.
	exitAPos := 0
	if exitA > 0 {
		exitAPos = 1
	}
	exit1MinusAPos := 0
	if exit1MinusA > 0 {
		exit1MinusAPos = 1
	}

	// We return the face that is adjacent to the exit point along the given
	// axis. If line AB exits *exactly* through a corner of the face, there are
	// two possible next faces. If one is the target face containing B, then
	// we guarantee that we advance to that face directly.
	//
	// The three conditions below check that (1) AB exits approximately through
	// a corner, (2) the adjacent face along the non-exit axis is the target
	// face, and (3) AB exits *exactly* through the corner. (The sumEqual
	// code checks whether the dot product of (u,v,1) and n is exactly zero.)
	if math.Abs(exit1MinusA) == 1 &&
		uvwFace(face, int(1-axis), exit1MinusAPos) == targetFace &&
		sumEqual(exit.X*n.X, exit.Y*n.Y, -n.Z) {
		return targetFace
	}

	// Otherwise return the face that is adjacent to the exit point in the
	// direction of the exit axis.
	return uvwFace(face, int(axis), exitAPos)
}

227
vendor/github.com/golang/geo/s2/edge_crosser.go generated vendored Normal file
View file

@ -0,0 +1,227 @@
// Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
import (
"math"
)
// EdgeCrosser allows edges to be efficiently tested for intersection with a
// given fixed edge AB. It is especially efficient when testing for
// intersection with an edge chain connecting vertices v0, v1, v2, ...
//
// Example usage:
//
//	func CountIntersections(a, b Point, edges []Edge) int {
//		count := 0
//		crosser := NewEdgeCrosser(a, b)
//		for _, edge := range edges {
//			if crosser.CrossingSign(&edge.First, &edge.Second) != DoNotCross {
//				count++
//			}
//		}
//		return count
//	}
type EdgeCrosser struct {
	a   Point
	b   Point
	aXb Point // Precomputed a.Cross(b) (set by NewEdgeCrosser).

	// To reduce the number of calls to expensiveSign, we compute an
	// outward-facing tangent at A and B if necessary. If the plane
	// perpendicular to one of these tangents separates AB from CD (i.e., one
	// edge on each side) then there is no intersection.
	aTangent Point // Outward-facing tangent at A.
	bTangent Point // Outward-facing tangent at B.

	// The fields below are updated for each vertex in the chain.
	c   Point     // Previous vertex in the vertex chain.
	acb Direction // The orientation of triangle ACB.
}
// NewEdgeCrosser returns an EdgeCrosser with the fixed edge AB.
func NewEdgeCrosser(a, b Point) *EdgeCrosser {
	// Precompute the cross product of A and B along with the two
	// outward-facing tangents used by the fast rejection test.
	norm := a.PointCross(b)
	crosser := &EdgeCrosser{a: a, b: b}
	crosser.aXb = Point{a.Cross(b.Vector)}
	crosser.aTangent = Point{a.Cross(norm.Vector)}
	crosser.bTangent = Point{norm.Cross(b.Vector)}
	return crosser
}
// CrossingSign reports whether the edge AB intersects the edge CD. If any two
// vertices from different edges are the same, returns MaybeCross. If either edge
// is degenerate (A == B or C == D), returns either DoNotCross or MaybeCross.
//
// Properties of CrossingSign:
//
//	(1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
//	(2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
//	(3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d
//	(3) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
//
// Note that if you want to check an edge against a chain of other edges,
// it is slightly more efficient to use the single-argument version
// ChainCrossingSign below.
func (e *EdgeCrosser) CrossingSign(c, d Point) Crossing {
	// Restart the chain unless c continues it from the previous call.
	if e.c != c {
		e.RestartAt(c)
	}
	return e.ChainCrossingSign(d)
}
// EdgeOrVertexCrossing reports whether if CrossingSign(c, d) > 0, or AB and
// CD share a vertex and VertexCrossing(a, b, c, d) is true.
//
// This method extends the concept of a "crossing" to the case where AB
// and CD have a vertex in common. The two edges may or may not cross,
// according to the rules defined in VertexCrossing above. The rules
// are designed so that point containment tests can be implemented simply
// by counting edge crossings. Similarly, determining whether one edge
// chain crosses another edge chain can be implemented by counting.
func (e *EdgeCrosser) EdgeOrVertexCrossing(c, d Point) bool {
	// Restart the chain unless c continues it from the previous call.
	if e.c != c {
		e.RestartAt(c)
	}
	return e.EdgeOrVertexChainCrossing(d)
}
// NewChainEdgeCrosser is a convenience constructor that uses AB as the fixed edge,
// and C as the first vertex of the vertex chain (equivalent to calling RestartAt(c)).
//
// You don't need to use this or any of the chain functions unless you're trying to
// squeeze out every last drop of performance. Essentially all you are saving is a test
// whether the first vertex of the current edge is the same as the second vertex of the
// previous edge.
func NewChainEdgeCrosser(a, b, c Point) *EdgeCrosser {
	crosser := NewEdgeCrosser(a, b)
	crosser.RestartAt(c)
	return crosser
}
// RestartAt sets the current point of the edge crosser to be c.
// Call this method when your chain 'jumps' to a new place.
// Note that unlike the C++ original, c is passed and stored by value here,
// so the caller does not need to keep the argument alive between calls.
func (e *EdgeCrosser) RestartAt(c Point) {
	e.c = c
	e.acb = -triageSign(e.a, e.b, e.c)
}
// ChainCrossingSign is like CrossingSign, but uses the last vertex passed to one of
// the crossing methods (or RestartAt) as the first vertex of the current edge.
func (e *EdgeCrosser) ChainCrossingSign(d Point) Crossing {
	// For there to be an edge crossing, the triangles ACB, CBD, BDA, DAC must
	// all be oriented the same way (CW or CCW). We keep the orientation of ACB
	// as part of our state. When each new point D arrives, we compute the
	// orientation of BDA and check whether it matches ACB. This checks whether
	// the points C and D are on opposite sides of the great circle through AB.
	// Recall that triageSign is invariant with respect to rotating its
	// arguments, i.e. ABD has the same orientation as BDA.
	bda := triageSign(e.a, e.b, d)
	if e.acb == -bda && bda != Indeterminate {
		// The most common case -- triangles have opposite orientations. Save the
		// current vertex D as the next vertex C, and also save the orientation of
		// the new triangle ACB (which is opposite to the current triangle BDA).
		e.c = d
		e.acb = -bda
		return DoNotCross
	}
	// Fall back to the slower path (tangent tests and, if needed, exact sign
	// computations); it also updates e.c and e.acb for the next chain vertex.
	return e.crossingSign(d, bda)
}
// EdgeOrVertexChainCrossing is like EdgeOrVertexCrossing, but uses the last vertex
// passed to one of the crossing methods (or RestartAt) as the first vertex of the current edge.
func (e *EdgeCrosser) EdgeOrVertexChainCrossing(d Point) bool {
	// ChainCrossingSign clobbers e.c, so capture it before the call.
	c := e.c
	crossing := e.ChainCrossingSign(d)
	if crossing == Cross {
		return true
	}
	if crossing == DoNotCross {
		return false
	}
	// MaybeCross: two vertices are shared, so fall back to the vertex rules.
	return VertexCrossing(e.a, e.b, c, d)
}
// crossingSign handles the slow path of CrossingSign.
func (e *EdgeCrosser) crossingSign(d Point, bda Direction) Crossing {
	// Compute the actual result, and then save the current vertex D as the next
	// vertex C, and save the orientation of the next triangle ACB (which is
	// opposite to the current triangle BDA). The defer ensures this state is
	// updated on every return path below.
	defer func() {
		e.c = d
		e.acb = -bda
	}()

	// At this point, a very common situation is that A,B,C,D are four points on
	// a line such that AB does not overlap CD. (For example, this happens when
	// a line or curve is sampled finely, or when geometry is constructed by
	// computing the union of S2CellIds.) Most of the time, we can determine
	// that AB and CD do not intersect using the two outward-facing
	// tangents at A and B (parallel to AB) and testing whether AB and CD are on
	// opposite sides of the plane perpendicular to one of these tangents. This
	// is moderately expensive but still much cheaper than expensiveSign.

	// The error in RobustCrossProd is insignificant. The maximum error in
	// the call to CrossProd (i.e., the maximum norm of the error vector) is
	// (0.5 + 1/sqrt(3)) * dblEpsilon. The maximum error in each call to
	// DotProd below is dblEpsilon. (There is also a small relative error
	// term that is insignificant because we are comparing the result against a
	// constant that is very close to zero.)
	maxError := (1.5 + 1/math.Sqrt(3)) * dblEpsilon
	if (e.c.Dot(e.aTangent.Vector) > maxError && d.Dot(e.aTangent.Vector) > maxError) || (e.c.Dot(e.bTangent.Vector) > maxError && d.Dot(e.bTangent.Vector) > maxError) {
		return DoNotCross
	}

	// Otherwise, eliminate the cases where two vertices from different edges are
	// equal. (These cases could be handled in the code below, but we would rather
	// avoid calling ExpensiveSign if possible.)
	if e.a == e.c || e.a == d || e.b == e.c || e.b == d {
		return MaybeCross
	}

	// Eliminate the cases where an input edge is degenerate. (Note that in
	// most cases, if CD is degenerate then this method is not even called
	// because acb and bda have different signs.)
	if e.a == e.b || e.c == d {
		return DoNotCross
	}

	// Otherwise it's time to break out the big guns.
	if e.acb == Indeterminate {
		e.acb = -expensiveSign(e.a, e.b, e.c)
	}
	if bda == Indeterminate {
		bda = expensiveSign(e.a, e.b, d)
	}

	// All four triangles ACB, CBD, BDA, DAC must share the same orientation
	// for the edges to cross.
	if bda != e.acb {
		return DoNotCross
	}

	cbd := -RobustSign(e.c, d, e.b)
	if cbd != e.acb {
		return DoNotCross
	}
	dac := RobustSign(e.c, d, e.a)
	if dac != e.acb {
		return DoNotCross
	}
	return Cross
}

394
vendor/github.com/golang/geo/s2/edge_crossings.go generated vendored Normal file
View file

@ -0,0 +1,394 @@
// Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
import (
"fmt"
"math"
"github.com/golang/geo/r3"
"github.com/golang/geo/s1"
)
const (
	// intersectionError can be set somewhat arbitrarily, because the algorithm
	// uses more precision if necessary in order to achieve the specified error.
	// The only strict requirement is that intersectionError >= dblEpsilon
	// radians. However, using a larger error tolerance makes the algorithm more
	// efficient because it reduces the number of cases where exact arithmetic is
	// needed.
	intersectionError = s1.Angle(8 * dblEpsilon)

	// intersectionMergeRadius is used to ensure that intersection points that
	// are supposed to be coincident are merged back together into a single
	// vertex. This is required in order for various polygon operations (union,
	// intersection, etc) to work correctly. It is twice the intersection error
	// (i.e., 16 * dblEpsilon) because two coincident intersection points might
	// have errors in opposite directions.
	intersectionMergeRadius = 2 * intersectionError
)
// A Crossing indicates how edges cross.
type Crossing int

const (
	// Cross means the edges cross.
	Cross Crossing = iota
	// MaybeCross means two vertices from different edges are the same.
	MaybeCross
	// DoNotCross means the edges do not cross.
	DoNotCross
)

// String returns a human-readable name for the Crossing value.
func (c Crossing) String() string {
	switch c {
	case Cross:
		return "Cross"
	case MaybeCross:
		return "MaybeCross"
	case DoNotCross:
		return "DoNotCross"
	}
	// Any other value is invalid; report it explicitly.
	return fmt.Sprintf("(BAD CROSSING %d)", c)
}
// CrossingSign reports whether the edge AB intersects the edge CD.
// If AB crosses CD at a point that is interior to both edges, Cross is returned.
// If any two vertices from different edges are the same it returns MaybeCross.
// Otherwise it returns DoNotCross.
// If either edge is degenerate (A == B or C == D), the return value is MaybeCross
// if two vertices from different edges are the same and DoNotCross otherwise.
//
// Properties of CrossingSign:
//
//	(1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
//	(2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
//	(3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d
//	(3) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
//
// This method implements an exact, consistent perturbation model such
// that no three points are ever considered to be collinear. This means
// that even if you have 4 points A, B, C, D that lie exactly in a line
// (say, around the equator), C and D will be treated as being slightly to
// one side or the other of AB. This is done in a way such that the
// results are always consistent (see RobustSign).
func CrossingSign(a, b, c, d Point) Crossing {
	// Delegate to a chain crosser seeded with C as the first chain vertex.
	return NewChainEdgeCrosser(a, b, c).ChainCrossingSign(d)
}
// VertexCrossing reports whether two edges "cross" in such a way that point-in-polygon
// containment tests can be implemented by counting the number of edge crossings.
//
// Given two edges AB and CD where at least two vertices are identical
// (i.e. CrossingSign(a,b,c,d) == 0), the basic rule is that a "crossing"
// occurs if AB is encountered after CD during a CCW sweep around the shared
// vertex starting from a fixed reference point.
//
// Note that according to this rule, if AB crosses CD then in general CD
// does not cross AB. However, this leads to the correct result when
// counting polygon edge crossings. For example, suppose that A,B,C are
// three consecutive vertices of a CCW polygon. If we now consider the edge
// crossings of a segment BP as P sweeps around B, the crossing number
// changes parity exactly when BP crosses BA or BC.
//
// Useful properties of VertexCrossing (VC):
//
//	(1) VC(a,a,c,d) == VC(a,b,c,c) == false
//	(2) VC(a,b,a,b) == VC(a,b,b,a) == true
//	(3) VC(a,b,c,d) == VC(a,b,d,c) == VC(b,a,c,d) == VC(b,a,d,c)
//	(3) If exactly one of a,b equals one of c,d, then exactly one of
//	    VC(a,b,c,d) and VC(c,d,a,b) is true
//
// It is an error to call this method with 4 distinct vertices.
func VertexCrossing(a, b, c, d Point) bool {
	// A degenerate edge (A == B or C == D) can never produce a crossing; this
	// must be checked first in case 3 or more input points are identical.
	if a == b || c == d {
		return false
	}

	// For each way a vertex can be shared, there is a crossing if and only if
	// OrderedCCW indicates that edge AB is further CCW around the shared
	// vertex O (either A or B) than edge CD, starting from an arbitrary fixed
	// reference point.
	if a == d {
		return OrderedCCW(Point{a.Ortho()}, c, b, a)
	}
	if b == c {
		return OrderedCCW(Point{b.Ortho()}, d, a, b)
	}
	if a == c {
		return OrderedCCW(Point{a.Ortho()}, d, b, a)
	}
	if b == d {
		return OrderedCCW(Point{b.Ortho()}, c, a, b)
	}
	return false
}
// EdgeOrVertexCrossing is a convenience function that calls CrossingSign to
// handle cases where all four vertices are distinct, and VertexCrossing to
// handle cases where two or more vertices are the same. This defines a crossing
// function such that point-in-polygon containment tests can be implemented
// by simply counting edge crossings.
func EdgeOrVertexCrossing(a, b, c, d Point) bool {
	crossing := CrossingSign(a, b, c, d)
	if crossing == Cross {
		return true
	}
	if crossing == DoNotCross {
		return false
	}
	// MaybeCross: some vertices coincide, so apply the vertex rules.
	return VertexCrossing(a, b, c, d)
}
// Intersection returns the intersection point of two edges AB and CD that cross
// (CrossingSign(a,b,c,d) == Crossing).
//
// Useful properties of Intersection:
//
//	(1) Intersection(b,a,c,d) == Intersection(a,b,d,c) == Intersection(a,b,c,d)
//	(2) Intersection(c,d,a,b) == Intersection(a,b,c,d)
//
// The returned intersection point X is guaranteed to be very close to the
// true intersection point of AB and CD, even if the edges intersect at a
// very small angle.
func Intersection(a0, a1, b0, b1 Point) Point {
	// Computing an accurate intersection point when the edges cross at a very
	// small angle is difficult, so two strategies are tried in order:
	//
	//  - intersectionStable computes the intersection point using projection
	//    and interpolation, taking care to minimize cancellation error. It
	//    reports failure when it cannot guarantee a result within
	//    intersectionError of the true intersection.
	//
	//  - intersectionExact recomputes the intersection using extended
	//    precision arithmetic and rounds the final result back to a Point.
	x, ok := intersectionStable(a0, a1, b0, b1)
	if !ok {
		x = intersectionExact(a0, a1, b0, b1)
	}

	// Pick the candidate on the correct side of the sphere. Since all vertices
	// are unit length and the edges span less than 180 degrees, (a0 + a1) and
	// (b0 + b1) both have positive dot product with the true intersection
	// point. Summing all four vertices keeps the result unchanged when the
	// edges are swapped or reversed.
	vertexSum := a0.Add(a1.Vector).Add(b0.Add(b1.Vector))
	if x.Dot(vertexSum) < 0 {
		x = Point{x.Mul(-1)}
	}
	return x
}
// robustNormalWithLength computes the cross product of two vectors, normalized
// to be unit length. It also returns the length of the cross product before
// normalization, which is useful for estimating the amount of error in the
// result. For numerical stability, the vectors should both be approximately
// unit length.
func robustNormalWithLength(x, y r3.Vector) (r3.Vector, float64) {
	var pt r3.Vector
	// This computes 2 * (x.Cross(y)), but has much better numerical
	// stability when x and y are unit length.
	tmp := x.Sub(y).Cross(x.Add(y))
	length := tmp.Norm()
	if length != 0 {
		pt = tmp.Mul(1 / length)
	}
	// If the cross product was exactly zero, pt is left as the zero vector.
	return pt, 0.5 * length // Since tmp == 2 * (x.Cross(y))
}
/*
// intersectionSimple is not used by the C++ implementation, so it is skipped here as well.
*/
// projection returns the projection of aNorm onto X (x.Dot(aNorm)), and a bound
// on the error in the result. aNorm is not necessarily unit length, so the
// returned projection is scaled by its length.
//
// The remaining parameters (the length of aNorm (aNormLen) and the edge endpoints
// a0 and a1) allow this dot product to be computed more accurately and efficiently.
func projection(x, aNorm r3.Vector, aNormLen float64, a0, a1 Point) (proj, bound float64) {
	// The error in the dot product is proportional to the lengths of the input
	// vectors, so rather than using x itself (a unit-length vector) we use
	// the vectors from x to the closer of the two edge endpoints. This
	// typically reduces the error by a huge factor.
	x0 := x.Sub(a0.Vector)
	x1 := x.Sub(a1.Vector)
	x0Dist2 := x0.Norm2()
	x1Dist2 := x1.Norm2()

	// If both distances are the same, we need to be careful to choose one
	// endpoint deterministically so that the result does not change if the
	// order of the endpoints is reversed.
	var dist float64
	if x0Dist2 < x1Dist2 || (x0Dist2 == x1Dist2 && x0.Cmp(x1) == -1) {
		dist = math.Sqrt(x0Dist2)
		proj = x0.Dot(aNorm)
	} else {
		dist = math.Sqrt(x1Dist2)
		proj = x1.Dot(aNorm)
	}

	// This calculation bounds the error from all sources: the computation of
	// the normal, the subtraction of one endpoint, and the dot product itself.
	// dblEpsilon appears because the input points are assumed to be
	// normalized in double precision.
	//
	// For reference, the bounds that went into this calculation are:
	// ||N'-N|| <= ((1 + 2 * sqrt(3))||N|| + 32 * sqrt(3) * dblEpsilon) * epsilon
	// |(A.B)'-(A.B)| <= (1.5 * (A.B) + 1.5 * ||A|| * ||B||) * epsilon
	// ||(X-Y)'-(X-Y)|| <= ||X-Y|| * epsilon
	bound = (((3.5+2*math.Sqrt(3))*aNormLen+32*math.Sqrt(3)*dblEpsilon)*dist + 1.5*math.Abs(proj)) * epsilon
	return proj, bound
}
// compareEdges reports whether (a0,a1) is less than (b0,b1) with respect to a total
// ordering on edges that is invariant under edge reversals.
func compareEdges(a0, a1, b0, b1 Point) bool {
	// Canonicalize each edge so that its lexicographically smaller endpoint
	// comes first; this makes the ordering invariant under edge reversal.
	if a0.Cmp(a1.Vector) != -1 {
		a0, a1 = a1, a0
	}
	if b0.Cmp(b1.Vector) != -1 {
		b0, b1 = b1, b0
	}
	// Compare the lower endpoints, breaking ties with the upper endpoints.
	// Bug fix: the tie-break previously compared b0 with b1, which (after the
	// canonicalization above) is true whenever b0 != b1 regardless of a1, so
	// for a0 == b0 both compareEdges(a,b) and compareEdges(b,a) could report
	// true — not a total ordering. Comparing a1 with b1 matches the C++ S2
	// implementation and restores antisymmetry.
	return a0.Cmp(b0.Vector) == -1 || (a0 == b0 && a1.Cmp(b1.Vector) == -1)
}
// intersectionStable returns the intersection point of the edges (a0,a1) and
// (b0,b1) if it can be computed to within an error of at most intersectionError
// by this function.
//
// The intersection point is not guaranteed to have the correct sign because we
// choose to use the longest of the two edges first. The sign is corrected by
// Intersection.
func intersectionStable(a0, a1, b0, b1 Point) (Point, bool) {
	// Order the edges so the longer one is passed first, breaking exact-length
	// ties deterministically (independent of endpoint ordering). This both
	// keeps the result stable under edge swaps/reversals and reduces error:
	// the first edge supplies the plane normal (longer is better) while the
	// second is interpolated along (shorter is better).
	aLen2 := a1.Sub(a0.Vector).Norm2()
	bLen2 := b1.Sub(b0.Vector).Norm2()
	if bLen2 > aLen2 || (bLen2 == aLen2 && compareEdges(a0, a1, b0, b1)) {
		return intersectionStableSorted(b0, b1, a0, a1)
	}
	return intersectionStableSorted(a0, a1, b0, b1)
}
// intersectionStableSorted is a helper function for intersectionStable.
// It expects that the edges (a0,a1) and (b0,b1) have been sorted so that
// the first edge passed in is longer. It reports false (with the zero Point)
// when the result cannot be computed within the required error bound.
func intersectionStableSorted(a0, a1, b0, b1 Point) (Point, bool) {
	var pt Point // Zero value; returned on failure.

	// Compute the normal of the plane through (a0, a1) in a stable way.
	aNorm := a0.Sub(a1.Vector).Cross(a0.Add(a1.Vector))
	aNormLen := aNorm.Norm()
	bLen := b1.Sub(b0.Vector).Norm()

	// Compute the projection (i.e., signed distance) of b0 and b1 onto the
	// plane through (a0, a1). Distances are scaled by the length of aNorm.
	b0Dist, b0Error := projection(b0.Vector, aNorm, aNormLen, a0, a1)
	b1Dist, b1Error := projection(b1.Vector, aNorm, aNormLen, a0, a1)

	// The total distance from b0 to b1 measured perpendicularly to (a0,a1) is
	// |b0Dist - b1Dist|. Note that b0Dist and b1Dist generally have
	// opposite signs because b0 and b1 are on opposite sides of (a0, a1). The
	// code below finds the intersection point by interpolating along the edge
	// (b0, b1) to a fractional distance of b0Dist / (b0Dist - b1Dist).
	//
	// It can be shown that the maximum error in the interpolation fraction is
	//
	// (b0Dist * b1Error - b1Dist * b0Error) / (distSum * (distSum - errorSum))
	//
	// We save ourselves some work by scaling the result and the error bound by
	// "distSum", since the result is normalized to be unit length anyway.
	distSum := math.Abs(b0Dist - b1Dist)
	errorSum := b0Error + b1Error
	if distSum <= errorSum {
		return pt, false // Error is unbounded in this case.
	}

	x := b1.Mul(b0Dist).Sub(b0.Mul(b1Dist))
	err := bLen*math.Abs(b0Dist*b1Error-b1Dist*b0Error)/
		(distSum-errorSum) + 2*distSum*epsilon

	// Finally we normalize the result, compute the corresponding error, and
	// check whether the total error is acceptable.
	xLen := x.Norm()
	maxError := intersectionError
	if err > (float64(maxError)-epsilon)*xLen {
		return pt, false
	}

	return Point{x.Mul(1 / xLen)}, true
}
// intersectionExact returns the intersection point of (a0, a1) and (b0, b1)
// using precise arithmetic. Note that the result is not exact because it is
// rounded down to double precision at the end. Also, the intersection point
// is not guaranteed to have the correct sign (i.e., the return value may need
// to be negated).
func intersectionExact(a0, a1, b0, b1 Point) Point {
	// Since we are using precise arithmetic, we don't need to worry about
	// numerical stability.
	a0P := r3.PreciseVectorFromVector(a0.Vector)
	a1P := r3.PreciseVectorFromVector(a1.Vector)
	b0P := r3.PreciseVectorFromVector(b0.Vector)
	b1P := r3.PreciseVectorFromVector(b1.Vector)
	aNormP := a0P.Cross(a1P)
	bNormP := b0P.Cross(b1P)
	xP := aNormP.Cross(bNormP)

	// Converting the precise result back to double precision introduces a
	// directional error of up to dblEpsilon.
	// NOTE(review): an earlier comment here referred to a "final Normalize()
	// call", but no Normalize() appears in this function — presumably the
	// caller normalizes; confirm before relying on the result being unit
	// length.
	x := xP.Vector()
	if x == (r3.Vector{}) {
		// The two edges are exactly collinear, but we still consider them to be
		// "crossing" because of simulation of simplicity. Out of the four
		// endpoints, exactly two lie in the interior of the other edge. Of
		// those two we return the one that is lexicographically smallest.
		x = r3.Vector{10, 10, 10} // Greater than any valid S2Point

		aNorm := Point{aNormP.Vector()}
		bNorm := Point{bNormP.Vector()}

		if OrderedCCW(b0, a0, b1, bNorm) && a0.Cmp(x) == -1 {
			return a0
		}
		if OrderedCCW(b0, a1, b1, bNorm) && a1.Cmp(x) == -1 {
			return a1
		}
		if OrderedCCW(a0, b0, a1, aNorm) && b0.Cmp(x) == -1 {
			return b0
		}
		if OrderedCCW(a0, b1, a1, aNorm) && b1.Cmp(x) == -1 {
			return b1
		}
	}

	return Point{x}
}

378
vendor/github.com/golang/geo/s2/edge_distances.go generated vendored Normal file
View file

@ -0,0 +1,378 @@
// Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
// This file defines a collection of methods for computing the distance to an edge,
// interpolating along an edge, projecting points onto edges, etc.
import (
"math"
"github.com/golang/geo/s1"
)
// DistanceFromSegment returns the distance of point X from line segment AB.
// The points are expected to be normalized. The result is very accurate for small
// distances but may have some numerical error if the distance is large
// (approximately pi/2 or greater). The case A == B is handled correctly.
func DistanceFromSegment(x, a, b Point) s1.Angle {
	// alwaysUpdate forces the distance to be computed, so the initial
	// minimum (the zero ChordAngle) does not matter here.
	dist, _ := updateMinDistance(x, a, b, 0, true)
	return dist.Angle()
}
// IsDistanceLess reports whether the distance from X to the edge AB is less
// than limit. This method is faster than DistanceFromSegment(). If you want to
// compare against a fixed s1.Angle, you should convert it to an s1.ChordAngle
// once and save the value, since this conversion is relatively expensive.
func IsDistanceLess(x, a, b Point, limit s1.ChordAngle) bool {
	if _, updated := UpdateMinDistance(x, a, b, limit); updated {
		return true
	}
	return false
}
// UpdateMinDistance checks if the distance from X to the edge AB is less
// than minDist, and if so, returns the updated value and true.
// The case A == B is handled correctly.
//
// Use this method when you want to compute many distances and keep track of
// the minimum. It is significantly faster than using DistanceFromSegment
// because (1) using s1.ChordAngle is much faster than s1.Angle, and (2) it
// can save a lot of work by not actually computing the distance when it is
// obviously larger than the current minimum.
func UpdateMinDistance(x, a, b Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) {
	dist, updated := updateMinDistance(x, a, b, minDist, false)
	return dist, updated
}
// UpdateMaxDistance checks if the distance from X to the edge AB is greater
// than maxDist, and if so, returns the updated value and true.
// Otherwise it returns false. The case A == B is handled correctly.
func UpdateMaxDistance(x, a, b Point, maxDist s1.ChordAngle) (s1.ChordAngle, bool) {
	// Start with the larger of the two vertex distances.
	d := maxChordAngle(ChordAngleBetweenPoints(x, a), ChordAngleBetweenPoints(x, b))
	if d > s1.RightChordAngle {
		// The farthest point may lie in the edge interior: measure the
		// minimum distance from the antipode of X and reflect it back.
		d, _ = updateMinDistance(Point{x.Mul(-1)}, a, b, d, true)
		d = s1.StraightChordAngle - d
	}
	if maxDist < d {
		return d, true
	}
	return maxDist, false
}
// IsInteriorDistanceLess reports whether the minimum distance from X to the
// edge AB is attained at an interior point of AB (i.e., not an endpoint), and
// that distance is less than limit.
func IsInteriorDistanceLess(x, a, b Point, limit s1.ChordAngle) bool {
	if _, updated := UpdateMinInteriorDistance(x, a, b, limit); updated {
		return true
	}
	return false
}
// UpdateMinInteriorDistance reports whether the minimum distance from X to AB
// is attained at an interior point of AB (i.e., not an endpoint), and that distance
// is less than minDist. If so, the value of minDist is updated and true is returned.
// Otherwise it is unchanged and returns false.
func UpdateMinInteriorDistance(x, a, b Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) {
	dist, updated := interiorDist(x, a, b, minDist, false)
	return dist, updated
}
// Project returns the point along the edge AB that is closest to the point X.
// The fractional distance of this point along the edge AB can be obtained
// using DistanceFraction.
//
// This requires that all points are unit length.
func Project(x, a, b Point) Point {
	normal := a.PointCross(b)

	// Closest point to X on the great circle through AB: subtract the
	// component of X along the edge normal.
	proj := x.Sub(normal.Mul(x.Dot(normal.Vector) / normal.Vector.Norm2()))
	pt := Point{proj}

	// If that point lies within the edge, it is the answer.
	if Sign(normal, a, pt) && Sign(pt, b, normal) {
		return Point{proj.Normalize()}
	}

	// Otherwise the closest point is whichever endpoint is nearer to X.
	if x.Sub(a.Vector).Norm2() <= x.Sub(b.Vector).Norm2() {
		return a
	}
	return b
}
// DistanceFraction returns the distance ratio of the point X along an edge AB.
// If X is on the line segment AB, this is the fraction T such
// that X == Interpolate(T, A, B).
//
// This requires that A and B are distinct.
func DistanceFraction(x, a, b Point) float64 {
	da := x.Angle(a.Vector)
	db := x.Angle(b.Vector)
	return float64(da / (da + db))
}
// Interpolate returns the point X along the line segment AB whose distance from A
// is the given fraction "t" of the distance AB. Does NOT require that "t" be
// between 0 and 1. Note that all distances are measured on the surface of
// the sphere, so this is more complicated than just computing (1-t)*a + t*b
// and normalizing the result.
func Interpolate(t float64, a, b Point) Point {
	// The endpoints are returned exactly, with no numerical error.
	switch t {
	case 0:
		return a
	case 1:
		return b
	}
	return InterpolateAtDistance(s1.Angle(t)*a.Angle(b.Vector), a, b)
}
// InterpolateAtDistance returns the point X along the line segment AB whose
// distance from A is the angle ax.
func InterpolateAtDistance(ax s1.Angle, a, b Point) Point {
	rad := ax.Radians()

	// PointCross yields a vector perpendicular to A even when A == B or
	// A == -B, though not necessarily unit length; the division by
	// tangent.Norm() below effectively normalizes it.
	perp := a.PointCross(b)
	tangent := perp.Vector.Cross(a.Vector)

	// Combine A and the tangent direction. With infinite precision the
	// result would already be unit length, but we normalize anyway so that
	// errors do not accumulate when one interpolation feeds another.
	v := a.Mul(math.Cos(rad)).Add(tangent.Mul(math.Sin(rad) / tangent.Norm()))
	return Point{v.Normalize()}
}
// minUpdateDistanceMaxError returns the maximum error in the result of
// UpdateMinDistance (and the associated functions such as
// UpdateMinInteriorDistance, IsDistanceLess, etc), assuming that all
// input points are normalized to within the bounds guaranteed by r3.Vector's
// Normalize. The error can be added or subtracted from an s1.ChordAngle
// using its Expanded method.
func minUpdateDistanceMaxError(dist s1.ChordAngle) float64 {
	// The error is the larger of two cases, depending on whether the
	// closest point is interior to the edge or at an endpoint.
	interior := minUpdateInteriorDistanceMaxError(dist)
	vertex := dist.MaxPointError()
	return math.Max(interior, vertex)
}
// minUpdateInteriorDistanceMaxError returns the maximum error in the result of
// UpdateMinInteriorDistance, assuming that all input points are normalized
// to within the bounds guaranteed by Point's Normalize. The error can be added
// or subtracted from an s1.ChordAngle using its Expanded method.
//
// Note that accuracy goes down as the distance approaches 0 degrees or 180
// degrees (for different reasons). Near 0 degrees the error is acceptable
// for all practical purposes (about 1.2e-15 radians ~= 8 nanometers). For
// exactly antipodal points the maximum error is quite high (0.5 meters),
// but this error drops rapidly as the points move away from antipodality
// (approximately 1 millimeter for points that are 50 meters from antipodal,
// and 1 micrometer for points that are 50km from antipodal).
//
// TODO(roberts): Currently the error bound does not hold for edges whose endpoints
// are antipodal to within about 1e-15 radians (less than 1 micron). This could
// be fixed by extending PointCross to use higher precision when necessary.
func minUpdateInteriorDistanceMaxError(dist s1.ChordAngle) float64 {
	// Beyond 90 degrees the minimum distance is always attained at an
	// endpoint, so the interior case contributes no error.
	if dist >= s1.RightChordAngle {
		return 0.0
	}

	// This bound includes all sources of error, assuming that the input
	// points are normalized. par and perp are the components of the chord
	// length that are parallel and perpendicular to a plane containing the
	// edge, respectively.
	par := math.Min(1.0, 0.5*float64(dist)*float64(dist))
	perp := math.Sqrt(par * (2 - par))
	return ((2.5+2*math.Sqrt(3)+8.5*perp)*perp +
		(2+2*math.Sqrt(3)/3+6.5*(1-par))*par +
		(23+16/math.Sqrt(3))*dblEpsilon) * dblEpsilon
}
// updateMinDistance computes the distance from a point X to a line segment AB,
// and if either the distance was less than the given minDist, or alwaysUpdate is
// true, the value and whether it was updated are returned.
func updateMinDistance(x, a, b Point, minDist s1.ChordAngle, alwaysUpdate bool) (s1.ChordAngle, bool) {
	if interior, ok := interiorDist(x, a, b, minDist, alwaysUpdate); ok {
		// The minimum is attained in the edge interior.
		return interior, true
	}

	// Otherwise the minimum distance is to one of the two endpoints.
	da2 := x.Sub(a.Vector).Norm2()
	db2 := x.Sub(b.Vector).Norm2()
	dist := s1.ChordAngle(math.Min(da2, db2))
	if !alwaysUpdate && dist >= minDist {
		return minDist, false
	}
	return dist, true
}
// interiorDist returns the shortest distance from point x to edge ab, assuming
// that the closest point to X is interior to AB. If the closest point is not
// interior to AB, interiorDist returns (minDist, false). If alwaysUpdate is set to
// false, the distance is only updated when the value is below the given minDist.
func interiorDist(x, a, b Point, minDist s1.ChordAngle, alwaysUpdate bool) (s1.ChordAngle, bool) {
	// Squared chord distances from x to each endpoint.
	da2 := x.Sub(a.Vector).Norm2()
	db2 := x.Sub(b.Vector).Norm2()

	// The closest point on AB could either be one of the two vertices (the
	// vertex case) or in the interior (the interior case). Let C = A x B.
	// If X is in the spherical wedge extending from A to B around the axis
	// through C, then we are in the interior case. Otherwise we are in the
	// vertex case.
	//
	// Check whether we might be in the interior case. For this to be true, XAB
	// and XBA must both be acute angles. Checking this condition exactly is
	// expensive, so instead we consider the planar triangle ABX (which passes
	// through the sphere's interior). The planar angles XAB and XBA are always
	// less than the corresponding spherical angles, so if we are in the
	// interior case then both of these angles must be acute.
	//
	// We check this by computing the squared edge lengths of the planar
	// triangle ABX, and testing acuteness using the law of cosines:
	//
	//             max(XA^2, XB^2) < min(XA^2, XB^2) + AB^2
	if math.Max(da2, db2) >= math.Min(da2, db2)+a.Sub(b.Vector).Norm2() {
		return minDist, false
	}

	// The minimum distance might be to a point on the edge interior. Let R
	// be closest point to X that lies on the great circle through AB. Rather
	// than computing the geodesic distance along the surface of the sphere,
	// instead we compute the "chord length" through the sphere's interior.
	//
	// The squared chord length XR^2 can be expressed as XQ^2 + QR^2, where Q
	// is the point X projected onto the plane through the great circle AB.
	// The distance XQ^2 can be written as (X.C)^2 / |C|^2 where C = A x B.
	// We ignore the QR^2 term and instead use XQ^2 as a lower bound, since it
	// is faster and the corresponding distance on the Earth's surface is
	// accurate to within 1% for distances up to about 1800km.
	normal := a.PointCross(b)
	norm2 := normal.Norm2()
	xDotNorm := x.Dot(normal.Vector)
	xDotNorm2 := xDotNorm * xDotNorm
	if !alwaysUpdate && xDotNorm2 >= norm2*float64(minDist) {
		// The closest point on the great circle AB is too far away.
		return minDist, false
	}

	// Otherwise we do the exact, more expensive test for the interior case.
	// This test is very likely to succeed because of the conservative planar
	// test we did initially.
	normCrossX := normal.Cross(x.Vector)
	if a.Dot(normCrossX) >= 0 || b.Dot(normCrossX) <= 0 {
		return minDist, false
	}

	// Compute the squared chord length XR^2 = XQ^2 + QR^2 (see above).
	// This calculation has good accuracy for all chord lengths since it
	// is based on both the dot product and cross product (rather than
	// deriving one from the other). However, note that the chord length
	// representation itself loses accuracy as the angle approaches π.
	qr := 1 - math.Sqrt(normCrossX.Norm2()/norm2)
	dist := s1.ChordAngle((xDotNorm2 / norm2) + (qr * qr))

	if !alwaysUpdate && dist >= minDist {
		return minDist, false
	}

	return dist, true
}
// updateEdgePairMinDistance computes the minimum distance between the given
// pair of edges. If the distance is less than minDist, it returns the updated
// value and true; otherwise it returns minDist unchanged and false. If the
// two edges cross, the distance is zero. The cases a0 == a1 and b0 == b1 are
// handled correctly.
func updateEdgePairMinDistance(a0, a1, b0, b1 Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) {
	if minDist == 0 {
		// Zero is the smallest possible distance; nothing can improve on it.
		return 0, false
	}
	if CrossingSign(a0, a1, b0, b1) == Cross {
		// Crossing edges are at distance zero.
		// (The redundant dead store "minDist = 0" before this return has
		// been removed; the function returns the literal 0 directly.)
		return 0, true
	}

	// Otherwise, the minimum distance is achieved at an endpoint of at least
	// one of the two edges. We ensure that all four possibilities are always checked.
	//
	// The calculation below computes each of the six vertex-vertex distances
	// twice (this could be optimized).
	var ok1, ok2, ok3, ok4 bool
	minDist, ok1 = UpdateMinDistance(a0, b0, b1, minDist)
	minDist, ok2 = UpdateMinDistance(a1, b0, b1, minDist)
	minDist, ok3 = UpdateMinDistance(b0, a0, a1, minDist)
	minDist, ok4 = UpdateMinDistance(b1, a0, a1, minDist)
	return minDist, ok1 || ok2 || ok3 || ok4
}
// updateEdgePairMaxDistance reports the maximum distance between the given
// pair of edges. (The original comment incorrectly said "minimum".) If the
// distance exceeds maxDist, it returns the updated value and true; otherwise
// it returns maxDist unchanged and false. If one edge crosses the antipodal
// reflection of the other, the distance is pi.
func updateEdgePairMaxDistance(a0, a1, b0, b1 Point, maxDist s1.ChordAngle) (s1.ChordAngle, bool) {
	if maxDist == s1.StraightChordAngle {
		// Pi is the largest possible distance; nothing can improve on it.
		return s1.StraightChordAngle, false
	}
	if CrossingSign(a0, a1, Point{b0.Mul(-1)}, Point{b1.Mul(-1)}) == Cross {
		return s1.StraightChordAngle, true
	}

	// Otherwise, the maximum distance is achieved at an endpoint of at least
	// one of the two edges. We ensure that all four possibilities are always checked.
	//
	// The calculation below computes each of the six vertex-vertex distances
	// twice (this could be optimized).
	var ok1, ok2, ok3, ok4 bool
	maxDist, ok1 = UpdateMaxDistance(a0, b0, b1, maxDist)
	maxDist, ok2 = UpdateMaxDistance(a1, b0, b1, maxDist)
	maxDist, ok3 = UpdateMaxDistance(b0, a0, a1, maxDist)
	maxDist, ok4 = UpdateMaxDistance(b1, a0, a1, maxDist)
	return maxDist, ok1 || ok2 || ok3 || ok4
}
// EdgePairClosestPoints returns the pair of points (a, b) that achieves the
// minimum distance between edges a0a1 and b0b1, where a is a point on a0a1 and
// b is a point on b0b1. If the two edges intersect, a and b are both equal to
// the intersection point. Handles a0 == a1 and b0 == b1 correctly.
func EdgePairClosestPoints(a0, a1, b0, b1 Point) (Point, Point) {
	if CrossingSign(a0, a1, b0, b1) == Cross {
		x := Intersection(a0, a1, b0, b1)
		return x, x
	}

	// First determine which vertex/edge pair achieves the minimum distance,
	// then compute the closest point on the corresponding edge.
	minDist, _ := updateMinDistance(a0, b0, b1, 0, true)
	closest := 0
	if d, ok := UpdateMinDistance(a1, b0, b1, minDist); ok {
		minDist, closest = d, 1
	}
	if d, ok := UpdateMinDistance(b0, a0, a1, minDist); ok {
		minDist, closest = d, 2
	}
	if d, ok := UpdateMinDistance(b1, a0, a1, minDist); ok {
		minDist, closest = d, 3
	}
	_ = minDist

	switch closest {
	case 0:
		return a0, Project(a0, b0, b1)
	case 1:
		return a1, Project(a1, b0, b1)
	case 2:
		return Project(b0, a0, a1), b0
	case 3:
		return Project(b1, a0, a1), b1
	default:
		panic("illegal case reached")
	}
}

File diff suppressed because it is too large Load diff

237
vendor/github.com/golang/geo/s2/encode.go generated vendored Normal file
View file

@ -0,0 +1,237 @@
// Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
import (
"encoding/binary"
"io"
)
const (
	// encodingVersion is the current version of the encoding
	// format that is compatible with C++ and other S2 libraries.
	encodingVersion = int8(1)

	// encodingCompressedVersion is the current version of the
	// compressed format. It is distinct from encodingVersion above.
	encodingCompressedVersion = int8(4)
)
// encoder handles the specifics of encoding for S2 types.
type encoder struct {
w io.Writer // the real writer passed to Encode
err error
}
func (e *encoder) writeUvarint(x uint64) {
if e.err != nil {
return
}
var buf [binary.MaxVarintLen64]byte
n := binary.PutUvarint(buf[:], x)
_, e.err = e.w.Write(buf[:n])
}
func (e *encoder) writeBool(x bool) {
if e.err != nil {
return
}
var val int8
if x {
val = 1
}
e.err = binary.Write(e.w, binary.LittleEndian, val)
}
func (e *encoder) writeInt8(x int8) {
if e.err != nil {
return
}
e.err = binary.Write(e.w, binary.LittleEndian, x)
}
func (e *encoder) writeInt16(x int16) {
if e.err != nil {
return
}
e.err = binary.Write(e.w, binary.LittleEndian, x)
}
func (e *encoder) writeInt32(x int32) {
if e.err != nil {
return
}
e.err = binary.Write(e.w, binary.LittleEndian, x)
}
func (e *encoder) writeInt64(x int64) {
if e.err != nil {
return
}
e.err = binary.Write(e.w, binary.LittleEndian, x)
}
func (e *encoder) writeUint8(x uint8) {
if e.err != nil {
return
}
_, e.err = e.w.Write([]byte{x})
}
func (e *encoder) writeUint32(x uint32) {
if e.err != nil {
return
}
e.err = binary.Write(e.w, binary.LittleEndian, x)
}
func (e *encoder) writeUint64(x uint64) {
if e.err != nil {
return
}
e.err = binary.Write(e.w, binary.LittleEndian, x)
}
func (e *encoder) writeFloat32(x float32) {
if e.err != nil {
return
}
e.err = binary.Write(e.w, binary.LittleEndian, x)
}
func (e *encoder) writeFloat64(x float64) {
if e.err != nil {
return
}
e.err = binary.Write(e.w, binary.LittleEndian, x)
}
type byteReader interface {
io.Reader
io.ByteReader
}
// byteReaderAdapter embellishes an io.Reader with a ReadByte method,
// so that it implements the io.ByteReader interface.
type byteReaderAdapter struct {
io.Reader
}
func (b byteReaderAdapter) ReadByte() (byte, error) {
buf := []byte{0}
_, err := io.ReadFull(b, buf)
return buf[0], err
}
func asByteReader(r io.Reader) byteReader {
if br, ok := r.(byteReader); ok {
return br
}
return byteReaderAdapter{r}
}
type decoder struct {
r byteReader // the real reader passed to Decode
err error
}
func (d *decoder) readBool() (x bool) {
if d.err != nil {
return
}
var val int8
d.err = binary.Read(d.r, binary.LittleEndian, &val)
return val == 1
}
func (d *decoder) readInt8() (x int8) {
if d.err != nil {
return
}
d.err = binary.Read(d.r, binary.LittleEndian, &x)
return
}
func (d *decoder) readInt16() (x int16) {
if d.err != nil {
return
}
d.err = binary.Read(d.r, binary.LittleEndian, &x)
return
}
func (d *decoder) readInt32() (x int32) {
if d.err != nil {
return
}
d.err = binary.Read(d.r, binary.LittleEndian, &x)
return
}
func (d *decoder) readInt64() (x int64) {
if d.err != nil {
return
}
d.err = binary.Read(d.r, binary.LittleEndian, &x)
return
}
func (d *decoder) readUint8() (x uint8) {
if d.err != nil {
return
}
x, d.err = d.r.ReadByte()
return
}
func (d *decoder) readUint32() (x uint32) {
if d.err != nil {
return
}
d.err = binary.Read(d.r, binary.LittleEndian, &x)
return
}
func (d *decoder) readUint64() (x uint64) {
if d.err != nil {
return
}
d.err = binary.Read(d.r, binary.LittleEndian, &x)
return
}
func (d *decoder) readFloat32() (x float32) {
if d.err != nil {
return
}
d.err = binary.Read(d.r, binary.LittleEndian, &x)
return
}
func (d *decoder) readFloat64() (x float64) {
if d.err != nil {
return
}
d.err = binary.Read(d.r, binary.LittleEndian, &x)
return
}
func (d *decoder) readUvarint() (x uint64) {
if d.err != nil {
return
}
x, d.err = binary.ReadUvarint(d.r)
return
}

143
vendor/github.com/golang/geo/s2/interleave.go generated vendored Normal file
View file

@ -0,0 +1,143 @@
// Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
/*
The lookup table below can convert a sequence of interleaved 8 bits into
non-interleaved 4 bits. The table can convert both odd and even bits at the
same time, and lut[x & 0x55] converts the even bits (bits 0, 2, 4 and 6),
while lut[x & 0xaa] converts the odd bits (bits 1, 3, 5 and 7).

The lookup table below was generated using the following python code:

	def deinterleave(bits):
	  if bits == 0: return 0
	  if bits < 4: return 1
	  return deinterleave(bits / 4) * 2 + deinterleave(bits & 3)

	for i in range(256): print "0x%x," % deinterleave(i),
*/
var deinterleaveLookup = [256]uint32{
	0x0, 0x1, 0x1, 0x1, 0x2, 0x3, 0x3, 0x3,
	0x2, 0x3, 0x3, 0x3, 0x2, 0x3, 0x3, 0x3,
	0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7,
	0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7,
	0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7,
	0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7,
	0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7,
	0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7,

	0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb,
	0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb,
	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,

	0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb,
	0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb,
	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,

	0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb,
	0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb,
	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
	0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf,
	0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf,
}

// deinterleaveUint32 decodes the interleaved values: the even bits of code
// are gathered into x and the odd bits into y, one byte (8 interleaved bits
// -> 4 output bits per value) at a time.
func deinterleaveUint32(code uint64) (uint32, uint32) {
	var x, y uint32
	for i := uint(0); i < 8; i++ {
		b := (code >> (8 * i)) & 0xff
		x |= deinterleaveLookup[b&0x55] << (4 * i)
		y |= deinterleaveLookup[b&0xaa] << (4 * i)
	}
	return x, y
}
// interleaveLookup spreads the 8 bits of an index byte into the even bit
// positions of a 16-bit value, leaving the odd positions zero.
var interleaveLookup = [256]uint64{
	0x0000, 0x0001, 0x0004, 0x0005, 0x0010, 0x0011, 0x0014, 0x0015,
	0x0040, 0x0041, 0x0044, 0x0045, 0x0050, 0x0051, 0x0054, 0x0055,
	0x0100, 0x0101, 0x0104, 0x0105, 0x0110, 0x0111, 0x0114, 0x0115,
	0x0140, 0x0141, 0x0144, 0x0145, 0x0150, 0x0151, 0x0154, 0x0155,
	0x0400, 0x0401, 0x0404, 0x0405, 0x0410, 0x0411, 0x0414, 0x0415,
	0x0440, 0x0441, 0x0444, 0x0445, 0x0450, 0x0451, 0x0454, 0x0455,
	0x0500, 0x0501, 0x0504, 0x0505, 0x0510, 0x0511, 0x0514, 0x0515,
	0x0540, 0x0541, 0x0544, 0x0545, 0x0550, 0x0551, 0x0554, 0x0555,

	0x1000, 0x1001, 0x1004, 0x1005, 0x1010, 0x1011, 0x1014, 0x1015,
	0x1040, 0x1041, 0x1044, 0x1045, 0x1050, 0x1051, 0x1054, 0x1055,
	0x1100, 0x1101, 0x1104, 0x1105, 0x1110, 0x1111, 0x1114, 0x1115,
	0x1140, 0x1141, 0x1144, 0x1145, 0x1150, 0x1151, 0x1154, 0x1155,
	0x1400, 0x1401, 0x1404, 0x1405, 0x1410, 0x1411, 0x1414, 0x1415,
	0x1440, 0x1441, 0x1444, 0x1445, 0x1450, 0x1451, 0x1454, 0x1455,
	0x1500, 0x1501, 0x1504, 0x1505, 0x1510, 0x1511, 0x1514, 0x1515,
	0x1540, 0x1541, 0x1544, 0x1545, 0x1550, 0x1551, 0x1554, 0x1555,

	0x4000, 0x4001, 0x4004, 0x4005, 0x4010, 0x4011, 0x4014, 0x4015,
	0x4040, 0x4041, 0x4044, 0x4045, 0x4050, 0x4051, 0x4054, 0x4055,
	0x4100, 0x4101, 0x4104, 0x4105, 0x4110, 0x4111, 0x4114, 0x4115,
	0x4140, 0x4141, 0x4144, 0x4145, 0x4150, 0x4151, 0x4154, 0x4155,
	0x4400, 0x4401, 0x4404, 0x4405, 0x4410, 0x4411, 0x4414, 0x4415,
	0x4440, 0x4441, 0x4444, 0x4445, 0x4450, 0x4451, 0x4454, 0x4455,
	0x4500, 0x4501, 0x4504, 0x4505, 0x4510, 0x4511, 0x4514, 0x4515,
	0x4540, 0x4541, 0x4544, 0x4545, 0x4550, 0x4551, 0x4554, 0x4555,

	0x5000, 0x5001, 0x5004, 0x5005, 0x5010, 0x5011, 0x5014, 0x5015,
	0x5040, 0x5041, 0x5044, 0x5045, 0x5050, 0x5051, 0x5054, 0x5055,
	0x5100, 0x5101, 0x5104, 0x5105, 0x5110, 0x5111, 0x5114, 0x5115,
	0x5140, 0x5141, 0x5144, 0x5145, 0x5150, 0x5151, 0x5154, 0x5155,
	0x5400, 0x5401, 0x5404, 0x5405, 0x5410, 0x5411, 0x5414, 0x5415,
	0x5440, 0x5441, 0x5444, 0x5445, 0x5450, 0x5451, 0x5454, 0x5455,
	0x5500, 0x5501, 0x5504, 0x5505, 0x5510, 0x5511, 0x5514, 0x5515,
	0x5540, 0x5541, 0x5544, 0x5545, 0x5550, 0x5551, 0x5554, 0x5555,
}

// interleaveUint32 interleaves the given arguments into the return value.
//
// The 0-bit in x will be the 0-bit in the return value.
// The 0-bit in y will be the 1-bit in the return value.
// The 1-bit of x will be the 2-bit in the return value, and so on.
func interleaveUint32(x, y uint32) uint64 {
	var r uint64
	for i := uint(0); i < 4; i++ {
		// Each input byte expands to 16 output bits; y lands one bit
		// above x.
		r |= interleaveLookup[(x>>(8*i))&0xff] << (16 * i)
		r |= interleaveLookup[(y>>(8*i))&0xff] << (16*i + 1)
	}
	return r
}

View file

@ -1,18 +1,16 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2

1467
vendor/github.com/golang/geo/s2/loop.go generated vendored

File diff suppressed because it is too large Load diff

View file

@ -1,18 +1,16 @@
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2

View file

@ -1,18 +1,16 @@
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2

88
vendor/github.com/golang/geo/s2/nthderivative.go generated vendored Normal file
View file

@ -0,0 +1,88 @@
// Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
// nthDerivativeCoder provides Nth Derivative Coding.
// (In signal processing disciplines, this is known as N-th Delta Coding.)
//
// Good for varint coding integer sequences with polynomial trends.
//
// Instead of coding a sequence of values directly, code its nth-order discrete
// derivative. Overflow in integer addition and subtraction makes this a
// lossless transform.
//
//	            constant     linear      quadratic
//	             trend       trend         trend
//	           /        \  /        \  /           \_
//	input     |0  0  0  0  1  2  3  4  9  16  25  36
//	0th derivative(identity) |0  0  0  0  1  2  3  4  9  16  25  36
//	1st derivative(delta coding) |   0  0  0  1  1  1  1  5   7   9  11
//	2nd derivative(linear prediction) |      0  0  1  0  0  0  4   2   2   2
//
// Higher-order codings can break even or be detrimental on other sequences,
// e.g. random or oscillating input.
//
// Note that the nth derivative isn't available until sequence item n. Earlier
// values are coded at lower order.
type nthDerivativeCoder struct {
	n, m   int       // derivative order, and how many memory slots are filled so far
	memory [10]int32 // most recent values at each derivative level
}

// newNthDerivativeCoder returns a new coder, where n is the derivative order
// of the encoder (the N in NthDerivative). n must be within [0,10].
func newNthDerivativeCoder(n int) *nthDerivativeCoder {
	c := &nthDerivativeCoder{n: n}
	if n < 0 || n > len(c.memory) {
		panic("unsupported n. Must be within [0,10].")
	}
	return c
}

// encode transforms the next input value k into its nth-order delta.
func (c *nthDerivativeCoder) encode(k int32) int32 {
	for i := 0; i < c.m; i++ {
		// Replace the stored value with k and continue with the delta.
		k, c.memory[i] = k-c.memory[i], k
	}
	if c.m < c.n {
		c.memory[c.m] = k
		c.m++
	}
	return k
}

// decode inverts encode, reconstructing the next original value from delta k.
func (c *nthDerivativeCoder) decode(k int32) int32 {
	if c.m < c.n {
		c.m++
	}
	for i := c.m - 1; i >= 0; i-- {
		k += c.memory[i]
		c.memory[i] = k
	}
	return k
}

View file

@ -1,18 +1,16 @@
/*
Copyright 2016 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
@ -118,8 +116,8 @@ func (p PaddedCell) Level() int {
// Center returns the center of this cell.
func (p PaddedCell) Center() Point {
ijSize := sizeIJ(p.level)
si := uint64(2*p.iLo + ijSize)
ti := uint64(2*p.jLo + ijSize)
si := uint32(2*p.iLo + ijSize)
ti := uint32(2*p.jLo + ijSize)
return Point{faceSiTiToXYZ(p.id.Face(), si, ti).Normalize()}
}
@ -130,8 +128,8 @@ func (p *PaddedCell) Middle() r2.Rect {
// time (i.e., for cells where the recursion terminates).
if p.middle.IsEmpty() {
ijSize := sizeIJ(p.level)
u := stToUV(siTiToST(uint64(2*p.iLo + ijSize)))
v := stToUV(siTiToST(uint64(2*p.jLo + ijSize)))
u := stToUV(siTiToST(uint32(2*p.iLo + ijSize)))
v := stToUV(siTiToST(uint32(2*p.jLo + ijSize)))
p.middle = r2.Rect{
r1.Interval{u - p.padding, u + p.padding},
r1.Interval{v - p.padding, v + p.padding},
@ -164,7 +162,7 @@ func (p PaddedCell) EntryVertex() Point {
i += ijSize
j += ijSize
}
return Point{faceSiTiToXYZ(p.id.Face(), uint64(2*i), uint64(2*j)).Normalize()}
return Point{faceSiTiToXYZ(p.id.Face(), uint32(2*i), uint32(2*j)).Normalize()}
}
// ExitVertex returns the vertex where the space-filling curve exits this cell.
@ -179,7 +177,7 @@ func (p PaddedCell) ExitVertex() Point {
} else {
j += ijSize
}
return Point{faceSiTiToXYZ(p.id.Face(), uint64(2*i), uint64(2*j)).Normalize()}
return Point{faceSiTiToXYZ(p.id.Face(), uint32(2*i), uint32(2*j)).Normalize()}
}
// ShrinkToFit returns the smallest CellID that contains all descendants of this
@ -205,8 +203,8 @@ func (p *PaddedCell) ShrinkToFit(rect r2.Rect) CellID {
}
ijSize := sizeIJ(p.level)
if rect.X.Contains(stToUV(siTiToST(uint64(2*p.iLo+ijSize)))) ||
rect.Y.Contains(stToUV(siTiToST(uint64(2*p.jLo+ijSize)))) {
if rect.X.Contains(stToUV(siTiToST(uint32(2*p.iLo+ijSize)))) ||
rect.Y.Contains(stToUV(siTiToST(uint32(2*p.jLo+ijSize)))) {
return p.id
}

View file

@ -1,23 +1,24 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
import (
"fmt"
"io"
"math"
"sort"
"github.com/golang/geo/r3"
"github.com/golang/geo/s1"
@ -29,6 +30,18 @@ type Point struct {
r3.Vector
}
// sortPoints sorts the slice of Points in place.
func sortPoints(e []Point) {
	sort.Sort(points(e))
}

// points implements sort.Interface for slices of Point, ordering them by
// the comparison of their underlying r3.Vector values (via Cmp).
type points []Point

func (p points) Len() int           { return len(p) }
func (p points) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p points) Less(i, j int) bool { return p[i].Cmp(p[j].Vector) == -1 }
// PointFromCoords creates a new normalized point from coordinates.
//
// This always returns a valid point. If the given coordinates can not be normalized
@ -160,11 +173,7 @@ func PointArea(a, b, c Point) float64 {
dmin := s - math.Max(sa, math.Max(sb, sc))
if dmin < 1e-2*s*s*s*s*s {
// This triangle is skinny enough to use Girard's formula.
ab := a.PointCross(b)
bc := b.PointCross(c)
ac := a.PointCross(c)
area := math.Max(0.0, float64(ab.Angle(ac.Vector)-ab.Angle(bc.Vector)+bc.Angle(ac.Vector)))
area := GirardArea(a, b, c)
if dmin < s*0.1*area {
return area
}
@ -176,6 +185,37 @@ func PointArea(a, b, c Point) float64 {
math.Tan(0.5*(s-sb))*math.Tan(0.5*(s-sc)))))
}
// GirardArea returns the area of the triangle computed using Girard's formula.
// All points should be unit length, and no two points should be antipodal.
//
// This method is about twice as fast as PointArea() but has poor relative
// accuracy for small triangles. The maximum error is about 5e-15 (about
// 0.25 square meters on the Earth's surface) and the average error is about
// 1e-15. These bounds apply to triangles of any size, even as the maximum
// edge length of the triangle approaches 180 degrees. But note that for
// such triangles, tiny perturbations of the input points can change the
// true mathematical area dramatically.
func GirardArea(a, b, c Point) float64 {
	// This is equivalent to the usual Girard's formula but is slightly more
	// accurate, faster to compute, and handles a == b == c without a special
	// case. PointCross is necessary to get good accuracy when two of the
	// input points are very close together.
	crossAB := a.PointCross(b)
	crossBC := b.PointCross(c)
	crossAC := a.PointCross(c)

	result := float64(crossAB.Angle(crossAC.Vector) - crossAB.Angle(crossBC.Vector) + crossBC.Angle(crossAC.Vector))
	// Clamp to zero; rounding can produce a slightly negative value.
	if result < 0 {
		result = 0
	}
	return result
}
// SignedArea returns a positive value for counterclockwise triangles and a
// negative value otherwise (similar to PointArea).
func SignedArea(a, b, c Point) float64 {
	orientation := float64(RobustSign(a, b, c))
	return orientation * PointArea(a, b, c)
}
// TrueCentroid returns the true centroid of the spherical triangle ABC multiplied by the
// signed area of spherical triangle ABC. The result is not normalized.
// The reasons for multiplying by the signed area are (1) this is the quantity
@ -307,13 +347,97 @@ func (p Point) ContainsPoint(other Point) bool {
return p.Contains(other)
}
// CellUnionBound computes a covering of the Point. It delegates to the
// covering of the point's bounding cap.
func (p Point) CellUnionBound() []CellID {
	return p.CapBound().CellUnionBound()
}

// Contains reports if this Point contains the other Point, which for a
// point means exact equality.
// (This method matches all other s2 types where the reflexive Contains
// method does not contain the type's name.)
func (p Point) Contains(other Point) bool { return p == other }
// TODO: Differences from C++
// Rotate
// Angle
// TurnAngle
// SignedArea
// Encode encodes the Point to the given writer as a version byte followed
// by the three raw float64 coordinates.
func (p Point) Encode(w io.Writer) error {
	e := &encoder{w: w}
	p.encode(e)
	return e.err
}

// encode writes the version byte and the X, Y, Z coordinates to e.
// Any write error is accumulated in e.err.
func (p Point) encode(e *encoder) {
	e.writeInt8(encodingVersion)
	e.writeFloat64(p.X)
	e.writeFloat64(p.Y)
	e.writeFloat64(p.Z)
}

// Decode decodes the Point from the given reader.
func (p *Point) Decode(r io.Reader) error {
	d := &decoder{r: asByteReader(r)}
	p.decode(d)
	return d.err
}

// decode reads and validates the version byte, then reads the three
// coordinates. On failure d.err is set and p may be partially updated.
func (p *Point) decode(d *decoder) {
	version := d.readInt8()
	if d.err != nil {
		return
	}
	if version != encodingVersion {
		d.err = fmt.Errorf("only version %d is supported", encodingVersion)
		return
	}
	p.X = d.readFloat64()
	p.Y = d.readFloat64()
	p.Z = d.readFloat64()
}
// Angle returns the interior angle at the vertex B in the triangle ABC. The
// return value is always in the range [0, pi]. All points should be
// normalized. Ensures that Angle(a,b,c) == Angle(c,b,a) for all a,b,c.
//
// The angle is undefined if A or C is diametrically opposite from B, and
// becomes numerically unstable as the length of edge AB or BC approaches
// 180 degrees.
func Angle(a, b, c Point) s1.Angle {
	// PointCross (rather than a plain cross product) keeps good accuracy
	// when two of the points are very close together.
	return a.PointCross(b).Angle(c.PointCross(b).Vector)
}
// TurnAngle returns the exterior angle at vertex B in the triangle ABC. The
// return value is positive if ABC is counterclockwise and negative otherwise.
// If you imagine an ant walking from A to B to C, this is the angle that the
// ant turns at vertex B (positive = left = CCW, negative = right = CW).
// This quantity is also known as the "geodesic curvature" at B.
//
// Ensures that TurnAngle(a,b,c) == -TurnAngle(c,b,a) for all distinct
// a,b,c. The result is undefined if (a == b || b == c), but is either
// -Pi or Pi if (a == c). All points should be normalized.
func TurnAngle(a, b, c Point) s1.Angle {
	// We use PointCross to get good accuracy when two points are very
	// close together, and RobustSign to ensure that the sign is correct for
	// turns that are close to 180 degrees.
	turn := a.PointCross(b).Angle(b.PointCross(c).Vector)

	// Don't return RobustSign * angle because it is legal to have (a == c).
	if RobustSign(a, b, c) != CounterClockwise {
		turn = -turn
	}
	return turn
}
// Rotate the given point about the given axis by the given angle. p and
// axis must be unit length; angle has no restrictions (e.g., it can be
// positive, negative, greater than 360 degrees, etc).
func Rotate(p, axis Point, angle s1.Angle) Point {
	// Let M be the plane through P that is perpendicular to axis, and let
	// center be the point where M intersects axis. We construct a
	// right-handed orthogonal frame (dx, dy, center) such that dx is the
	// vector from center to P, and dy has the same length as dx. The
	// result can then be expressed as (cos(angle)*dx + sin(angle)*dy + center).
	center := axis.Mul(p.Dot(axis.Vector))
	dx := p.Sub(center)
	dy := axis.Cross(p.Vector)

	cosA := math.Cos(angle.Radians())
	sinA := math.Sin(angle.Radians())

	// Mathematically the result is unit length, but normalization is necessary
	// to ensure that numerical errors don't accumulate.
	return Point{dx.Mul(cosA).Add(dy.Mul(sinA)).Add(center).Normalize()}
}

319
vendor/github.com/golang/geo/s2/pointcompression.go generated vendored Normal file
View file

@ -0,0 +1,319 @@
// Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
import (
"errors"
"fmt"
"github.com/golang/geo/r3"
)
// maxEncodedVertices is the maximum number of vertices, in a row, to be
// encoded or decoded. On decode, this defends against malicious encodings
// that try and have us exceed RAM.
const maxEncodedVertices = 50000000
// xyzFaceSiTi represents the XYZ and (face, si, ti) coordinates of a Point
// and, if this point is equal to the center of a Cell, the level of this cell
// (-1 otherwise). This is used for Loops and Polygons to store data in a more
// compressed format.
type xyzFaceSiTi struct {
	xyz    Point  // the point in XYZ coordinates
	face   int    // the cube face the point lies on
	si, ti uint32 // discrete (si, ti) coordinates on that face
	level  int    // cell level if this is a cell center, -1 otherwise
}

// derivativeEncodingOrder is the derivative order used by the
// nthDerivativeCoder when compressing (pi, qi) sequences (linear prediction).
const derivativeEncodingOrder = 2
// appendFace run-length encodes face onto faces: it extends the trailing run
// when it has the same face, and otherwise starts a new run of length 1.
func appendFace(faces []faceRun, face int) []faceRun {
	if n := len(faces); n > 0 && faces[n-1].face == face {
		faces[n-1].count++
		return faces
	}
	return append(faces, faceRun{face: face, count: 1})
}
// encodePointsCompressed uses an optimized compressed format to encode the
// given values at the given cell level: faces are run-length encoded, the
// cell-center coordinates are derivative encoded in (pi, qi) space, and any
// vertices that are not centers of level-level cells are appended afterwards
// at full float64 precision, keyed by their index in the sequence.
func encodePointsCompressed(e *encoder, vertices []xyzFaceSiTi, level int) {
	// Run-length encode the sequence of faces.
	var faces []faceRun
	for _, v := range vertices {
		faces = appendFace(faces, v.face)
	}
	encodeFaces(e, faces)
	// Transform each (si, ti) into the level-dependent (pi, qi) space.
	type piQi struct {
		pi, qi uint32
	}
	verticesPiQi := make([]piQi, len(vertices))
	for i, v := range vertices {
		verticesPiQi[i] = piQi{siTitoPiQi(v.si, level), siTitoPiQi(v.ti, level)}
	}
	piCoder, qiCoder := newNthDerivativeCoder(derivativeEncodingOrder), newNthDerivativeCoder(derivativeEncodingOrder)
	for i, v := range verticesPiQi {
		f := encodePointCompressed
		if i == 0 {
			// The first point will be just the (pi, qi) coordinates
			// of the Point. NthDerivativeCoder will not save anything
			// in that case, so we encode in fixed format rather than varint
			// to avoid the varint overhead.
			f = encodeFirstPointFixedLength
		}
		f(e, v.pi, v.qi, level, piCoder, qiCoder)
	}
	// Vertices that are not centers of level-level cells are stored exactly
	// after the compressed stream, each preceded by its index.
	var offCenter []int
	for i, v := range vertices {
		if v.level != level {
			offCenter = append(offCenter, i)
		}
	}
	e.writeUvarint(uint64(len(offCenter)))
	for _, idx := range offCenter {
		e.writeUvarint(uint64(idx))
		e.writeFloat64(vertices[idx].xyz.X)
		e.writeFloat64(vertices[idx].xyz.Y)
		e.writeFloat64(vertices[idx].xyz.Z)
	}
}
// encodeFirstPointFixedLength encodes the first point's (pi, qi) using a
// fixed-length little-endian encoding of 2*ceil(level/8) bytes rather than a
// varint (see encodePointsCompressed for why). The coders are advanced so
// that subsequent points are derivative encoded relative to this one.
func encodeFirstPointFixedLength(e *encoder, pi, qi uint32, level int, piCoder, qiCoder *nthDerivativeCoder) {
	// Do not ZigZagEncode the first point, since it cannot be negative.
	codedPi, codedQi := piCoder.encode(int32(pi)), qiCoder.encode(int32(qi))
	// Interleave to reduce overhead from two partial bytes to one.
	interleaved := interleaveUint32(uint32(codedPi), uint32(codedQi))
	// Write as little endian.
	bytesRequired := (level + 7) / 8 * 2
	for i := 0; i < bytesRequired; i++ {
		e.writeUint8(uint8(interleaved))
		interleaved >>= 8
	}
}
// encodePointCompressed encodes points into e.
// Given a sequence of Points assumed to be the center of level-k cells,
// compresses it into a stream using the following method:
// - decompose the points into (face, si, ti) tuples.
// - run-length encode the faces, combining face number and count into a
//     varint32. See the faceRun struct.
// - right shift the (si, ti) to remove the part that's constant for all cells
//     of level-k. The result is called the (pi, qi) space.
// - 2nd derivative encode the pi and qi sequences (linear prediction)
// - zig-zag encode all derivative values but the first, which cannot be
//     negative
// - interleave the zig-zag encoded values
// - encode the first interleaved value in a fixed length encoding
//     (varint would make this value larger)
// - encode the remaining interleaved values as varint64s, as the
//     derivative encoding should make the values small.
// In addition, provides a lossless method to compress a sequence of points even
// if some points are not the center of level-k cells. These points are stored
// exactly, using 3 double precision values, after the above encoded string,
// together with their index in the sequence (this leads to some redundancy - it
// is expected that only a small fraction of the points are not cell centers).
//
// To encode leaf cells, this requires 8 bytes for the first vertex plus
// an average of 3.8 bytes for each additional vertex, when computed on
// Google's geographic repository.
//
// See decodePointCompressed for the inverse transform.
func encodePointCompressed(e *encoder, pi, qi uint32, level int, piCoder, qiCoder *nthDerivativeCoder) {
	// ZigZagEncode, as varint requires the maximum number of bytes for
	// negative numbers.
	zzPi := zigzagEncode(piCoder.encode(int32(pi)))
	zzQi := zigzagEncode(qiCoder.encode(int32(qi)))
	// Interleave to reduce overhead from two partial bytes to one.
	interleaved := interleaveUint32(zzPi, zzQi)
	e.writeUvarint(interleaved)
}
// faceRun describes a run of count consecutive vertices that all lie on the
// same cube face.
type faceRun struct {
	face, count int
}
// decodeFaceRun reads one run-length encoded face entry from d. The varint
// holds numFaces*count + face (see encodeFaceRun). A non-positive count on a
// clean read marks the stream as corrupt by setting d.err.
func decodeFaceRun(d *decoder) faceRun {
	faceAndCount := d.readUvarint()
	ret := faceRun{
		face:  int(faceAndCount % numFaces),
		count: int(faceAndCount / numFaces),
	}
	if ret.count <= 0 && d.err == nil {
		d.err = errors.New("non-positive count for face run")
	}
	return ret
}
// decodeFaces reads face runs from d until the runs cover numVertices
// vertices. It returns nil if the decoder reports an error part-way through.
func decodeFaces(numVertices int, d *decoder) []faceRun {
	var runs []faceRun
	covered := 0
	for covered < numVertices {
		run := decodeFaceRun(d)
		if d.err != nil {
			return nil
		}
		runs = append(runs, run)
		covered += run.count
	}
	return runs
}
// encodeFaceRun encodes fr as a single varint64 with value numFaces*count + face.
func encodeFaceRun(e *encoder, fr faceRun) {
	// It isn't necessary to encode the number of faces left for the last run,
	// but since this would only help if there were more than 21 faces, it will
	// be a small overall savings, much smaller than the bound encoding.
	e.writeUvarint(numFaces*uint64(fr.count) + uint64(fr.face))
}
// encodeFaces writes each run in frs to e, in order.
func encodeFaces(e *encoder, frs []faceRun) {
	for i := range frs {
		encodeFaceRun(e, frs[i])
	}
}
// facesIterator yields, one vertex at a time, the face each successive vertex
// lies on, by walking a slice of run-length encoded faceRuns.
type facesIterator struct {
	faces []faceRun
	// How often have we yet shown the current face?
	numCurrentFaceShown int
	// curFace is the face for the vertex most recently produced by next.
	curFace int
}

// next advances the iterator to the next vertex's face, making it available
// in curFace. It reports false once all runs are exhausted.
func (fi *facesIterator) next() (ok bool) {
	if len(fi.faces) == 0 {
		return false
	}
	fi.curFace = fi.faces[0].face
	fi.numCurrentFaceShown++
	// Advance to the next run once the current one has been fully consumed.
	if fi.faces[0].count <= fi.numCurrentFaceShown {
		fi.faces = fi.faces[1:]
		fi.numCurrentFaceShown = 0
	}
	return true
}
// decodePointsCompressed decodes points encoded by encodePointsCompressed
// into target, whose length determines how many points are read. On
// malformed input d.err is set and target may be only partially filled.
func decodePointsCompressed(d *decoder, level int, target []Point) {
	faces := decodeFaces(len(target), d)
	piCoder := newNthDerivativeCoder(derivativeEncodingOrder)
	qiCoder := newNthDerivativeCoder(derivativeEncodingOrder)
	iter := facesIterator{faces: faces}
	for i := range target {
		decodeFn := decodePointCompressed
		if i == 0 {
			// The first point was written fixed length, not varint
			// (see encodeFirstPointFixedLength).
			decodeFn = decodeFirstPointFixedLength
		}
		pi, qi := decodeFn(d, level, piCoder, qiCoder)
		if ok := iter.next(); !ok && d.err == nil {
			d.err = fmt.Errorf("ran out of faces at target %d", i)
			return
		}
		target[i] = Point{facePiQitoXYZ(iter.curFace, pi, qi, level)}
	}
	// Restore vertices that were not centers of level-level cells: they were
	// stored exactly after the compressed stream, each preceded by its index.
	numOffCenter := int(d.readUvarint())
	if d.err != nil {
		return
	}
	if numOffCenter > len(target) {
		d.err = fmt.Errorf("numOffCenter = %d, should be at most len(target) = %d", numOffCenter, len(target))
		return
	}
	for i := 0; i < numOffCenter; i++ {
		idx := int(d.readUvarint())
		if d.err != nil {
			return
		}
		if idx >= len(target) {
			d.err = fmt.Errorf("off center index = %d, should be < len(target) = %d", idx, len(target))
			return
		}
		target[idx].X = d.readFloat64()
		target[idx].Y = d.readFloat64()
		target[idx].Z = d.readFloat64()
	}
}
// decodeFirstPointFixedLength is the inverse of encodeFirstPointFixedLength:
// it reads 2*ceil(level/8) little-endian bytes, deinterleaves them into the
// pi and qi halves, and feeds those through the derivative coders.
func decodeFirstPointFixedLength(d *decoder, level int, piCoder, qiCoder *nthDerivativeCoder) (pi, qi uint32) {
	bytesToRead := (level + 7) / 8 * 2
	var interleaved uint64
	for i := 0; i < bytesToRead; i++ {
		rr := d.readUint8()
		interleaved |= (uint64(rr) << uint(i*8))
	}
	piCoded, qiCoded := deinterleaveUint32(interleaved)
	return uint32(piCoder.decode(int32(piCoded))), uint32(qiCoder.decode(int32(qiCoded)))
}
// zigzagEncode maps signed values to unsigned ones so that values of small
// magnitude (positive or negative) become small unsigned values:
// 0, -1, 1, -2, ... map to 0, 1, 2, 3, ...
func zigzagEncode(x int32) uint32 {
	sign := uint32(x >> 31) // all one bits when x < 0, zero otherwise
	return (uint32(x) << 1) ^ sign
}
// zigzagDecode is the inverse of zigzagEncode:
// 0, 1, 2, 3, ... map back to 0, -1, 1, -2, ...
func zigzagDecode(x uint32) int32 {
	mask := uint32((int32(x&1) << 31) >> 31) // all one bits when x is odd
	return int32((x >> 1) ^ mask)
}
// decodePointCompressed is the inverse of encodePointCompressed: it reads one
// varint, deinterleaves it into the zig-zag encoded pi and qi derivatives,
// and integrates them back through the derivative coders.
func decodePointCompressed(d *decoder, level int, piCoder, qiCoder *nthDerivativeCoder) (pi, qi uint32) {
	interleavedZigZagEncodedDerivPiQi := d.readUvarint()
	piZigzag, qiZigzag := deinterleaveUint32(interleavedZigZagEncodedDerivPiQi)
	return uint32(piCoder.decode(zigzagDecode(piZigzag))), uint32(qiCoder.decode(zigzagDecode(qiZigzag)))
}
// We introduce a new coordinate system (pi, qi), which is (si, ti)
// with the bits that are constant for cells of that level shifted
// off to the right.
// si = round(s * 2^31)
// pi = si >> (31 - level)
//    = floor(s * 2^level)
// If the point has been snapped to the level, the bits that are
// shifted off will be a 1 in the msb, then 0s after that, so the
// fractional part discarded by the cast is (close to) 0.5.

// stToPiQi returns the s value transformed to the (pi, qi) coordinate space
// at the given level, i.e. floor(s * 2^level).
func stToPiQi(s float64, level uint) uint32 {
	cellsAtLevel := float64(int(1) << level)
	return uint32(s * cellsAtLevel)
}
// siTitoPiQi returns the siTi value transformed into the (pi, qi) coordinate
// space. encodeFirstPointFixedLength encodes the return value using level
// bits, so siTi is clamped to the range [0, 2**level - 1] before encoding.
// This is okay because if siTi == maxSiTi, then it is not a cell center
// anyway and will be encoded separately as an off-center point.
func siTitoPiQi(siTi uint32, level int) uint32 {
	clamped := uint(siTi)
	if clamped > maxSiTi-1 {
		clamped = maxSiTi - 1
	}
	return uint32(clamped >> (maxLevel + 1 - uint(level)))
}
// piQiToST returns pi transformed back to ST space, recovering the position
// at the center of the cell. If the point was snapped to the center of the
// cell, then math.Modf(s * 2^level) == 0.5, so inverting stToPiQi gives:
//   s = (pi + 0.5) / 2^level.
func piQiToST(pi uint32, level int) float64 {
	cellsAtLevel := float64(int(1) << uint(level))
	return (float64(pi) + 0.5) / cellsAtLevel
}
// facePiQitoXYZ converts a (face, pi, qi) cell-center coordinate at the given
// level into a normalized vector in XYZ space.
func facePiQitoXYZ(face int, pi, qi uint32, level int) r3.Vector {
	return faceUVToXYZ(face, stToUV(piQiToST(pi, level)), stToUV(piQiToST(qi, level))).Normalize()
}

File diff suppressed because it is too large Load diff

View file

@ -1,22 +1,22 @@
/*
Copyright 2016 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
import (
"fmt"
"io"
"math"
"github.com/golang/geo/s1"
@ -73,8 +73,8 @@ func (p *Polyline) Centroid() Point {
return centroid
}
// Equals reports whether the given Polyline is exactly the same as this one.
func (p *Polyline) Equals(b *Polyline) bool {
// Equal reports whether the given Polyline is exactly the same as this one.
func (p *Polyline) Equal(b *Polyline) bool {
if len(*p) != len(*b) {
return false
}
@ -146,6 +146,11 @@ func (p *Polyline) ContainsPoint(point Point) bool {
return false
}
// CellUnionBound computes a covering of the Polyline.
func (p *Polyline) CellUnionBound() []CellID {
return p.CapBound().CellUnionBound()
}
// NumEdges returns the number of edges in this shape.
func (p *Polyline) NumEdges() int {
if len(*p) == 0 {
@ -155,28 +160,8 @@ func (p *Polyline) NumEdges() int {
}
// Edge returns endpoints for the given edge index.
func (p *Polyline) Edge(i int) (a, b Point) {
return (*p)[i], (*p)[i+1]
}
// dimension returns the dimension of the geometry represented by this Polyline.
func (p *Polyline) dimension() dimension { return polylineGeometry }
// numChains reports the number of contiguous edge chains in this Polyline.
func (p *Polyline) numChains() int {
if p.NumEdges() >= 1 {
return 1
}
return 0
}
// chainStart returns the id of the first edge in the i-th edge chain in this Polyline.
func (p *Polyline) chainStart(i int) int {
if i == 0 {
return 0
}
return p.NumEdges()
func (p *Polyline) Edge(i int) Edge {
return Edge{(*p)[i], (*p)[i+1]}
}
// HasInterior returns false as Polylines are not closed.
@ -184,9 +169,213 @@ func (p *Polyline) HasInterior() bool {
return false
}
// ContainsOrigin returns false because there is no interior to contain s2.Origin.
func (p *Polyline) ContainsOrigin() bool {
return false
// ReferencePoint returns the default reference point with negative containment because Polylines are not closed.
func (p *Polyline) ReferencePoint() ReferencePoint {
	return OriginReferencePoint(false)
}

// NumChains reports the number of contiguous edge chains in this Polyline:
// 1 when it has any edges, 0 otherwise.
func (p *Polyline) NumChains() int {
	return minInt(1, p.NumEdges())
}

// Chain returns the i-th edge Chain in the Shape. A polyline has at most a
// single chain spanning all of its edges, so chainID is ignored.
func (p *Polyline) Chain(chainID int) Chain {
	return Chain{0, p.NumEdges()}
}

// ChainEdge returns the j-th edge of the i-th edge Chain.
func (p *Polyline) ChainEdge(chainID, offset int) Edge {
	return Edge{(*p)[offset], (*p)[offset+1]}
}

// ChainPosition returns a pair (i, j) such that edgeID is the j-th edge of
// the i-th edge chain. Since a polyline has at most one chain, i is always 0.
func (p *Polyline) ChainPosition(edgeID int) ChainPosition {
	return ChainPosition{0, edgeID}
}

// dimension returns the dimension of the geometry represented by this Polyline.
func (p *Polyline) dimension() dimension { return polylineGeometry }
// findEndVertex reports the maximal end index such that the line segment
// between the vertex at the start index and the one at the returned index
// passes within the given tolerance of all interior vertices, in order.
func findEndVertex(p Polyline, tolerance s1.Angle, index int) int {
	// The basic idea is to keep track of the "pie wedge" of angles
	// from the starting vertex such that a ray from the starting
	// vertex at that angle will pass through the discs of radius
	// tolerance centered around all vertices processed so far.
	//
	// First we define a coordinate frame for the tangent and normal
	// spaces at the starting vertex. Essentially this means picking
	// three orthonormal vectors X,Y,Z such that X and Y span the
	// tangent plane at the starting vertex, and Z is up. We use
	// the coordinate frame to define a mapping from 3D direction
	// vectors to a one-dimensional ray angle in the range (-π,
	// π]. The angle of a direction vector is computed by
	// transforming it into the X,Y,Z basis, and then calculating
	// atan2(y,x). This mapping allows us to represent a wedge of
	// angles as a 1D interval. Since the interval wraps around, we
	// represent it as an Interval, i.e. an interval on the unit
	// circle.
	origin := p[index]
	frame := getFrame(origin)
	// As we go along, we keep track of the current wedge of angles
	// and the distance to the last vertex (which must be
	// non-decreasing).
	currentWedge := s1.FullInterval()
	var lastDistance s1.Angle
	for index++; index < len(p); index++ {
		candidate := p[index]
		distance := origin.Distance(candidate)
		// We don't allow simplification to create edges longer than
		// 90 degrees, to avoid numeric instability as lengths
		// approach 180 degrees. We do need to allow for original
		// edges longer than 90 degrees, though.
		if distance > math.Pi/2 && lastDistance > 0 {
			break
		}
		// Vertices must be in increasing order along the ray, except
		// for the initial disc around the origin.
		if distance < lastDistance && lastDistance > tolerance {
			break
		}
		lastDistance = distance
		// Points that are within the tolerance distance of the origin
		// do not constrain the ray direction, so we can ignore them.
		if distance <= tolerance {
			continue
		}
		// If the current wedge of angles does not contain the angle
		// to this vertex, then stop right now. Note that the wedge
		// of possible ray angles is not necessarily empty yet, but we
		// can't continue unless we are willing to backtrack to the
		// last vertex that was contained within the wedge (since we
		// don't create new vertices). This would be more complicated
		// and also make the worst-case running time more than linear.
		direction := toFrame(frame, candidate)
		center := math.Atan2(direction.Y, direction.X)
		if !currentWedge.Contains(center) {
			break
		}
		// To determine how this vertex constrains the possible ray
		// angles, consider the triangle ABC where A is the origin, B
		// is the candidate vertex, and C is one of the two tangent
		// points between A and the spherical cap of radius
		// tolerance centered at B. Then from the spherical law of
		// sines, sin(a)/sin(A) = sin(c)/sin(C), where a and c are
		// the lengths of the edges opposite A and C. In our case C
		// is a 90 degree angle, therefore A = asin(sin(a) / sin(c)).
		// Angle A is the half-angle of the allowable wedge.
		halfAngle := math.Asin(math.Sin(tolerance.Radians()) / math.Sin(distance.Radians()))
		target := s1.IntervalFromPointPair(center, center).Expanded(halfAngle)
		currentWedge = currentWedge.Intersection(target)
	}
	// We break out of the loop when we reach a vertex index that
	// can't be included in the line segment, so back up by one
	// vertex.
	return index - 1
}
// SubsampleVertices returns a subsequence of vertex indices such that the
// polyline connecting these vertices is never further than the given tolerance from
// the original polyline. Provided the first and last vertices are distinct,
// they are always preserved; if they are not, the subsequence may contain
// only a single index.
//
// Some useful properties of the algorithm:
//
//  - It runs in linear time.
//
//  - The output always represents a valid polyline. In particular, adjacent
//    output vertices are never identical or antipodal.
//
//  - The method is not optimal, but it tends to produce 2-3% fewer
//    vertices than the Douglas-Peucker algorithm with the same tolerance.
//
//  - The output is parametrically equivalent to the original polyline to
//    within the given tolerance. For example, if a polyline backtracks on
//    itself and then proceeds onwards, the backtracking will be preserved
//    (to within the given tolerance). This is different than the
//    Douglas-Peucker algorithm which only guarantees geometric equivalence.
func (p *Polyline) SubsampleVertices(tolerance s1.Angle) []int {
	var result []int

	if len(*p) < 1 {
		return result
	}

	// The first vertex is always kept.
	result = append(result, 0)
	// Negative tolerances are treated as zero.
	clampedTolerance := s1.Angle(math.Max(tolerance.Radians(), 0))

	// Greedily extend each kept segment as far as findEndVertex allows.
	for index := 0; index+1 < len(*p); {
		nextIndex := findEndVertex(*p, clampedTolerance, index)
		// Don't create duplicate adjacent vertices.
		if (*p)[nextIndex] != (*p)[index] {
			result = append(result, nextIndex)
		}
		index = nextIndex
	}

	return result
}
// Encode writes the Polyline to w in the s2 wire format and reports any
// error encountered while writing.
func (p Polyline) Encode(w io.Writer) error {
	enc := encoder{w: w}
	p.encode(&enc)
	return enc.err
}
// encode writes the version tag, the vertex count, and then the three
// coordinates of every vertex to the given encoder.
func (p Polyline) encode(e *encoder) {
	e.writeInt8(encodingVersion)
	e.writeUint32(uint32(len(p)))
	for i := range p {
		pt := p[i]
		e.writeFloat64(pt.X)
		e.writeFloat64(pt.Y)
		e.writeFloat64(pt.Z)
	}
}
// Decode decodes the polyline from the given reader, returning any error
// encountered while reading or validating the encoded data.
func (p *Polyline) Decode(r io.Reader) error {
	d := &decoder{r: asByteReader(r)}
	p.decode(d)
	return d.err
}

// decode reads the wire-format polyline into p, recording any failure in
// d.err.
//
// The decoder must be passed by pointer: the read helpers record failures
// in d.err, and with a value receiver those errors would be set on a copy
// and silently lost, making Decode report success on malformed input. This
// also matches how Rect.Decode/decode share their decoder.
func (p *Polyline) decode(d *decoder) {
	version := d.readInt8()
	if d.err != nil {
		return
	}
	if int(version) != int(encodingVersion) {
		d.err = fmt.Errorf("can't decode version %d; my version: %d", version, encodingVersion)
		return
	}
	nvertices := d.readUint32()
	if d.err != nil {
		return
	}
	// Reject absurd counts before allocating, so corrupt input can't
	// trigger a huge allocation.
	if nvertices > maxEncodedVertices {
		d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices)
		return
	}
	*p = make([]Point, nvertices)
	for i := range *p {
		(*p)[i].X = d.readFloat64()
		(*p)[i].Y = d.readFloat64()
		(*p)[i].Z = d.readFloat64()
	}
}
// TODO(roberts): Differences from C++.
@ -195,8 +384,7 @@ func (p *Polyline) ContainsOrigin() bool {
// Interpolate/UnInterpolate
// Project
// IsPointOnRight
// Intersects
// Intersects(Polyline)
// Reverse
// SubsampleVertices
// ApproxEqual
// NearlyCoversPolyline

View file

@ -1,18 +1,16 @@
/*
Copyright 2016 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
@ -27,6 +25,7 @@ package s2
import (
"math"
"math/big"
"github.com/golang/geo/r3"
)
@ -219,20 +218,378 @@ func expensiveSign(a, b, c Point) Direction {
// the three points are truly collinear (e.g., three points on the equator).
detSign := stableSign(a, b, c)
if detSign != Indeterminate {
return detSign
return Direction(detSign)
}
// Otherwise fall back to exact arithmetic and symbolic permutations.
return exactSign(a, b, c, false)
return exactSign(a, b, c, true)
}
// exactSign reports the direction sign of the points using exact precision arithmetic.
// exactSign reports the direction sign of the points computed using high-precision
// arithmetic and/or symbolic perturbations.
func exactSign(a, b, c Point, perturb bool) Direction {
// In the C++ version, the final computation is performed using OpenSSL's
// Bignum exact precision math library. The existence of an equivalent
// library in Go is indeterminate. In C++, using the exact precision library
// to solve this stage is ~300x slower than the above checks.
// TODO(roberts): Select and incorporate an appropriate Go exact precision
// floating point library for the remaining calculations.
return Indeterminate
// Sort the three points in lexicographic order, keeping track of the sign
// of the permutation. (Each exchange inverts the sign of the determinant.)
permSign := Direction(CounterClockwise)
pa := &a
pb := &b
pc := &c
if pa.Cmp(pb.Vector) > 0 {
pa, pb = pb, pa
permSign = -permSign
}
if pb.Cmp(pc.Vector) > 0 {
pb, pc = pc, pb
permSign = -permSign
}
if pa.Cmp(pb.Vector) > 0 {
pa, pb = pb, pa
permSign = -permSign
}
// Construct multiple-precision versions of the sorted points and compute
// their precise 3x3 determinant.
xa := r3.PreciseVectorFromVector(pa.Vector)
xb := r3.PreciseVectorFromVector(pb.Vector)
xc := r3.PreciseVectorFromVector(pc.Vector)
xbCrossXc := xb.Cross(xc)
det := xa.Dot(xbCrossXc)
// The precision of big.Float is high enough that the result should always
// be exact enough (no rounding was performed).
// If the exact determinant is non-zero, we're done.
detSign := Direction(det.Sign())
if detSign == Indeterminate && perturb {
// Otherwise, we need to resort to symbolic perturbations to resolve the
// sign of the determinant.
detSign = symbolicallyPerturbedSign(xa, xb, xc, xbCrossXc)
}
return permSign * Direction(detSign)
}
// symbolicallyPerturbedSign reports the sign of the determinant of three points
// A, B, C under a model where every possible Point is slightly perturbed by
// a unique infinitesmal amount such that no three perturbed points are
// collinear and no four points are coplanar. The perturbations are so small
// that they do not change the sign of any determinant that was non-zero
// before the perturbations, and therefore can be safely ignored unless the
// determinant of three points is exactly zero (using multiple-precision
// arithmetic). This returns CounterClockwise or Clockwise according to the
// sign of the determinant after the symbolic perturbations are taken into account.
//
// Since the symbolic perturbation of a given point is fixed (i.e., the
// perturbation is the same for all calls to this method and does not depend
// on the other two arguments), the results of this method are always
// self-consistent. It will never return results that would correspond to an
// impossible configuration of non-degenerate points.
//
// This requires that the 3x3 determinant of A, B, C must be exactly zero.
// And the points must be distinct, with A < B < C in lexicographic order.
//
// Reference:
// "Simulation of Simplicity" (Edelsbrunner and Muecke, ACM Transactions on
// Graphics, 1990).
//
func symbolicallyPerturbedSign(a, b, c, bCrossC r3.PreciseVector) Direction {
	// This method requires that the points are sorted in lexicographically
	// increasing order. This is because every possible Point has its own
	// symbolic perturbation such that if A < B then the symbolic perturbation
	// for A is much larger than the perturbation for B.
	//
	// Alternatively, we could sort the points in this method and keep track of
	// the sign of the permutation, but it is more efficient to do this before
	// converting the inputs to the multi-precision representation, and this
	// also lets us re-use the result of the cross product B x C.
	//
	// Every input coordinate x[i] is assigned a symbolic perturbation dx[i].
	// We then compute the sign of the determinant of the perturbed points,
	// i.e.
	//
	//	| a.X+da.X  a.Y+da.Y  a.Z+da.Z |
	//	| b.X+db.X  b.Y+db.Y  b.Z+db.Z |
	//	| c.X+dc.X  c.Y+dc.Y  c.Z+dc.Z |
	//
	// The perturbations are chosen such that
	//
	//	da.Z > da.Y > da.X > db.Z > db.Y > db.X > dc.Z > dc.Y > dc.X
	//
	// where each perturbation is so much smaller than the previous one that we
	// don't even need to consider it unless the coefficients of all previous
	// perturbations are zero. In fact, it is so small that we don't need to
	// consider it unless the coefficient of all products of the previous
	// perturbations are zero. For example, we don't need to consider the
	// coefficient of db.Y unless the coefficient of db.Z*da.X is zero.
	//
	// The following code simply enumerates the coefficients of the perturbations
	// (and products of perturbations) that appear in the determinant above, in
	// order of decreasing perturbation magnitude. The first non-zero
	// coefficient determines the sign of the result. The easiest way to
	// enumerate the coefficients in the correct order is to pretend that each
	// perturbation is some tiny value "eps" raised to a power of two:
	//
	//	eps**   1     2     4     8     16    32    64    128   256
	//	      da.Z  da.Y  da.X  db.Z  db.Y  db.X  dc.Z  dc.Y  dc.X
	//
	// Essentially we can then just count in binary and test the corresponding
	// subset of perturbations at each step. So for example, we must test the
	// coefficient of db.Z*da.X before db.Y because eps**12 > eps**16.
	//
	// Of course, not all products of these perturbations appear in the
	// determinant above, since the determinant only contains the products of
	// elements in distinct rows and columns. Thus we don't need to consider
	// da.Z*da.Y, db.Y*da.Y, etc. Furthermore, sometimes different pairs of
	// perturbations have the same coefficient in the determinant; for example,
	// da.Y*db.X and db.Y*da.X have the same coefficient (c.Z). Therefore
	// we only need to test this coefficient the first time we encounter it in
	// the binary order above (which will be db.Y*da.X).
	//
	// The sequence of tests below also appears in Table 4-ii of the paper
	// referenced above, if you just want to look it up, with the following
	// translations: [a,b,c] -> [i,j,k] and [0,1,2] -> [1,2,3]. Also note that
	// some of the signs are different because the opposite cross product is
	// used (e.g., B x C rather than C x B).
	detSign := bCrossC.Z.Sign() // da.Z
	if detSign != 0 {
		return Direction(detSign)
	}
	detSign = bCrossC.Y.Sign() // da.Y
	if detSign != 0 {
		return Direction(detSign)
	}
	detSign = bCrossC.X.Sign() // da.X
	if detSign != 0 {
		return Direction(detSign)
	}
	detSign = new(big.Float).Sub(new(big.Float).Mul(c.X, a.Y), new(big.Float).Mul(c.Y, a.X)).Sign() // db.Z
	if detSign != 0 {
		return Direction(detSign)
	}
	detSign = c.X.Sign() // db.Z * da.Y
	if detSign != 0 {
		return Direction(detSign)
	}
	detSign = -(c.Y.Sign()) // db.Z * da.X
	if detSign != 0 {
		return Direction(detSign)
	}
	detSign = new(big.Float).Sub(new(big.Float).Mul(c.Z, a.X), new(big.Float).Mul(c.X, a.Z)).Sign() // db.Y
	if detSign != 0 {
		return Direction(detSign)
	}
	detSign = c.Z.Sign() // db.Y * da.X
	if detSign != 0 {
		return Direction(detSign)
	}
	// The following test is listed in the paper, but it is redundant because
	// the previous tests guarantee that C == (0, 0, 0).
	// (c.Y*a.Z - c.Z*a.Y).Sign() // db.X
	detSign = new(big.Float).Sub(new(big.Float).Mul(a.X, b.Y), new(big.Float).Mul(a.Y, b.X)).Sign() // dc.Z
	if detSign != 0 {
		return Direction(detSign)
	}
	detSign = -(b.X.Sign()) // dc.Z * da.Y
	if detSign != 0 {
		return Direction(detSign)
	}
	detSign = b.Y.Sign() // dc.Z * da.X
	if detSign != 0 {
		return Direction(detSign)
	}
	detSign = a.X.Sign() // dc.Z * db.Y
	if detSign != 0 {
		return Direction(detSign)
	}
	// All earlier coefficients were zero; the coefficient of the remaining
	// perturbation product is the constant +1, so the perturbed determinant
	// is positive.
	return CounterClockwise // dc.Z * db.Y * da.X
}
// CompareDistances returns -1, 0, or +1 according to whether AX < BX, A == B,
// or AX > BX respectively. Distances are measured with respect to the positions
// of X, A, and B as though they were reprojected to lie exactly on the surface of
// the unit sphere. Furthermore, this method uses symbolic perturbations to
// ensure that the result is non-zero whenever A != B, even when AX == BX
// exactly, or even when A and B project to the same point on the sphere.
// Such results are guaranteed to be self-consistent, i.e. if AB < BC and
// BC < AC, then AB < AC.
func CompareDistances(x, a, b Point) int {
	// We start by comparing distances using dot products (i.e., cosine of the
	// angle), because (1) this is the cheapest technique, and (2) it is valid
	// over the entire range of possible angles. (We can only use the sin^2
	// technique if both angles are less than 90 degrees or both angles are
	// greater than 90 degrees.)
	sign := triageCompareCosDistances(x, a, b)
	if sign != 0 {
		return sign
	}
	// Optimization for (a == b) to avoid falling back to exact arithmetic.
	if a == b {
		return 0
	}
	// It is much better numerically to compare distances using cos(angle) if
	// the distances are near 90 degrees and sin^2(angle) if the distances are
	// near 0 or 180 degrees. We only need to check one of the two angles when
	// making this decision because the fact that the test above failed means
	// that angles "a" and "b" are very close together.
	cosAX := a.Dot(x.Vector)
	if cosAX > 1/math.Sqrt2 {
		// Angles < 45 degrees.
		sign = triageCompareSin2Distances(x, a, b)
	} else if cosAX < -1/math.Sqrt2 {
		// Angles > 135 degrees. sin^2(angle) is decreasing in this range.
		sign = -triageCompareSin2Distances(x, a, b)
	}
	// C++ adds an additional check here using 80-bit floats.
	// This is skipped in Go because we only have 32 and 64 bit floats.
	if sign != 0 {
		return sign
	}
	// Escalate to exact multiple-precision arithmetic, and finally to
	// symbolic perturbations, which guarantee a non-zero answer since A != B.
	sign = exactCompareDistances(r3.PreciseVectorFromVector(x.Vector), r3.PreciseVectorFromVector(a.Vector), r3.PreciseVectorFromVector(b.Vector))
	if sign != 0 {
		return sign
	}
	return symbolicCompareDistances(x, a, b)
}
// cosDistance returns the cosine of the angle XY between the unit-length
// points X and Y, together with a bound on the absolute error of the result.
func cosDistance(x, y Point) (cos, err float64) {
	cos = x.Dot(y.Vector)
	err = 9.5*dblEpsilon*math.Abs(cos) + 1.5*dblEpsilon
	return cos, err
}
// sin2Distance returns sin**2(XY), where XY is the angle between the
// unit-length points X and Y, together with a bound on the absolute error
// of the result.
func sin2Distance(x, y Point) (sin2, err float64) {
	// The (x-y).Cross(x+y) form cancels almost all of the error caused by x
	// and y not being exactly unit length. It is extremely accurate for
	// small distances: the *relative* error of the result is O(dblEpsilon)
	// even for distances as small as dblEpsilon.
	n := x.Sub(y.Vector).Cross(x.Add(y.Vector))
	sin2 = 0.25 * n.Norm2()
	// Error bound, split into its three terms for readability.
	t1 := (21 + 4*math.Sqrt(3)) * dblEpsilon * sin2
	t2 := 32 * math.Sqrt(3) * dblEpsilon * dblEpsilon * math.Sqrt(sin2)
	t3 := 768 * dblEpsilon * dblEpsilon * dblEpsilon * dblEpsilon
	err = t1 + t2 + t3
	return sin2, err
}
// triageCompareCosDistances returns -1, 0, or +1 according to whether AX < BX,
// A == B, or AX > BX, comparing via cosDistance. It returns 0 whenever the
// difference falls inside the combined error bound (i.e. the triage is
// inconclusive).
func triageCompareCosDistances(x, a, b Point) int {
	cosAX, errAX := cosDistance(a, x)
	cosBX, errBX := cosDistance(b, x)
	diff, maxErr := cosAX-cosBX, errAX+errBX
	switch {
	case diff > maxErr:
		// cos(AX) is definitely larger, so the angle AX is smaller.
		return -1
	case diff < -maxErr:
		return 1
	default:
		return 0
	}
}
// triageCompareSin2Distances returns -1, 0, or +1 according to whether AX < BX,
// A == B, or AX > BX, comparing via sin2Distance. It returns 0 whenever the
// difference falls inside the combined error bound (i.e. the triage is
// inconclusive).
func triageCompareSin2Distances(x, a, b Point) int {
	sin2AX, errAX := sin2Distance(a, x)
	sin2BX, errBX := sin2Distance(b, x)
	diff, maxErr := sin2AX-sin2BX, errAX+errBX
	switch {
	case diff > maxErr:
		return 1
	case diff < -maxErr:
		return -1
	default:
		return 0
	}
}
// exactCompareDistances returns -1, 0, or 1 after comparing using the values as
// PreciseVectors.
func exactCompareDistances(x, a, b r3.PreciseVector) int {
	// This code produces the same result as though all points were reprojected
	// to lie exactly on the surface of the unit sphere. It is based on testing
	// whether x.Dot(a.Normalize()) < x.Dot(b.Normalize()), reformulated
	// so that it can be evaluated using exact arithmetic.
	cosAX := x.Dot(a)
	cosBX := x.Dot(b)
	// If the two values have different signs, we need to handle that case now
	// before squaring them below.
	aSign := cosAX.Sign()
	bSign := cosBX.Sign()
	if aSign != bSign {
		// If cos(AX) > cos(BX), then AX < BX.
		if aSign > bSign {
			return -1
		}
		return 1
	}
	// Both cosines have the same sign. Squaring preserves the comparison when
	// that sign is positive and reverses it when negative; the final
	// multiplication by aSign compensates for the reversal.
	cosAX2 := new(big.Float).Mul(cosAX, cosAX)
	cosBX2 := new(big.Float).Mul(cosBX, cosBX)
	// Note: big.Float.Mul stores its result in its receiver, so cosBX2 and
	// cosAX2 are deliberately reused in place for the scaled products.
	cmp := new(big.Float).Sub(cosBX2.Mul(cosBX2, a.Norm2()), cosAX2.Mul(cosAX2, b.Norm2()))
	return aSign * cmp.Sign()
}
// symbolicCompareDistances returns -1, 0, or +1 given three points such that
// AX == BX (exactly) according to whether AX < BX, AX == BX, or AX > BX after
// symbolic perturbations are taken into account.
func symbolicCompareDistances(x, a, b Point) int {
	// Perturbation model (similar to "simulation of simplicity"): instead of
	// every point lying exactly on the unit sphere, each point sits on its
	// own infinitesimally thin pedestal just above the surface, and the
	// pedestal of a lexicographically smaller point is much, much taller
	// than that of a larger one. The measured distance AX then includes the
	// pedestal heights of A and X, so ties are broken consistently even when
	// several points project to exactly the same location on the sphere.
	// (There are finitely many Points, hence finitely many pedestals, so
	// such an assignment exists.)
	cmp := a.Cmp(b.Vector)
	if cmp < 0 {
		// A < B means A is on the taller pedestal, hence AX > BX.
		return 1
	}
	if cmp > 0 {
		return -1
	}
	return 0
}
// TODO(roberts): Differences from C++
// CompareDistance
// CompareEdgeDistance
// CompareEdgeDirections
// EdgeCircumcenterSign
// GetVoronoiSiteExclusion
// GetClosestVertex
// TriageCompareLineSin2Distance
// TriageCompareLineCos2Distance
// TriageCompareLineDistance
// TriageCompareEdgeDistance
// ExactCompareLineDistance
// ExactCompareEdgeDistance
// TriageCompareEdgeDirections
// ExactCompareEdgeDirections
// ArePointsAntipodal
// ArePointsLinearlyDependent
// GetCircumcenter
// TriageEdgeCircumcenterSign
// ExactEdgeCircumcenterSign
// UnperturbedSign
// SymbolicEdgeCircumcenterSign
// ExactVoronoiSiteExclusion

203
vendor/github.com/golang/geo/s2/projections.go generated vendored Normal file
View file

@ -0,0 +1,203 @@
// Copyright 2018 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
import (
"math"
"github.com/golang/geo/r2"
"github.com/golang/geo/s1"
)
// Projection defines an interface for different ways of mapping between s2 and r2 Points.
// It can also define the coordinate wrapping behavior along each axis.
type Projection interface {
	// Project converts a point on the sphere to a projected 2D point.
	Project(p Point) r2.Point

	// Unproject converts a projected 2D point to a point on the sphere.
	//
	// If wrapping is defined for a given axis (see below), then this method
	// should accept any real number for the corresponding coordinate.
	Unproject(p r2.Point) Point

	// FromLatLng is a convenience function equivalent to Project(LatLngToPoint(ll)),
	// but the implementation is more efficient.
	FromLatLng(ll LatLng) r2.Point

	// ToLatLng is a convenience function equivalent to LatLngFromPoint(Unproject(p)),
	// but the implementation is more efficient.
	ToLatLng(p r2.Point) LatLng

	// Interpolate returns the point obtained by interpolating the given
	// fraction of the distance along the line from A to B.
	// Fractions < 0 or > 1 result in extrapolation instead.
	Interpolate(f float64, a, b r2.Point) r2.Point

	// WrapDistance reports the coordinate wrapping distance along each axis.
	// If this value is non-zero for a given axis, the coordinates are assumed
	// to "wrap" with the given period. For example, if WrapDistance.Y == 360
	// then (x, y) and (x, y + 360) should map to the same Point.
	//
	// This information is used to ensure that edges take the shortest path
	// between two given points. For example, if coordinates represent
	// (latitude, longitude) pairs in degrees and WrapDistance().Y == 360,
	// then the edge (5:179, 5:-179) would be interpreted as spanning 2 degrees
	// of longitude rather than 358 degrees.
	//
	// If a given axis does not wrap, its WrapDistance should be set to zero.
	WrapDistance() r2.Point
}
// PlateCarreeProjection defines the "plate carree" (square plate) projection,
// which converts points on the sphere to (longitude, latitude) pairs.
// Coordinates can be scaled so that they represent radians, degrees, etc, but
// the projection is always centered around (latitude=0, longitude=0).
//
// Note that (x, y) coordinates are backwards compared to the usual (latitude,
// longitude) ordering, in order to match the usual convention for graphs in
// which "x" is horizontal and "y" is vertical.
type PlateCarreeProjection struct {
	xWrap       float64 // Wrapping period of the x axis (2 * xScale; see NewPlateCarreeProjection).
	toRadians   float64 // Multiplier to convert coordinates to radians.
	fromRadians float64 // Multiplier to convert coordinates from radians.
}
// NewPlateCarreeProjection constructs a plate carree projection where the
// x-coordinates (lng) span [-xScale, xScale] and the y coordinates (lat)
// span [-xScale/2, xScale/2]. For example if xScale==180 then the x
// range is [-180, 180] and the y range is [-90, 90].
//
// By default coordinates are expressed in radians, i.e. the x range is
// [-Pi, Pi] and the y range is [-Pi/2, Pi/2].
func NewPlateCarreeProjection(xScale float64) Projection {
	p := &PlateCarreeProjection{}
	p.xWrap = 2 * xScale
	p.toRadians = math.Pi / xScale
	p.fromRadians = xScale / math.Pi
	return p
}
// Project converts a point on the sphere to a projected 2D point.
func (p *PlateCarreeProjection) Project(pt Point) r2.Point {
	ll := LatLngFromPoint(pt)
	return p.FromLatLng(ll)
}
// Unproject converts a projected 2D point to a point on the sphere.
func (p *PlateCarreeProjection) Unproject(pt r2.Point) Point {
	ll := p.ToLatLng(pt)
	return PointFromLatLng(ll)
}
// FromLatLng returns the LatLng projected into an R2 Point.
func (p *PlateCarreeProjection) FromLatLng(ll LatLng) r2.Point {
	x := p.fromRadians * ll.Lng.Radians()
	y := p.fromRadians * ll.Lat.Radians()
	return r2.Point{X: x, Y: y}
}
// ToLatLng returns the LatLng projected from the given R2 Point.
func (p *PlateCarreeProjection) ToLatLng(pt r2.Point) LatLng {
	// Converting projected coordinates back to angles must use the
	// toRadians scale factor. Using fromRadians here (as before) re-applied
	// the forward scaling, producing wrong results for any xScale != Pi and
	// breaking the inverse relationship with FromLatLng. This now mirrors
	// MercatorProjection.ToLatLng.
	return LatLng{
		Lat: s1.Angle(p.toRadians * pt.Y),
		Lng: s1.Angle(p.toRadians * math.Remainder(pt.X, p.xWrap)),
	}
}
// Interpolate returns the point obtained by interpolating the given
// fraction of the distance along the line from A to B.
func (p *PlateCarreeProjection) Interpolate(f float64, a, b r2.Point) r2.Point {
	// Linear interpolation (1-f)*a + f*b, with the terms reordered.
	return b.Mul(f).Add(a.Mul(1 - f))
}
// WrapDistance reports the coordinate wrapping distance along each axis.
// Only the x (longitude) axis wraps for this projection.
func (p *PlateCarreeProjection) WrapDistance() r2.Point {
	// Keyed fields: unkeyed composite literals of imported types are
	// flagged by go vet and break if r2.Point gains fields.
	return r2.Point{X: p.xWrap, Y: 0}
}
// MercatorProjection defines the spherical Mercator projection. Google Maps
// uses this projection together with WGS84 coordinates, in which case it is
// known as the "Web Mercator" projection (see Wikipedia). This class makes
// no assumptions regarding the coordinate system of its input points, but
// simply applies the spherical Mercator projection to them.
//
// The Mercator projection is finite in width (x) but infinite in height (y).
// "x" corresponds to longitude, and spans a finite range such as [-180, 180]
// (with coordinate wrapping), while "y" is a function of latitude and spans
// an infinite range. (As "y" coordinates get larger, points get closer to
// the north pole but never quite reach it.) The north and south poles have
// infinite "y" values. (Note that this will cause problems if you tessellate
// a Mercator edge where one endpoint is a pole. If you need to do this, clip
// the edge first so that the "y" coordinate is no more than about 5 * maxX.)
type MercatorProjection struct {
	xWrap       float64 // Wrapping period of the x axis (2 * maxLng; see NewMercatorProjection).
	toRadians   float64 // Multiplier to convert coordinates to radians.
	fromRadians float64 // Multiplier to convert coordinates from radians.
}
// NewMercatorProjection constructs a Mercator projection with the given maximum
// longitude axis value corresponding to a range of [-maxLng, maxLng].
// The horizontal and vertical axes are scaled equally.
func NewMercatorProjection(maxLng float64) Projection {
	m := &MercatorProjection{}
	m.xWrap = 2 * maxLng
	m.toRadians = math.Pi / maxLng
	m.fromRadians = maxLng / math.Pi
	return m
}
// Project converts a point on the sphere to a projected 2D point.
func (p *MercatorProjection) Project(pt Point) r2.Point {
	ll := LatLngFromPoint(pt)
	return p.FromLatLng(ll)
}
// Unproject converts a projected 2D point to a point on the sphere.
func (p *MercatorProjection) Unproject(pt r2.Point) Point {
	ll := p.ToLatLng(pt)
	return PointFromLatLng(ll)
}
// FromLatLng returns the LatLng projected into an R2 Point.
func (p *MercatorProjection) FromLatLng(ll LatLng) r2.Point {
	// This formula is more accurate near zero than the log(tan()) version.
	// Note that latitudes of +/- 90 degrees yield "y" values of +/- infinity.
	sinPhi := math.Sin(float64(ll.Lat))
	y := 0.5 * math.Log((1+sinPhi)/(1-sinPhi))
	// Keyed fields: unkeyed composite literals of imported types are
	// flagged by go vet.
	return r2.Point{X: p.fromRadians * float64(ll.Lng), Y: p.fromRadians * y}
}
// ToLatLng returns the LatLng projected from the given R2 Point.
func (p *MercatorProjection) ToLatLng(pt r2.Point) LatLng {
	// This formula is more accurate near zero than the atan(exp()) version.
	lng := p.toRadians * math.Remainder(pt.X, p.xWrap)
	k := math.Exp(2 * p.toRadians * pt.Y)
	// An infinite k corresponds to the north pole; otherwise invert the
	// forward formula.
	lat := math.Pi / 2
	if !math.IsInf(k, 0) {
		lat = math.Asin((k - 1) / (k + 1))
	}
	return LatLng{s1.Angle(lat), s1.Angle(lng)}
}
// Interpolate returns the point obtained by interpolating the given
// fraction of the distance along the line from A to B.
func (p *MercatorProjection) Interpolate(f float64, a, b r2.Point) r2.Point {
	// Linear interpolation (1-f)*a + f*b, with the terms reordered.
	return b.Mul(f).Add(a.Mul(1 - f))
}
// WrapDistance reports the coordinate wrapping distance along each axis.
// Only the x (longitude) axis wraps for this projection.
func (p *MercatorProjection) WrapDistance() r2.Point {
	// Keyed fields: unkeyed composite literals of imported types are
	// flagged by go vet and break if r2.Point gains fields.
	return r2.Point{X: p.xWrap, Y: 0}
}

View file

@ -1,23 +1,22 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
import (
"fmt"
"io"
"math"
"github.com/golang/geo/r1"
@ -292,6 +291,11 @@ func (r Rect) ContainsPoint(p Point) bool {
return r.ContainsLatLng(LatLngFromPoint(p))
}
// CellUnionBound computes a covering of the Rect.
func (r Rect) CellUnionBound() []CellID {
	// Delegate to the bounding cap's covering.
	c := r.CapBound()
	return c.CellUnionBound()
}
// intersectsLatEdge reports whether the edge AB intersects the given edge of constant
// latitude. Requires the points to have unit length.
func intersectsLatEdge(a, b Point, lat s1.Angle, lng s1.Interval) bool {
@ -354,8 +358,8 @@ func intersectsLatEdge(a, b Point, lat s1.Angle, lng s1.Interval) bool {
func intersectsLngEdge(a, b Point, lat r1.Interval, lng s1.Angle) bool {
// The nice thing about edges of constant longitude is that
// they are straight lines on the sphere (geodesics).
return SimpleCrossing(a, b, PointFromLatLng(LatLng{s1.Angle(lat.Lo), lng}),
PointFromLatLng(LatLng{s1.Angle(lat.Hi), lng}))
return CrossingSign(a, b, PointFromLatLng(LatLng{s1.Angle(lat.Lo), lng}),
PointFromLatLng(LatLng{s1.Angle(lat.Hi), lng})) == Cross
}
// IntersectsCell reports whether this rectangle intersects the given cell. This is an
@ -423,5 +427,39 @@ func (r Rect) IntersectsCell(c Cell) bool {
return false
}
// Encode writes the Rect to w in the s2 wire format and reports any error
// encountered while writing.
func (r Rect) Encode(w io.Writer) error {
	enc := encoder{w: w}
	r.encode(&enc)
	return enc.err
}
// encode writes the version tag followed by the four interval bounds
// (lat lo/hi, lng lo/hi) to the given encoder.
func (r Rect) encode(e *encoder) {
	e.writeInt8(encodingVersion)
	for _, bound := range [4]float64{r.Lat.Lo, r.Lat.Hi, r.Lng.Lo, r.Lng.Hi} {
		e.writeFloat64(bound)
	}
}
// Decode reads a rectangle from rd and reports any error encountered while
// reading or validating the encoded data.
func (r *Rect) Decode(rd io.Reader) error {
	d := decoder{r: asByteReader(rd)}
	r.decode(&d)
	return d.err
}
// decode reads the wire-format rectangle into r, recording any failure in
// d.err. (Removed a redundant bare return at the end of this void function;
// staticcheck S1023.)
func (r *Rect) decode(d *decoder) {
	// Only report a version mismatch when the read itself succeeded; if
	// d.err is already set, the subsequent reads are no-ops on the decoder.
	if version := d.readUint8(); int(version) != int(encodingVersion) && d.err == nil {
		d.err = fmt.Errorf("can't decode version %d; my version: %d", version, encodingVersion)
		return
	}
	r.Lat.Lo = d.readFloat64()
	r.Lat.Hi = d.readFloat64()
	r.Lng.Lo = d.readFloat64()
	r.Lng.Hi = d.readFloat64()
}
// BUG: The major differences from the C++ version are:
// - GetCentroid, Get*Distance, Vertex, InteriorContains(LatLng|Rect|Point)

352
vendor/github.com/golang/geo/s2/rect_bounder.go generated vendored Normal file
View file

@ -0,0 +1,352 @@
// Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
import (
"math"
"github.com/golang/geo/r1"
"github.com/golang/geo/r3"
"github.com/golang/geo/s1"
)
// RectBounder is used to compute a bounding rectangle that contains all edges
// defined by a vertex chain (v0, v1, v2, ...). All vertices must be unit length.
// Note that the bounding rectangle of an edge can be larger than the bounding
// rectangle of its endpoints, e.g. consider an edge that passes through the North Pole.
//
// The bounds are calculated conservatively to account for numerical errors
// when points are converted to LatLngs. More precisely, this function
// guarantees the following:
// Let L be a closed edge chain (Loop) such that the interior of the loop does
// not contain either pole. Now if P is any point such that L.ContainsPoint(P),
// then RectBound(L).ContainsPoint(LatLngFromPoint(P)).
type RectBounder struct {
	// The previous vertex in the chain.
	a Point
	// The previous vertex latitude longitude.
	aLL LatLng
	// The bounding rectangle accumulated for the chain seen so far.
	bound Rect
}
// NewRectBounder returns a new instance of a RectBounder, with its bound
// initialized to the empty rectangle.
func NewRectBounder() *RectBounder {
	rb := new(RectBounder)
	rb.bound = EmptyRect()
	return rb
}
// maxErrorForTests returns the maximum error in RectBound provided that the
// result does not include either pole. It is only used for testing purposes.
func (r *RectBounder) maxErrorForTests() LatLng {
	// Latitude error budget:
	//   3.84 * dblEpsilon from the PointCross calculation,
	//   0.96 * dblEpsilon from the Latitude calculation,
	//   5.00 * dblEpsilon added by AddPoint/RectBound to compensate for error,
	// totalling 9.80 * dblEpsilon, rounded up to 10.
	//
	// The maximum error in the longitude calculation is a single dblEpsilon.
	// RectBound does not do any expansion there because none is necessary in
	// order to bound the *rounded* longitudes of contained points.
	return LatLng{Lat: 10 * dblEpsilon * s1.Radian, Lng: 1 * dblEpsilon * s1.Radian}
}
// AddPoint adds the given point to the chain. The Point must be unit length.
func (r *RectBounder) AddPoint(b Point) {
	bLL := LatLngFromPoint(b)

	// The very first point only seeds the bound; there is no edge yet.
	if r.bound.IsEmpty() {
		r.a = b
		r.aLL = bLL
		r.bound = r.bound.AddPoint(bLL)
		return
	}

	// First compute the cross product N = A x B robustly. This is the normal
	// to the great circle through A and B. We don't use RobustSign
	// since that method returns an arbitrary vector orthogonal to A if the two
	// vectors are proportional, and we want the zero vector in that case.
	n := r.a.Sub(b.Vector).Cross(r.a.Add(b.Vector)) // N = 2 * (A x B)

	// The relative error in N gets large as its norm gets very small (i.e.,
	// when the two points are nearly identical or antipodal). We handle this
	// by choosing a maximum allowable error, and if the error is greater than
	// this we fall back to a different technique. Since it turns out that
	// the other sources of error in converting the normal to a maximum
	// latitude add up to at most 1.16 * dblEpsilon, and it is desirable to
	// have the total error be a multiple of dblEpsilon, we have chosen to
	// limit the maximum error in the normal to be 3.84 * dblEpsilon.
	// It is possible to show that the error is less than this when
	//
	// n.Norm() >= 8 * sqrt(3) / (3.84 - 0.5 - sqrt(3)) * dblEpsilon
	// = 1.91346e-15 (about 8.618 * dblEpsilon)
	nNorm := n.Norm()
	if nNorm < 1.91346e-15 {
		// A and B are either nearly identical or nearly antipodal (to within
		// 4.309 * dblEpsilon, or about 6 nanometers on the earth's surface).
		if r.a.Dot(b.Vector) < 0 {
			// The two points are nearly antipodal. The easiest solution is to
			// assume that the edge between A and B could go in any direction
			// around the sphere.
			r.bound = FullRect()
		} else {
			// The two points are nearly identical (to within 4.309 * dblEpsilon).
			// In this case we can just use the bounding rectangle of the points,
			// since after the expansion done by GetBound this Rect is
			// guaranteed to include the (lat,lng) values of all points along AB.
			r.bound = r.bound.Union(RectFromLatLng(r.aLL).AddPoint(bLL))
		}
		// Remember B as the previous vertex for the next edge.
		r.a = b
		r.aLL = bLL
		return
	}

	// Compute the longitude range spanned by AB.
	lngAB := s1.EmptyInterval().AddPoint(r.aLL.Lng.Radians()).AddPoint(bLL.Lng.Radians())
	if lngAB.Length() >= math.Pi-2*dblEpsilon {
		// The points lie on nearly opposite lines of longitude to within the
		// maximum error of the calculation. The easiest solution is to assume
		// that AB could go on either side of the pole.
		lngAB = s1.FullInterval()
	}

	// Next we compute the latitude range spanned by the edge AB. We start
	// with the range spanning the two endpoints of the edge:
	latAB := r1.IntervalFromPoint(r.aLL.Lat.Radians()).AddPoint(bLL.Lat.Radians())

	// This is the desired range unless the edge AB crosses the plane
	// through N and the Z-axis (which is where the great circle through A
	// and B attains its minimum and maximum latitudes). To test whether AB
	// crosses this plane, we compute a vector M perpendicular to this
	// plane and then project A and B onto it.
	m := n.Cross(r3.Vector{0, 0, 1})
	mA := m.Dot(r.a.Vector)
	mB := m.Dot(b.Vector)

	// We want to test the signs of "mA" and "mB", so we need to bound
	// the error in these calculations. It is possible to show that the
	// total error is bounded by
	//
	// (1 + sqrt(3)) * dblEpsilon * nNorm + 8 * sqrt(3) * (dblEpsilon**2)
	// = 6.06638e-16 * nNorm + 6.83174e-31
	mError := 6.06638e-16*nNorm + 6.83174e-31
	if mA*mB < 0 || math.Abs(mA) <= mError || math.Abs(mB) <= mError {
		// Minimum/maximum latitude *may* occur in the edge interior.
		//
		// The maximum latitude is 90 degrees minus the latitude of N. We
		// compute this directly using atan2 in order to get maximum accuracy
		// near the poles.
		//
		// Our goal is compute a bound that contains the computed latitudes of
		// all S2Points P that pass the point-in-polygon containment test.
		// There are three sources of error we need to consider:
		// - the directional error in N (at most 3.84 * dblEpsilon)
		// - converting N to a maximum latitude
		// - computing the latitude of the test point P
		// The latter two sources of error are at most 0.955 * dblEpsilon
		// individually, but it is possible to show by a more complex analysis
		// that together they can add up to at most 1.16 * dblEpsilon, for a
		// total error of 5 * dblEpsilon.
		//
		// We add 3 * dblEpsilon to the bound here, and GetBound() will pad
		// the bound by another 2 * dblEpsilon.
		maxLat := math.Min(
			math.Atan2(math.Sqrt(n.X*n.X+n.Y*n.Y), math.Abs(n.Z))+3*dblEpsilon,
			math.Pi/2)

		// In order to get tight bounds when the two points are close together,
		// we also bound the min/max latitude relative to the latitudes of the
		// endpoints A and B. First we compute the distance between A and B,
		// and then we compute the maximum change in latitude between any two
		// points along the great circle that are separated by this distance.
		// This gives us a latitude change "budget". Some of this budget must
		// be spent getting from A to B; the remainder bounds the round-trip
		// distance (in latitude) from A or B to the min or max latitude
		// attained along the edge AB.
		latBudget := 2 * math.Asin(0.5*(r.a.Sub(b.Vector)).Norm()*math.Sin(maxLat))
		maxDelta := 0.5*(latBudget-latAB.Length()) + dblEpsilon

		// Test whether AB passes through the point of maximum latitude or
		// minimum latitude. If the dot product(s) are small enough then the
		// result may be ambiguous.
		if mA <= mError && mB >= -mError {
			latAB.Hi = math.Min(maxLat, latAB.Hi+maxDelta)
		}
		if mB <= mError && mA >= -mError {
			latAB.Lo = math.Max(-maxLat, latAB.Lo-maxDelta)
		}
	}
	// Remember B as the previous vertex and fold the edge's bound in.
	r.a = b
	r.aLL = bLL
	r.bound = r.bound.Union(Rect{latAB, lngAB})
}
// RectBound returns the bounding rectangle of the edge chain that connects the
// vertices defined so far. This bound satisfies the guarantee made
// above, i.e. if the edge chain defines a Loop, then the bound contains
// the LatLng coordinates of all Points contained by the loop.
func (r *RectBounder) RectBound() Rect {
	// Pad the latitude by 2*dblEpsilon (part of the 5*dblEpsilon budget split
	// between AddPoint and RectBound) and take the polar closure so a bound
	// reaching a pole covers all longitudes there.
	pad := LatLng{s1.Angle(2 * dblEpsilon), 0}
	return r.bound.expanded(pad).PolarClosure()
}
// ExpandForSubregions expands a bounding Rect so that it is guaranteed to
// contain the bounds of any subregion whose bounds are computed using
// ComputeRectBound. For example, consider a loop L that defines a square.
// GetBound ensures that if a point P is contained by this square, then
// LatLngFromPoint(P) is contained by the bound. But now consider a diamond
// shaped loop S contained by L. It is possible that GetBound returns a
// *larger* bound for S than it does for L, due to rounding errors. This
// method expands the bound for L so that it is guaranteed to contain the
// bounds of any subregion S.
//
// More precisely, if L is a loop that does not contain either pole, and S
// is a loop such that L.Contains(S), then
//
// ExpandForSubregions(L.RectBound).Contains(S.RectBound).
//
func ExpandForSubregions(bound Rect) Rect {
	// Empty bounds don't need expansion.
	if bound.IsEmpty() {
		return bound
	}

	// First we need to check whether the bound B contains any nearly-antipodal
	// points (to within 4.309 * dblEpsilon). If so then we need to return
	// FullRect, since the subregion might have an edge between two
	// such points, and AddPoint returns Full for such edges. Note that
	// this can happen even if B is not Full; for example, consider a loop
	// that defines a 10km strip straddling the equator extending from
	// longitudes -100 to +100 degrees.
	//
	// It is easy to check whether B contains any antipodal points, but checking
	// for nearly-antipodal points is trickier. Essentially we consider the
	// original bound B and its reflection through the origin B', and then test
	// whether the minimum distance between B and B' is less than 4.309 * dblEpsilon.

	// lngGap is a lower bound on the longitudinal distance between B and its
	// reflection B'. (2.5 * dblEpsilon is the maximum combined error of the
	// endpoint longitude calculations and the Length call.)
	lngGap := math.Max(0, math.Pi-bound.Lng.Length()-2.5*dblEpsilon)

	// minAbsLat is the minimum distance from B to the equator (if zero or
	// negative, then B straddles the equator).
	minAbsLat := math.Max(bound.Lat.Lo, -bound.Lat.Hi)

	// latGapSouth and latGapNorth measure the minimum distance from B to the
	// south and north poles respectively.
	latGapSouth := math.Pi/2 + bound.Lat.Lo
	latGapNorth := math.Pi/2 - bound.Lat.Hi

	if minAbsLat >= 0 {
		// The bound B does not straddle the equator. In this case the minimum
		// distance is between one endpoint of the latitude edge in B closest to
		// the equator and the other endpoint of that edge in B'. The latitude
		// distance between these two points is 2*minAbsLat, and the longitude
		// distance is lngGap. We could compute the distance exactly using the
		// Haversine formula, but then we would need to bound the errors in that
		// calculation. Since we only need accuracy when the distance is very
		// small (close to 4.309 * dblEpsilon), we substitute the Euclidean
		// distance instead. This gives us a right triangle XYZ with two edges of
		// length x = 2*minAbsLat and y ~= lngGap. The desired distance is the
		// length of the third edge z, and we have
		//
		// z ~= sqrt(x^2 + y^2) >= (x + y) / sqrt(2)
		//
		// Therefore the region may contain nearly antipodal points only if
		//
		// 2*minAbsLat + lngGap < sqrt(2) * 4.309 * dblEpsilon
		// ~= 1.354e-15
		//
		// Note that because the given bound B is conservative, minAbsLat and
		// lngGap are both lower bounds on their true values so we do not need
		// to make any adjustments for their errors.
		if 2*minAbsLat+lngGap < 1.354e-15 {
			return FullRect()
		}
	} else if lngGap >= math.Pi/2 {
		// B spans at most Pi/2 in longitude. The minimum distance is always
		// between one corner of B and the diagonally opposite corner of B'. We
		// use the same distance approximation that we used above; in this case
		// we have an obtuse triangle XYZ with two edges of length x = latGapSouth
		// and y = latGapNorth, and angle Z >= Pi/2 between them. We then have
		//
		// z >= sqrt(x^2 + y^2) >= (x + y) / sqrt(2)
		//
		// Unlike the case above, latGapSouth and latGapNorth are not lower bounds
		// (because of the extra addition operation, and because math.Pi/2 is not
		// exactly equal to Pi/2); they can exceed their true values by up to
		// 0.75 * dblEpsilon. Putting this all together, the region may contain
		// nearly antipodal points only if
		//
		// latGapSouth + latGapNorth < (sqrt(2) * 4.309 + 1.5) * dblEpsilon
		// ~= 1.687e-15
		if latGapSouth+latGapNorth < 1.687e-15 {
			return FullRect()
		}
	} else {
		// Otherwise we know that (1) the bound straddles the equator and (2) its
		// width in longitude is at least Pi/2. In this case the minimum
		// distance can occur either between a corner of B and the diagonally
		// opposite corner of B' (as in the case above), or between a corner of B
		// and the opposite longitudinal edge reflected in B'. It is sufficient
		// to only consider the corner-edge case, since this distance is also a
		// lower bound on the corner-corner distance when that case applies.

		// Consider the spherical triangle XYZ where X is a corner of B with
		// minimum absolute latitude, Y is the closest pole to X, and Z is the
		// point closest to X on the opposite longitudinal edge of B'. This is a
		// right triangle (Z = Pi/2), and from the spherical law of sines we have
		//
		// sin(z) / sin(Z) = sin(y) / sin(Y)
		// sin(maxLatGap) / 1 = sin(dMin) / sin(lngGap)
		// sin(dMin) = sin(maxLatGap) * sin(lngGap)
		//
		// where "maxLatGap" = max(latGapSouth, latGapNorth) and "dMin" is the
		// desired minimum distance. Now using the facts that sin(t) >= (2/Pi)*t
		// for 0 <= t <= Pi/2, that we only need an accurate approximation when
		// at least one of "maxLatGap" or lngGap is extremely small (in which
		// case sin(t) ~= t), and recalling that "maxLatGap" has an error of up
		// to 0.75 * dblEpsilon, we want to test whether
		//
		// maxLatGap * lngGap < (4.309 + 0.75) * (Pi/2) * dblEpsilon
		// ~= 1.765e-15
		if math.Max(latGapSouth, latGapNorth)*lngGap < 1.765e-15 {
			return FullRect()
		}
	}
	// Next we need to check whether the subregion might contain any edges that
	// span (math.Pi - 2 * dblEpsilon) radians or more in longitude, since AddPoint
	// sets the longitude bound to Full in that case. This corresponds to
	// testing whether (lngGap <= 0) in lngExpansion below.

	// Otherwise, the maximum latitude error in AddPoint is 4.8 * dblEpsilon.
	// In the worst case, the errors when computing the latitude bound for a
	// subregion could go in the opposite direction as the errors when computing
	// the bound for the original region, so we need to double this value.
	// (More analysis shows that it's okay to round down to a multiple of
	// dblEpsilon.)
	//
	// For longitude, we rely on the fact that atan2 is correctly rounded and
	// therefore no additional bounds expansion is necessary.
	latExpansion := 9 * dblEpsilon
	lngExpansion := 0.0
	if lngGap <= 0 {
		lngExpansion = math.Pi
	}
	return bound.expanded(LatLng{s1.Angle(latExpansion), s1.Angle(lngExpansion)}).PolarClosure()
}

View file

@ -1,18 +1,16 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
@ -39,18 +37,35 @@ type Region interface {
IntersectsCell(c Cell) bool
// ContainsPoint reports whether the region contains the given point or not.
// The point should be unit length, although some implementations may relax this restriction.
// The point should be unit length, although some implementations may relax
// this restriction.
ContainsPoint(p Point) bool
// CellUnionBound returns a small collection of CellIDs whose union covers
// the region. The cells are not sorted, may have redundancies (such as cells
// that contain other cells), and may cover much more area than necessary.
//
// This method is not intended for direct use by client code. Clients
// should typically use Covering, which has options to control the size and
// accuracy of the covering. Alternatively, if you want a fast covering and
// don't care about accuracy, consider calling FastCovering (which returns a
// cleaned-up version of the covering computed by this method).
//
// CellUnionBound implementations should attempt to return a small
// covering (ideally 4 cells or fewer) that covers the region and can be
// computed quickly. The result is used by RegionCoverer as a starting
// point for further refinement.
CellUnionBound() []CellID
}
// Enforce interface satisfaction.
// Enforce Region interface satisfaction.
var (
_ Region = Cap{}
_ Region = Cell{}
_ Region = (*CellUnion)(nil)
_ Region = (*Loop)(nil)
_ Region = Point{}
//_ Region = (*Polygon)(nil)
_ Region = (*Polygon)(nil)
_ Region = (*Polyline)(nil)
_ Region = Rect{}
)

View file

@ -1,18 +1,16 @@
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
@ -99,20 +97,6 @@ type candidate struct {
priority int // Priority of the candidate.
}
func min(x, y int) int {
if x < y {
return x
}
return y
}
func max(x, y int) int {
if x > y {
return x
}
return y
}
type priorityQueue []*candidate
func (pq priorityQueue) Len() int {
@ -273,9 +257,9 @@ func (c *coverer) adjustCellLevels(cells *CellUnion) {
// initialCandidates computes a set of initial candidates that cover the given region.
func (c *coverer) initialCandidates() {
// Optimization: start with a small (usually 4 cell) covering of the region's bounding cap.
temp := &RegionCoverer{MaxLevel: c.maxLevel, LevelMod: 1, MaxCells: min(4, c.maxCells)}
temp := &RegionCoverer{MaxLevel: c.maxLevel, LevelMod: 1, MaxCells: minInt(4, c.maxCells)}
cells := temp.FastCovering(c.region.CapBound())
cells := temp.FastCovering(c.region)
c.adjustCellLevels(&cells)
for _, ci := range cells {
c.addCandidate(c.newCandidate(CellFromCellID(ci)))
@ -329,9 +313,9 @@ func (c *coverer) coveringInternal(region Region) {
// newCoverer returns an instance of coverer.
func (rc *RegionCoverer) newCoverer() *coverer {
return &coverer{
minLevel: max(0, min(maxLevel, rc.MinLevel)),
maxLevel: max(0, min(maxLevel, rc.MaxLevel)),
levelMod: max(1, min(3, rc.LevelMod)),
minLevel: maxInt(0, minInt(maxLevel, rc.MinLevel)),
maxLevel: maxInt(0, minInt(maxLevel, rc.MaxLevel)),
levelMod: maxInt(1, minInt(3, rc.LevelMod)),
maxCells: rc.MaxCells,
}
}
@ -339,14 +323,14 @@ func (rc *RegionCoverer) newCoverer() *coverer {
// Covering returns a CellUnion that covers the given region and satisfies the various restrictions.
func (rc *RegionCoverer) Covering(region Region) CellUnion {
covering := rc.CellUnion(region)
covering.Denormalize(max(0, min(maxLevel, rc.MinLevel)), max(1, min(3, rc.LevelMod)))
covering.Denormalize(maxInt(0, minInt(maxLevel, rc.MinLevel)), maxInt(1, minInt(3, rc.LevelMod)))
return covering
}
// InteriorCovering returns a CellUnion that is contained within the given region and satisfies the various restrictions.
func (rc *RegionCoverer) InteriorCovering(region Region) CellUnion {
intCovering := rc.InteriorCellUnion(region)
intCovering.Denormalize(max(0, min(maxLevel, rc.MinLevel)), max(1, min(3, rc.LevelMod)))
intCovering.Denormalize(maxInt(0, minInt(maxLevel, rc.MinLevel)), maxInt(1, minInt(3, rc.LevelMod)))
return intCovering
}
@ -387,31 +371,13 @@ func (rc *RegionCoverer) InteriorCellUnion(region Region) CellUnion {
//
// This function is useful as a starting point for algorithms that
// recursively subdivide cells.
func (rc *RegionCoverer) FastCovering(cap Cap) CellUnion {
func (rc *RegionCoverer) FastCovering(region Region) CellUnion {
c := rc.newCoverer()
cu := c.rawFastCovering(cap)
cu := CellUnion(region.CellUnionBound())
c.normalizeCovering(&cu)
return cu
}
// rawFastCovering computes a covering of the given cap. In general the covering consists of
// at most 4 cells (except for very large caps, which may need up to 6 cells).
// The output is not sorted.
func (c *coverer) rawFastCovering(cap Cap) CellUnion {
var covering CellUnion
// Find the maximum level such that the cap contains at most one cell vertex
// and such that CellId.VertexNeighbors() can be called.
level := min(MinWidthMetric.MaxLevel(2*cap.Radius().Radians()), maxLevel-1)
if level == 0 {
for face := 0; face < 6; face++ {
covering = append(covering, CellIDFromFace(face))
}
} else {
covering = append(covering, cellIDFromPoint(cap.center).VertexNeighbors(level)...)
}
return covering
}
// normalizeCovering normalizes the "covering" so that it conforms to the current covering
// parameters (MaxCells, minLevel, maxLevel, and levelMod).
// This method makes no attempt to be optimal. In particular, if
@ -425,7 +391,7 @@ func (c *coverer) normalizeCovering(covering *CellUnion) {
if c.maxLevel < maxLevel || c.levelMod > 1 {
for i, ci := range *covering {
level := ci.Level()
newLevel := c.adjustLevel(min(level, c.maxLevel))
newLevel := c.adjustLevel(minInt(level, c.maxLevel))
if newLevel != level {
(*covering)[i] = ci.Parent(newLevel)
}

194
vendor/github.com/golang/geo/s2/shape.go generated vendored Normal file
View file

@ -0,0 +1,194 @@
// Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s2
import (
"sort"
)
// dimension defines the types of geometry dimensions that a Shape supports.
type dimension int
const (
pointGeometry dimension = iota
polylineGeometry
polygonGeometry
)
// Edge represents a geodesic edge consisting of two vertices. Zero-length edges are
// allowed, and can be used to represent points.
type Edge struct {
V0, V1 Point
}
// Cmp compares the two edges using the underlying Points Cmp method and returns
//
//   -1 if e < other
//    0 if e == other
//   +1 if e > other
//
// The two edges are compared by first vertex, and then by the second vertex.
func (e Edge) Cmp(other Edge) int {
	if c := e.V0.Cmp(other.V0.Vector); c != 0 {
		return c
	}
	return e.V1.Cmp(other.V1.Vector)
}
// sortEdges sorts the slice of Edges in place.
func sortEdges(e []Edge) {
sort.Sort(edges(e))
}
// edges implements the Sort interface for slices of Edge.
type edges []Edge
func (e edges) Len() int { return len(e) }
func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e edges) Less(i, j int) bool { return e[i].Cmp(e[j]) == -1 }
// Chain represents a range of edge IDs corresponding to a chain of connected
// edges, specified as a (start, length) pair. The chain is defined to consist of
// edge IDs {start, start + 1, ..., start + length - 1}.
type Chain struct {
Start, Length int
}
// ChainPosition represents the position of an edge within a given edge chain,
// specified as a (chainID, offset) pair. Chains are numbered sequentially
// starting from zero, and offsets are measured from the start of each chain.
type ChainPosition struct {
ChainID, Offset int
}
// A ReferencePoint consists of a point and a boolean indicating whether the point
// is contained by a particular shape.
type ReferencePoint struct {
Point Point
Contained bool
}
// OriginReferencePoint returns a ReferencePoint with the given value for
// contained and the origin point. It should be used when all points or no
// points are contained.
func OriginReferencePoint(contained bool) ReferencePoint {
	ref := ReferencePoint{Contained: contained}
	ref.Point = OriginPoint()
	return ref
}
// Shape represents polygonal geometry in a flexible way. It is organized as a
// collection of edges that optionally defines an interior. All geometry
// represented by a given Shape must have the same dimension, which means that
// a Shape can represent either a set of points, a set of polylines, or a set
// of polygons.
//
// Shape is defined as an interface in order to give clients control over the
// underlying data representation. Sometimes a Shape does not have any data of
// its own, but instead wraps some other type.
//
// Shape operations are typically defined on a ShapeIndex rather than
// individual shapes. A ShapeIndex is simply a collection of Shapes,
// possibly of different dimensions (e.g. 10 points and 3 polygons), organized
// into a data structure for efficient edge access.
//
// The edges of a Shape are indexed by a contiguous range of edge IDs
// starting at 0. The edges are further subdivided into chains, where each
// chain consists of a sequence of edges connected end-to-end (a polyline).
// For example, a Shape representing two polylines AB and CDE would have
// three edges (AB, CD, DE) grouped into two chains: (AB) and (CD, DE).
// Similarly, a Shape representing 5 points would have 5 chains consisting
// of one edge each.
//
// Shape has methods that allow edges to be accessed either using the global
// numbering (edge ID) or within a particular chain. The global numbering is
// sufficient for most purposes, but the chain representation is useful for
// certain algorithms such as intersection (see BooleanOperation).
type Shape interface {
	// NumEdges returns the number of edges in this shape.
	NumEdges() int

	// Edge returns the edge for the given edge index.
	Edge(i int) Edge

	// HasInterior reports whether this shape has an interior.
	HasInterior() bool

	// ReferencePoint returns an arbitrary reference point for the shape. (The
	// containment boolean value must be false for shapes that do not have an interior.)
	//
	// This reference point may then be used to compute the containment of other
	// points by counting edge crossings.
	ReferencePoint() ReferencePoint

	// NumChains reports the number of contiguous edge chains in the shape.
	// For example, a shape whose edges are [AB, BC, CD, AE, EF] would consist
	// of two chains (AB,BC,CD and AE,EF). Every chain is assigned a chain ID
	// numbered sequentially starting from zero.
	//
	// Note that it is always acceptable to implement this method by returning
	// NumEdges, i.e. every chain consists of a single edge, but this may
	// reduce the efficiency of some algorithms.
	NumChains() int

	// Chain returns the range of edge IDs corresponding to the given edge chain.
	// Edge chains must form contiguous, non-overlapping ranges that cover
	// the entire range of edge IDs. This is spelled out more formally below:
	//
	// 0 <= i < NumChains()
	// Chain(i).length > 0, for all i
	// Chain(0).start == 0
	// Chain(i).start + Chain(i).length == Chain(i+1).start, for i < NumChains()-1
	// Chain(i).start + Chain(i).length == NumEdges(), for i == NumChains()-1
	Chain(chainID int) Chain

	// ChainEdge returns the edge at offset "offset" within edge chain "chainID".
	// Equivalent to "shape.Edge(shape.Chain(chainID).start + offset)"
	// but more efficient.
	ChainEdge(chainID, offset int) Edge

	// ChainPosition finds the chain containing the given edge, and returns the
	// position of that edge as a ChainPosition(chainID, offset) pair.
	//
	// shape.Chain(pos.chainID).start + pos.offset == edgeID
	// shape.Chain(pos.chainID+1).start > edgeID
	//
	// where pos == shape.ChainPosition(edgeID).
	ChainPosition(edgeID int) ChainPosition

	// dimension returns the dimension of the geometry represented by this shape.
	//
	// pointGeometry: Each point is represented as a degenerate edge.
	//
	// polylineGeometry: Polyline edges may be degenerate. A shape may
	// represent any number of polylines. Polylines edges may intersect.
	//
	// polygonGeometry: Edges should be oriented such that the polygon
	// interior is always on the left. In theory the edges may be returned
	// in any order, but typically the edges are organized as a collection
	// of edge chains where each chain represents one polygon loop.
	// Polygons may have degeneracies (e.g., degenerate edges or sibling
	// pairs consisting of an edge and its corresponding reversed edge).
	//
	// Note that this method allows degenerate geometry of different dimensions
	// to be distinguished, e.g. it allows a point to be distinguished from a
	// polyline or polygon that has been simplified to a single point.
	dimension() dimension
}
// A minimal check for types that should satisfy the Shape interface.
var (
_ Shape = &Loop{}
_ Shape = &Polygon{}
_ Shape = &Polyline{}
)

Some files were not shown because too many files have changed in this diff Show more